SV* sva;
SV* svanext;
void *arena, *arenanext;
+ int i;
+ void **arenaroots[] = {
+ (void**) &PL_xnv_arenaroot,
+ (void**) &PL_xpv_arenaroot,
+ (void**) &PL_xpviv_arenaroot,
+ (void**) &PL_xpvnv_arenaroot,
+ (void**) &PL_xpvcv_arenaroot,
+ (void**) &PL_xpvav_arenaroot,
+ (void**) &PL_xpvhv_arenaroot,
+ (void**) &PL_xpvmg_arenaroot,
+ (void**) &PL_xpvgv_arenaroot,
+ (void**) &PL_xpvlv_arenaroot,
+ (void**) &PL_xpvbm_arenaroot,
+ (void**) 0
+ };
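+    /* Must be kept in step with arenaroots above: roots[i] is the free-list
+       root for the arena chain at arenaroots[i].  */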
+ void **roots[] = {
+ (void**) &PL_xnv_root,
+ (void**) &PL_xpv_root,
+ (void**) &PL_xpviv_root,
+ (void**) &PL_xpvnv_root,
+ (void**) &PL_xpvcv_root,
+ (void**) &PL_xpvav_root,
+ (void**) &PL_xpvhv_root,
+ (void**) &PL_xpvmg_root,
+ (void**) &PL_xpvgv_root,
+ (void**) &PL_xpvlv_root,
+ (void**) &PL_xpvbm_root,
+ (void**) 0
+ };
/* Free arenas here, but be careful about fake ones. (We assume
contiguity of the fake ones with the corresponding real ones.) */
if (!SvFAKE(sva))
Safefree(sva);
}
+
+ assert(sizeof(arenaroots) == sizeof(roots));
- for (arena = PL_xnv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xnv_arenaroot = 0;
- PL_xnv_root = 0;
-
- for (arena = PL_xpv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpv_arenaroot = 0;
- PL_xpv_root = 0;
-
- for (arena = PL_xpviv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpviv_arenaroot = 0;
- PL_xpviv_root = 0;
-
- for (arena = PL_xpvnv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvnv_arenaroot = 0;
- PL_xpvnv_root = 0;
-
- for (arena = PL_xpvcv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvcv_arenaroot = 0;
- PL_xpvcv_root = 0;
-
- for (arena = PL_xpvav_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvav_arenaroot = 0;
- PL_xpvav_root = 0;
-
- for (arena = PL_xpvhv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvhv_arenaroot = 0;
- PL_xpvhv_root = 0;
-
- for (arena = PL_xpvmg_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvmg_arenaroot = 0;
- PL_xpvmg_root = 0;
-
- for (arena = PL_xpvgv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvgv_arenaroot = 0;
- PL_xpvgv_root = 0;
+ for (i=0; arenaroots[i]; i++) {
- for (arena = PL_xpvlv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvlv_arenaroot = 0;
- PL_xpvlv_root = 0;
-
- for (arena = PL_xpvbm_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
+ arena = *arenaroots[i];
+ for (; arena; arena = arenanext) {
+ arenanext = *(void **)arena;
+ Safefree(arena);
+ }
+ *arenaroots[i] = 0;
+ *roots[i] = 0;
}
- PL_xpvbm_arenaroot = 0;
- PL_xpvbm_root = 0;
{
HE *he;
else {
U32 u;
CV *cv = find_runcv(&u);
- STRLEN len;
- const char *str;
if (!cv || !CvPADLIST(cv))
return Nullsv;
av = (AV*)(*av_fetch(CvPADLIST(cv), 0, FALSE));
sv = *av_fetch(av, targ, FALSE);
/* SvLEN in a pad name is not to be trusted */
- str = SvPV_const(sv,len);
- sv_setpvn(name, str, len);
+ sv_setpv(name, SvPV_nolen_const(sv));
}
if (subscript_type == FUV_SUBSCRIPT_HASH) {
"", "", "");
}
-/* allocate another arena's worth of NV bodies */
-
-STATIC void
-S_more_xnv(pTHX)
-{
- NV* xnv;
- NV* xnvend;
- void *ptr;
- New(711, ptr, PERL_ARENA_SIZE/sizeof(NV), NV);
- *((void **) ptr) = (void *)PL_xnv_arenaroot;
- PL_xnv_arenaroot = ptr;
-
- xnv = (NV*) ptr;
- xnvend = &xnv[PERL_ARENA_SIZE / sizeof(NV) - 1];
- xnv += (sizeof(XPVIV) - 1) / sizeof(NV) + 1; /* fudge by sizeof XPVIV */
- PL_xnv_root = xnv;
- while (xnv < xnvend) {
- *(NV**)xnv = (NV*)(xnv + 1);
- xnv++;
- }
- *(NV**)xnv = 0;
-}
-
-/* allocate another arena's worth of struct xpv */
-
-STATIC void
-S_more_xpv(pTHX)
-{
- xpv_allocated* xpv;
- xpv_allocated* xpvend;
- New(713, xpv, PERL_ARENA_SIZE/sizeof(xpv_allocated), xpv_allocated);
- *((xpv_allocated**)xpv) = PL_xpv_arenaroot;
- PL_xpv_arenaroot = xpv;
-
- xpvend = &xpv[PERL_ARENA_SIZE / sizeof(xpv_allocated) - 1];
- PL_xpv_root = ++xpv;
- while (xpv < xpvend) {
- *((xpv_allocated**)xpv) = xpv + 1;
- xpv++;
- }
- *((xpv_allocated**)xpv) = 0;
-}
-
-/* allocate another arena's worth of struct xpviv */
-
-STATIC void
-S_more_xpviv(pTHX)
-{
- xpviv_allocated* xpviv;
- xpviv_allocated* xpvivend;
- New(713, xpviv, PERL_ARENA_SIZE/sizeof(xpviv_allocated), xpviv_allocated);
- *((xpviv_allocated**)xpviv) = PL_xpviv_arenaroot;
- PL_xpviv_arenaroot = xpviv;
-
- xpvivend = &xpviv[PERL_ARENA_SIZE / sizeof(xpviv_allocated) - 1];
- PL_xpviv_root = ++xpviv;
- while (xpviv < xpvivend) {
- *((xpviv_allocated**)xpviv) = xpviv + 1;
- xpviv++;
- }
- *((xpviv_allocated**)xpviv) = 0;
-}
-
-/* allocate another arena's worth of struct xpvnv */
-
-STATIC void
-S_more_xpvnv(pTHX)
+STATIC void *
+S_more_bodies(pTHX_ void **arena_root, void **root, size_t size)
{
- XPVNV* xpvnv;
- XPVNV* xpvnvend;
- New(715, xpvnv, PERL_ARENA_SIZE/sizeof(XPVNV), XPVNV);
- *((XPVNV**)xpvnv) = PL_xpvnv_arenaroot;
- PL_xpvnv_arenaroot = xpvnv;
+ char *start;
+ const char *end;
+ size_t count = PERL_ARENA_SIZE/size;
+ New(0, start, count*size, char);
+ *((void **) start) = *arena_root;
+ *arena_root = (void *)start;
- xpvnvend = &xpvnv[PERL_ARENA_SIZE / sizeof(XPVNV) - 1];
- PL_xpvnv_root = ++xpvnv;
- while (xpvnv < xpvnvend) {
- *((XPVNV**)xpvnv) = xpvnv + 1;
- xpvnv++;
- }
- *((XPVNV**)xpvnv) = 0;
-}
+ end = start + (count-1) * size;
-/* allocate another arena's worth of struct xpvcv */
+ /* The initial slot is used to link the arenas together, so it isn't to be
+ linked into the list of ready-to-use bodies. */
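+
+    /* Roughly, each arena ends up laid out as
+
+           [ link to next arena | body | body | ... | body ]
+
+       with each free body's first pointer-sized word pointing at the next
+       free body, and the last one set to 0 below.  */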
-STATIC void
-S_more_xpvcv(pTHX)
-{
- XPVCV* xpvcv;
- XPVCV* xpvcvend;
- New(716, xpvcv, PERL_ARENA_SIZE/sizeof(XPVCV), XPVCV);
- *((XPVCV**)xpvcv) = PL_xpvcv_arenaroot;
- PL_xpvcv_arenaroot = xpvcv;
-
- xpvcvend = &xpvcv[PERL_ARENA_SIZE / sizeof(XPVCV) - 1];
- PL_xpvcv_root = ++xpvcv;
- while (xpvcv < xpvcvend) {
- *((XPVCV**)xpvcv) = xpvcv + 1;
- xpvcv++;
- }
- *((XPVCV**)xpvcv) = 0;
-}
+ start += size;
-/* allocate another arena's worth of struct xpvav */
+ *root = (void *)start;
-STATIC void
-S_more_xpvav(pTHX)
-{
- xpvav_allocated* xpvav;
- xpvav_allocated* xpvavend;
- New(717, xpvav, PERL_ARENA_SIZE/sizeof(xpvav_allocated),
- xpvav_allocated);
- *((xpvav_allocated**)xpvav) = PL_xpvav_arenaroot;
- PL_xpvav_arenaroot = xpvav;
-
- xpvavend = &xpvav[PERL_ARENA_SIZE / sizeof(xpvav_allocated) - 1];
- PL_xpvav_root = ++xpvav;
- while (xpvav < xpvavend) {
- *((xpvav_allocated**)xpvav) = xpvav + 1;
- xpvav++;
+ while (start < end) {
+ char *next = start + size;
+ *(void**) start = (void *)next;
+ start = next;
}
- *((xpvav_allocated**)xpvav) = 0;
-}
-
-/* allocate another arena's worth of struct xpvhv */
-
-STATIC void
-S_more_xpvhv(pTHX)
-{
- xpvhv_allocated* xpvhv;
- xpvhv_allocated* xpvhvend;
- New(718, xpvhv, PERL_ARENA_SIZE/sizeof(xpvhv_allocated),
- xpvhv_allocated);
- *((xpvhv_allocated**)xpvhv) = PL_xpvhv_arenaroot;
- PL_xpvhv_arenaroot = xpvhv;
-
- xpvhvend = &xpvhv[PERL_ARENA_SIZE / sizeof(xpvhv_allocated) - 1];
- PL_xpvhv_root = ++xpvhv;
- while (xpvhv < xpvhvend) {
- *((xpvhv_allocated**)xpvhv) = xpvhv + 1;
- xpvhv++;
- }
- *((xpvhv_allocated**)xpvhv) = 0;
-}
-
-/* allocate another arena's worth of struct xpvmg */
-
-STATIC void
-S_more_xpvmg(pTHX)
-{
- XPVMG* xpvmg;
- XPVMG* xpvmgend;
- New(719, xpvmg, PERL_ARENA_SIZE/sizeof(XPVMG), XPVMG);
- *((XPVMG**)xpvmg) = PL_xpvmg_arenaroot;
- PL_xpvmg_arenaroot = xpvmg;
-
- xpvmgend = &xpvmg[PERL_ARENA_SIZE / sizeof(XPVMG) - 1];
- PL_xpvmg_root = ++xpvmg;
- while (xpvmg < xpvmgend) {
- *((XPVMG**)xpvmg) = xpvmg + 1;
- xpvmg++;
- }
- *((XPVMG**)xpvmg) = 0;
-}
-
-/* allocate another arena's worth of struct xpvgv */
-
-STATIC void
-S_more_xpvgv(pTHX)
-{
- XPVGV* xpvgv;
- XPVGV* xpvgvend;
- New(720, xpvgv, PERL_ARENA_SIZE/sizeof(XPVGV), XPVGV);
- *((XPVGV**)xpvgv) = PL_xpvgv_arenaroot;
- PL_xpvgv_arenaroot = xpvgv;
-
- xpvgvend = &xpvgv[PERL_ARENA_SIZE / sizeof(XPVGV) - 1];
- PL_xpvgv_root = ++xpvgv;
- while (xpvgv < xpvgvend) {
- *((XPVGV**)xpvgv) = xpvgv + 1;
- xpvgv++;
- }
- *((XPVGV**)xpvgv) = 0;
-}
-
-/* allocate another arena's worth of struct xpvlv */
-
-STATIC void
-S_more_xpvlv(pTHX)
-{
- XPVLV* xpvlv;
- XPVLV* xpvlvend;
- New(720, xpvlv, PERL_ARENA_SIZE/sizeof(XPVLV), XPVLV);
- *((XPVLV**)xpvlv) = PL_xpvlv_arenaroot;
- PL_xpvlv_arenaroot = xpvlv;
-
- xpvlvend = &xpvlv[PERL_ARENA_SIZE / sizeof(XPVLV) - 1];
- PL_xpvlv_root = ++xpvlv;
- while (xpvlv < xpvlvend) {
- *((XPVLV**)xpvlv) = xpvlv + 1;
- xpvlv++;
- }
- *((XPVLV**)xpvlv) = 0;
-}
-
-/* allocate another arena's worth of struct xpvbm */
-
-STATIC void
-S_more_xpvbm(pTHX)
-{
- XPVBM* xpvbm;
- XPVBM* xpvbmend;
- New(721, xpvbm, PERL_ARENA_SIZE/sizeof(XPVBM), XPVBM);
- *((XPVBM**)xpvbm) = PL_xpvbm_arenaroot;
- PL_xpvbm_arenaroot = xpvbm;
-
- xpvbmend = &xpvbm[PERL_ARENA_SIZE / sizeof(XPVBM) - 1];
- PL_xpvbm_root = ++xpvbm;
- while (xpvbm < xpvbmend) {
- *((XPVBM**)xpvbm) = xpvbm + 1;
- xpvbm++;
- }
- *((XPVBM**)xpvbm) = 0;
-}
-
-/* grab a new NV body from the free list, allocating more if necessary */
-
-STATIC XPVNV*
-S_new_xnv(pTHX)
-{
- NV* xnv;
- LOCK_SV_MUTEX;
- if (!PL_xnv_root)
- S_more_xnv(aTHX);
- xnv = PL_xnv_root;
- PL_xnv_root = *(NV**)xnv;
- UNLOCK_SV_MUTEX;
- return (XPVNV*)((char*)xnv - STRUCT_OFFSET(XPVNV, xnv_nv));
-}
-
-/* return an NV body to the free list */
-
-STATIC void
-S_del_xnv(pTHX_ XPVNV *p)
-{
- NV* xnv = (NV*)((char*)(p) + STRUCT_OFFSET(XPVNV, xnv_nv));
- LOCK_SV_MUTEX;
- *(NV**)xnv = PL_xnv_root;
- PL_xnv_root = xnv;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpv from the free list, allocating more if necessary */
-
-STATIC XPV*
-S_new_xpv(pTHX)
-{
- xpv_allocated* xpv;
- LOCK_SV_MUTEX;
- if (!PL_xpv_root)
- S_more_xpv(aTHX);
- xpv = PL_xpv_root;
- PL_xpv_root = *(xpv_allocated**)xpv;
- UNLOCK_SV_MUTEX;
- /* If xpv_allocated is the same structure as XPV then the two OFFSETs
- sum to zero, and the pointer is unchanged. If the allocated structure
- is smaller (no initial IV actually allocated) then the net effect is
- to subtract the size of the IV from the pointer, to return a new pointer
- as if an initial IV were actually allocated. */
- return (XPV*)((char*)xpv - STRUCT_OFFSET(XPV, xpv_cur)
- + STRUCT_OFFSET(xpv_allocated, xpv_cur));
-}
-
-/* return a struct xpv to the free list */
-
-STATIC void
-S_del_xpv(pTHX_ XPV *p)
-{
- xpv_allocated* xpv
- = (xpv_allocated*)((char*)(p) + STRUCT_OFFSET(XPV, xpv_cur)
- - STRUCT_OFFSET(xpv_allocated, xpv_cur));
- LOCK_SV_MUTEX;
- *(xpv_allocated**)xpv = PL_xpv_root;
- PL_xpv_root = xpv;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpviv from the free list, allocating more if necessary */
-
-STATIC XPVIV*
-S_new_xpviv(pTHX)
-{
- xpviv_allocated* xpviv;
- LOCK_SV_MUTEX;
- if (!PL_xpviv_root)
- S_more_xpviv(aTHX);
- xpviv = PL_xpviv_root;
- PL_xpviv_root = *(xpviv_allocated**)xpviv;
- UNLOCK_SV_MUTEX;
- /* If xpviv_allocated is the same structure as XPVIV then the two OFFSETs
- sum to zero, and the pointer is unchanged. If the allocated structure
- is smaller (no initial IV actually allocated) then the net effect is
- to subtract the size of the IV from the pointer, to return a new pointer
- as if an initial IV were actually allocated. */
- return (XPVIV*)((char*)xpviv - STRUCT_OFFSET(XPVIV, xpv_cur)
- + STRUCT_OFFSET(xpviv_allocated, xpv_cur));
-}
-
-/* return a struct xpviv to the free list */
-
-STATIC void
-S_del_xpviv(pTHX_ XPVIV *p)
-{
- xpviv_allocated* xpviv
- = (xpviv_allocated*)((char*)(p) + STRUCT_OFFSET(XPVIV, xpv_cur)
- - STRUCT_OFFSET(xpviv_allocated, xpv_cur));
- LOCK_SV_MUTEX;
- *(xpviv_allocated**)xpviv = PL_xpviv_root;
- PL_xpviv_root = xpviv;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvnv from the free list, allocating more if necessary */
-
-STATIC XPVNV*
-S_new_xpvnv(pTHX)
-{
- XPVNV* xpvnv;
- LOCK_SV_MUTEX;
- if (!PL_xpvnv_root)
- S_more_xpvnv(aTHX);
- xpvnv = PL_xpvnv_root;
- PL_xpvnv_root = *(XPVNV**)xpvnv;
- UNLOCK_SV_MUTEX;
- return xpvnv;
-}
-
-/* return a struct xpvnv to the free list */
-
-STATIC void
-S_del_xpvnv(pTHX_ XPVNV *p)
-{
- LOCK_SV_MUTEX;
- *(XPVNV**)p = PL_xpvnv_root;
- PL_xpvnv_root = p;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvcv from the free list, allocating more if necessary */
-
-STATIC XPVCV*
-S_new_xpvcv(pTHX)
-{
- XPVCV* xpvcv;
- LOCK_SV_MUTEX;
- if (!PL_xpvcv_root)
- S_more_xpvcv(aTHX);
- xpvcv = PL_xpvcv_root;
- PL_xpvcv_root = *(XPVCV**)xpvcv;
- UNLOCK_SV_MUTEX;
- return xpvcv;
-}
-
-/* return a struct xpvcv to the free list */
-
-STATIC void
-S_del_xpvcv(pTHX_ XPVCV *p)
-{
- LOCK_SV_MUTEX;
- *(XPVCV**)p = PL_xpvcv_root;
- PL_xpvcv_root = p;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvav from the free list, allocating more if necessary */
-
-STATIC XPVAV*
-S_new_xpvav(pTHX)
-{
- xpvav_allocated* xpvav;
- LOCK_SV_MUTEX;
- if (!PL_xpvav_root)
- S_more_xpvav(aTHX);
- xpvav = PL_xpvav_root;
- PL_xpvav_root = *(xpvav_allocated**)xpvav;
- UNLOCK_SV_MUTEX;
- return (XPVAV*)((char*)xpvav - STRUCT_OFFSET(XPVAV, xav_fill)
- + STRUCT_OFFSET(xpvav_allocated, xav_fill));
-}
-
-/* return a struct xpvav to the free list */
-
-STATIC void
-S_del_xpvav(pTHX_ XPVAV *p)
-{
- xpvav_allocated* xpvav
- = (xpvav_allocated*)((char*)(p) + STRUCT_OFFSET(XPVAV, xav_fill)
- - STRUCT_OFFSET(xpvav_allocated, xav_fill));
- LOCK_SV_MUTEX;
- *(xpvav_allocated**)xpvav = PL_xpvav_root;
- PL_xpvav_root = xpvav;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvhv from the free list, allocating more if necessary */
-
-STATIC XPVHV*
-S_new_xpvhv(pTHX)
-{
- xpvhv_allocated* xpvhv;
- LOCK_SV_MUTEX;
- if (!PL_xpvhv_root)
- S_more_xpvhv(aTHX);
- xpvhv = PL_xpvhv_root;
- PL_xpvhv_root = *(xpvhv_allocated**)xpvhv;
- UNLOCK_SV_MUTEX;
- return (XPVHV*)((char*)xpvhv - STRUCT_OFFSET(XPVHV, xhv_fill)
- + STRUCT_OFFSET(xpvhv_allocated, xhv_fill));
-}
-
-/* return a struct xpvhv to the free list */
-
-STATIC void
-S_del_xpvhv(pTHX_ XPVHV *p)
-{
- xpvhv_allocated* xpvhv
- = (xpvhv_allocated*)((char*)(p) + STRUCT_OFFSET(XPVHV, xhv_fill)
- - STRUCT_OFFSET(xpvhv_allocated, xhv_fill));
- LOCK_SV_MUTEX;
- *(xpvhv_allocated**)xpvhv = PL_xpvhv_root;
- PL_xpvhv_root = xpvhv;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvmg from the free list, allocating more if necessary */
-
-STATIC XPVMG*
-S_new_xpvmg(pTHX)
-{
- XPVMG* xpvmg;
- LOCK_SV_MUTEX;
- if (!PL_xpvmg_root)
- S_more_xpvmg(aTHX);
- xpvmg = PL_xpvmg_root;
- PL_xpvmg_root = *(XPVMG**)xpvmg;
- UNLOCK_SV_MUTEX;
- return xpvmg;
-}
-
-/* return a struct xpvmg to the free list */
+ *(void **)start = 0;
-STATIC void
-S_del_xpvmg(pTHX_ XPVMG *p)
-{
- LOCK_SV_MUTEX;
- *(XPVMG**)p = PL_xpvmg_root;
- PL_xpvmg_root = p;
- UNLOCK_SV_MUTEX;
+ return *root;
}
-/* grab a new struct xpvgv from the free list, allocating more if necessary */
+/* grab a new thing from the free list, allocating more if necessary */
-STATIC XPVGV*
-S_new_xpvgv(pTHX)
+STATIC void *
+S_new_body(pTHX_ void **arena_root, void **root, size_t size, size_t offset)
{
- XPVGV* xpvgv;
+ void *xpv;
LOCK_SV_MUTEX;
- if (!PL_xpvgv_root)
- S_more_xpvgv(aTHX);
- xpvgv = PL_xpvgv_root;
- PL_xpvgv_root = *(XPVGV**)xpvgv;
+ xpv = *root ? *root : S_more_bodies(aTHX_ arena_root, root, size);
+ *root = *(void**)xpv;
UNLOCK_SV_MUTEX;
- return xpvgv;
+ return (void*)((char*)xpv - offset);
}
-/* return a struct xpvgv to the free list */
+/* return a thing to the free list */
STATIC void
-S_del_xpvgv(pTHX_ XPVGV *p)
+S_del_body(pTHX_ void *thing, void **root, size_t offset)
{
+ void **real_thing = (void**)((char *)thing + offset);
LOCK_SV_MUTEX;
- *(XPVGV**)p = PL_xpvgv_root;
- PL_xpvgv_root = p;
+ *real_thing = *root;
+ *root = (void*)real_thing;
UNLOCK_SV_MUTEX;
}
-/* grab a new struct xpvlv from the free list, allocating more if necessary */
+/* Conventionally we simply malloc() a big block of memory, then divide it
+ up into lots of the thing that we're allocating.
-STATIC XPVLV*
-S_new_xpvlv(pTHX)
-{
- XPVLV* xpvlv;
- LOCK_SV_MUTEX;
- if (!PL_xpvlv_root)
- S_more_xpvlv(aTHX);
- xpvlv = PL_xpvlv_root;
- PL_xpvlv_root = *(XPVLV**)xpvlv;
- UNLOCK_SV_MUTEX;
- return xpvlv;
-}
+ This macro will expand to a call to S_new_body. So for XPVBM (with ithreads),
+ it would become
-/* return a struct xpvlv to the free list */
-
-STATIC void
-S_del_xpvlv(pTHX_ XPVLV *p)
-{
- LOCK_SV_MUTEX;
- *(XPVLV**)p = PL_xpvlv_root;
- PL_xpvlv_root = p;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvbm from the free list, allocating more if necessary */
-
-STATIC XPVBM*
-S_new_xpvbm(pTHX)
-{
- XPVBM* xpvbm;
- LOCK_SV_MUTEX;
- if (!PL_xpvbm_root)
- S_more_xpvbm(aTHX);
- xpvbm = PL_xpvbm_root;
- PL_xpvbm_root = *(XPVBM**)xpvbm;
- UNLOCK_SV_MUTEX;
- return xpvbm;
-}
-
-/* return a struct xpvbm to the free list */
+ S_new_body(my_perl, (void**)&(my_perl->Ixpvbm_arenaroot),
+ (void**)&(my_perl->Ixpvbm_root), sizeof(XPVBM), 0)
+*/
-STATIC void
-S_del_xpvbm(pTHX_ XPVBM *p)
-{
- LOCK_SV_MUTEX;
- *(XPVBM**)p = PL_xpvbm_root;
- PL_xpvbm_root = p;
- UNLOCK_SV_MUTEX;
-}
+#define new_body(TYPE,lctype) \
+ S_new_body(aTHX_ (void**)&PL_ ## lctype ## _arenaroot, \
+ (void**)&PL_ ## lctype ## _root, \
+ sizeof(TYPE), \
+ 0)
+
+/* But for some types, we cheat. The type starts with some members that are
+ never accessed. So we allocate the substructure, starting at the first used
+ member, then adjust the pointer back in memory by the size of the bit not
+ allocated, so it's as if we allocated the full structure.
+ (But things will all go boom if you write to the part that is "not there",
+ because you'll be overwriting the last members of the preceding structure
+ in memory.)
+
+ We calculate the correction using the STRUCT_OFFSET macro. For example, if
+ xpv_allocated is the same structure as XPV then the two OFFSETs sum to zero,
+ and the pointer is unchanged. If the allocated structure is smaller (no
+ initial NV actually allocated) then the net effect is to subtract the size
+ of the NV from the pointer, to return a new pointer as if an initial NV were
+ actually allocated.
+
+ This is the same trick as was used for NV and IV bodies. Ironically it
+ doesn't need to be used for NV bodies any more, because NV is now at the
+ start of the structure. IV bodies don't need it either, because they are
+ no longer allocated. */
+
+#define new_body_allocated(TYPE,lctype,member) \
+ S_new_body(aTHX_ (void**)&PL_ ## lctype ## _arenaroot, \
+ (void**)&PL_ ## lctype ## _root, \
+ sizeof(lctype ## _allocated), \
+ STRUCT_OFFSET(TYPE, member) \
+ - STRUCT_OFFSET(lctype ## _allocated, member))
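+
+/* For example (before any threading macros are expanded),
+   new_body_allocated(XPV, xpv, xpv_cur) becomes
+
+       S_new_body(aTHX_ (void**)&PL_xpv_arenaroot,
+                  (void**)&PL_xpv_root,
+                  sizeof(xpv_allocated),
+                  STRUCT_OFFSET(XPV, xpv_cur)
+                  - STRUCT_OFFSET(xpv_allocated, xpv_cur))
+*/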
+
+
+#define del_body(p,TYPE,lctype) \
+ S_del_body(aTHX_ (void*)p, (void**)&PL_ ## lctype ## _root, 0)
+
+#define del_body_allocated(p,TYPE,lctype,member) \
+ S_del_body(aTHX_ (void*)p, (void**)&PL_ ## lctype ## _root, \
+ STRUCT_OFFSET(TYPE, member) \
+ - STRUCT_OFFSET(lctype ## _allocated, member))
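+
+/* Similarly, del_XPVAV(p) below becomes del_body_allocated(p, XPVAV, xpvav,
+   xav_fill), applying the same offset correction before S_del_body chains the
+   body back onto PL_xpvav_root.  */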
#define my_safemalloc(s) (void*)safemalloc(s)
#define my_safefree(p) safefree((char*)p)
#else /* !PURIFY */
-#define new_XNV() (void*)new_xnv()
-#define del_XNV(p) del_xnv((XPVNV*) p)
+#define new_XNV() new_body(NV, xnv)
+#define del_XNV(p) del_body(p, NV, xnv)
-#define new_XPV() (void*)new_xpv()
-#define del_XPV(p) del_xpv((XPV *)p)
+#define new_XPV() new_body_allocated(XPV, xpv, xpv_cur)
+#define del_XPV(p) del_body_allocated(p, XPV, xpv, xpv_cur)
-#define new_XPVIV() (void*)new_xpviv()
-#define del_XPVIV(p) del_xpviv((XPVIV *)p)
+#define new_XPVIV() new_body_allocated(XPVIV, xpviv, xpv_cur)
+#define del_XPVIV(p) del_body_allocated(p, XPVIV, xpviv, xpv_cur)
-#define new_XPVNV() (void*)new_xpvnv()
-#define del_XPVNV(p) del_xpvnv((XPVNV *)p)
+#define new_XPVNV() new_body(XPVNV, xpvnv)
+#define del_XPVNV(p) del_body(p, XPVNV, xpvnv)
-#define new_XPVCV() (void*)new_xpvcv()
-#define del_XPVCV(p) del_xpvcv((XPVCV *)p)
+#define new_XPVCV() new_body(XPVCV, xpvcv)
+#define del_XPVCV(p) del_body(p, XPVCV, xpvcv)
-#define new_XPVAV() (void*)new_xpvav()
-#define del_XPVAV(p) del_xpvav((XPVAV *)p)
+#define new_XPVAV() new_body_allocated(XPVAV, xpvav, xav_fill)
+#define del_XPVAV(p) del_body_allocated(p, XPVAV, xpvav, xav_fill)
-#define new_XPVHV() (void*)new_xpvhv()
-#define del_XPVHV(p) del_xpvhv((XPVHV *)p)
+#define new_XPVHV() new_body_allocated(XPVHV, xpvhv, xhv_fill)
+#define del_XPVHV(p) del_body_allocated(p, XPVHV, xpvhv, xhv_fill)
-#define new_XPVMG() (void*)new_xpvmg()
-#define del_XPVMG(p) del_xpvmg((XPVMG *)p)
+#define new_XPVMG() new_body(XPVMG, xpvmg)
+#define del_XPVMG(p) del_body(p, XPVMG, xpvmg)
-#define new_XPVGV() (void*)new_xpvgv()
-#define del_XPVGV(p) del_xpvgv((XPVGV *)p)
+#define new_XPVGV() new_body(XPVGV, xpvgv)
+#define del_XPVGV(p) del_body(p, XPVGV, xpvgv)
-#define new_XPVLV() (void*)new_xpvlv()
-#define del_XPVLV(p) del_xpvlv((XPVLV *)p)
+#define new_XPVLV() new_body(XPVLV, xpvlv)
+#define del_XPVLV(p) del_body(p, XPVLV, xpvlv)
-#define new_XPVBM() (void*)new_xpvbm()
-#define del_XPVBM(p) del_xpvbm((XPVBM *)p)
+#define new_XPVBM() new_body(XPVBM, xpvbm)
+#define del_XPVBM(p) del_body(p, XPVBM, xpvbm)
#endif /* PURIFY */
=cut
*/
-bool
+void
Perl_sv_upgrade(pTHX_ register SV *sv, U32 mt)
{
-
- char* pv;
- U32 cur;
- U32 len;
- IV iv;
- NV nv;
- MAGIC* magic;
- HV* stash;
+ void** old_body_arena;
+ size_t old_body_offset;
+ size_t old_body_length; /* Well, the length to copy. */
+ void* old_body;
+ bool zero_nv = TRUE;
+ void* new_body;
+ size_t new_body_length;
+ size_t new_body_offset;
+ void** new_body_arena;
+ void** new_body_arenaroot;
+ U32 old_type = SvTYPE(sv);
if (mt != SVt_PV && SvIsCOW(sv)) {
sv_force_normal_flags(sv, 0);
}
if (SvTYPE(sv) == mt)
- return TRUE;
+ return;
+
+ if (SvTYPE(sv) > mt)
+ Perl_croak(aTHX_ "sv_upgrade from type %d down to type %d",
+ (int)SvTYPE(sv), (int)mt);
- pv = NULL;
- cur = 0;
- len = 0;
- iv = 0;
- nv = 0.0;
- magic = NULL;
- stash = Nullhv;
+
+ old_body = SvANY(sv);
+ old_body_arena = 0;
+ old_body_offset = 0;
+ old_body_length = 0;
+ new_body_offset = 0;
+ new_body_length = ~0;
+
+ /* Copying structures onto other structures that have been neatly zeroed
+ has a subtle gotcha. Consider XPVMG
+
+    +------+------+------+------+------+-------+-------+
+    |     NV      | CUR  | LEN  |  IV  | MAGIC | STASH |
+    +------+------+------+------+------+-------+-------+
+    0      4      8     12     16     20      24      28
+
+ where NVs are aligned to 8 bytes, so that the structure is actually
+ 32 bytes long, with 4 bytes of padding at the end:
+
+    +------+------+------+------+------+-------+-------+------+
+    |     NV      | CUR  | LEN  |  IV  | MAGIC | STASH | ???  |
+    +------+------+------+------+------+-------+-------+------+
+    0      4      8     12     16     20      24      28     32
+
+ so what happens if you allocate memory for this structure:
+
+    +------+------+------+------+------+-------+-------+------+------+...
+    |     NV      | CUR  | LEN  |  IV  | MAGIC | STASH |  GP  | NAME |...
+    +------+------+------+------+------+-------+-------+------+------+...
+    0      4      8     12     16     20      24      28     32     36
+
+ zero it, then copy sizeof(XPVMG) bytes on top of it? Not quite what you
+ expect, because you copy the area marked ??? onto GP. Now, ??? may have
+ started out as zero once, but it's quite possible that it isn't. So now,
+ rather than a nicely zeroed GP, you have it pointing somewhere random.
+ Bugs ensue.
+
+ (In fact, GP ends up pointing at a previous GP structure, because the
+ principal cause of the padding in XPVMG getting garbage is a copy of
+ sizeof(XPVMG) bytes from an XPVGV structure in sv_unglob)
+
+ So we are careful and work out the size of used parts of all the
+ structures. */
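+
+    /* For instance, for an XPVMG body only STRUCT_OFFSET(XPVMG, xmg_stash)
+       + sizeof(((XPVMG*)SvANY(sv))->xmg_stash) bytes are copied, so the
+       trailing padding never reaches the new body.  */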
switch (SvTYPE(sv)) {
case SVt_NULL:
break;
case SVt_IV:
- iv = SvIVX(sv);
if (mt == SVt_NV)
mt = SVt_PVNV;
else if (mt < SVt_PVIV)
mt = SVt_PVIV;
+ old_body_offset = STRUCT_OFFSET(XPVIV, xiv_iv);
+ old_body_length = sizeof(IV);
break;
case SVt_NV:
- nv = SvNVX(sv);
- del_XNV(SvANY(sv));
+ old_body_arena = (void **) &PL_xnv_root;
+ old_body_length = sizeof(NV);
+ zero_nv = FALSE;
+
if (mt < SVt_PVNV)
mt = SVt_PVNV;
break;
case SVt_RV:
- pv = (char*)SvRV(sv);
break;
case SVt_PV:
- pv = SvPVX_mutable(sv);
- cur = SvCUR(sv);
- len = SvLEN(sv);
- del_XPV(SvANY(sv));
+ old_body_arena = (void **) &PL_xpv_root;
+ old_body_offset = STRUCT_OFFSET(XPV, xpv_cur)
+ - STRUCT_OFFSET(xpv_allocated, xpv_cur);
+ old_body_length = STRUCT_OFFSET(XPV, xpv_len)
+ + sizeof (((XPV*)SvANY(sv))->xpv_len)
+ - old_body_offset;
if (mt <= SVt_IV)
mt = SVt_PVIV;
else if (mt == SVt_NV)
mt = SVt_PVNV;
break;
case SVt_PVIV:
- pv = SvPVX_mutable(sv);
- cur = SvCUR(sv);
- len = SvLEN(sv);
- iv = SvIVX(sv);
- del_XPVIV(SvANY(sv));
+ old_body_arena = (void **) &PL_xpviv_root;
+ old_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur)
+ - STRUCT_OFFSET(xpviv_allocated, xpv_cur);
+ old_body_length = STRUCT_OFFSET(XPVIV, xiv_u)
+ + sizeof (((XPVIV*)SvANY(sv))->xiv_u)
+ - old_body_offset;
break;
case SVt_PVNV:
- pv = SvPVX_mutable(sv);
- cur = SvCUR(sv);
- len = SvLEN(sv);
- iv = SvIVX(sv);
- nv = SvNVX(sv);
- del_XPVNV(SvANY(sv));
+ old_body_arena = (void **) &PL_xpvnv_root;
+ old_body_length = STRUCT_OFFSET(XPVNV, xiv_u)
+ + sizeof (((XPVNV*)SvANY(sv))->xiv_u);
+ zero_nv = FALSE;
break;
case SVt_PVMG:
/* Because the XPVMG of PL_mess_sv isn't allocated from the arena,
Given that it only has meaning inside the pad, it shouldn't be set
on anything that can get upgraded. */
assert((SvFLAGS(sv) & SVpad_TYPED) == 0);
- pv = SvPVX_mutable(sv);
- cur = SvCUR(sv);
- len = SvLEN(sv);
- iv = SvIVX(sv);
- nv = SvNVX(sv);
- magic = SvMAGIC(sv);
- stash = SvSTASH(sv);
- del_XPVMG(SvANY(sv));
+ old_body_arena = (void **) &PL_xpvmg_root;
+ old_body_length = STRUCT_OFFSET(XPVMG, xmg_stash)
+ + sizeof (((XPVMG*)SvANY(sv))->xmg_stash);
+ zero_nv = FALSE;
break;
default:
Perl_croak(aTHX_ "Can't upgrade that kind of scalar");
case SVt_NULL:
Perl_croak(aTHX_ "Can't upgrade to undef");
case SVt_IV:
+ assert(old_type == SVt_NULL);
SvANY(sv) = (XPVIV*)((char*)&(sv->sv_u.svu_iv) - STRUCT_OFFSET(XPVIV, xiv_iv));
- SvIV_set(sv, iv);
+ SvIV_set(sv, 0);
break;
case SVt_NV:
+ assert(old_type == SVt_NULL);
SvANY(sv) = new_XNV();
- SvNV_set(sv, nv);
+ SvNV_set(sv, 0);
break;
case SVt_RV:
+ assert(old_type == SVt_NULL);
SvANY(sv) = &sv->sv_u.svu_rv;
- SvRV_set(sv, (SV*)pv);
+ SvRV_set(sv, 0);
break;
case SVt_PVHV:
SvANY(sv) = new_XPVHV();
HvMAX(sv) = 0;
HvTOTALKEYS(sv) = 0;
- /* Fall through... */
- if (0) {
- case SVt_PVAV:
- SvANY(sv) = new_XPVAV();
- AvMAX(sv) = -1;
- AvFILLp(sv) = -1;
- AvALLOC(sv) = 0;
- AvREAL_only(sv);
- }
- /* to here. */
- /* XXX? Only SVt_NULL is ever upgraded to AV or HV? */
- assert(!pv);
- /* FIXME. Should be able to remove all this if()... if the above
- assertion is genuinely always true. */
- if(SvOOK(sv)) {
- pv -= iv;
- SvFLAGS(sv) &= ~SVf_OOK;
- }
- Safefree(pv);
+ goto hv_av_common;
+
+ case SVt_PVAV:
+ SvANY(sv) = new_XPVAV();
+ AvMAX(sv) = -1;
+ AvFILLp(sv) = -1;
+ AvALLOC(sv) = 0;
+ AvREAL_only(sv);
+
+ hv_av_common:
+ /* SVt_NULL isn't the only thing upgraded to AV or HV.
+ The target created by newSVrv also is, and it can have magic.
+ However, it never has SvPVX set.
+ */
+ if (old_type >= SVt_RV) {
+ assert(SvPVX_const(sv) == 0);
+ }
+
+ /* Could put this in the else clause below, as PVMG must have SvPVX
+ 0 already (the assertion above) */
SvPV_set(sv, (char*)0);
- SvMAGIC_set(sv, magic);
- SvSTASH_set(sv, stash);
+
+ if (old_type >= SVt_PVMG) {
+ SvMAGIC_set(sv, ((XPVMG*)old_body)->xmg_magic);
+ SvSTASH_set(sv, ((XPVMG*)old_body)->xmg_stash);
+ } else {
+ SvMAGIC_set(sv, 0);
+ SvSTASH_set(sv, 0);
+ }
break;
case SVt_PVIO:
- SvANY(sv) = new_XPVIO();
- Zero(SvANY(sv), 1, XPVIO);
- IoPAGE_LEN(sv) = 60;
- goto set_magic_common;
+ new_body = new_XPVIO();
+ new_body_length = sizeof(XPVIO);
+ goto zero;
case SVt_PVFM:
- SvANY(sv) = new_XPVFM();
- Zero(SvANY(sv), 1, XPVFM);
- goto set_magic_common;
+ new_body = new_XPVFM();
+ new_body_length = sizeof(XPVFM);
+ goto zero;
+
case SVt_PVBM:
- SvANY(sv) = new_XPVBM();
- BmRARE(sv) = 0;
- BmUSEFUL(sv) = 0;
- BmPREVIOUS(sv) = 0;
- goto set_magic_common;
+ new_body_length = sizeof(XPVBM);
+ new_body_arena = (void **) &PL_xpvbm_root;
+ new_body_arenaroot = (void **) &PL_xpvbm_arenaroot;
+ goto new_body;
case SVt_PVGV:
- SvANY(sv) = new_XPVGV();
- GvGP(sv) = 0;
- GvNAME(sv) = 0;
- GvNAMELEN(sv) = 0;
- GvSTASH(sv) = 0;
- GvFLAGS(sv) = 0;
- goto set_magic_common;
+ new_body_length = sizeof(XPVGV);
+ new_body_arena = (void **) &PL_xpvgv_root;
+ new_body_arenaroot = (void **) &PL_xpvgv_arenaroot;
+ goto new_body;
case SVt_PVCV:
- SvANY(sv) = new_XPVCV();
- Zero(SvANY(sv), 1, XPVCV);
- goto set_magic_common;
+ new_body_length = sizeof(XPVCV);
+ new_body_arena = (void **) &PL_xpvcv_root;
+ new_body_arenaroot = (void **) &PL_xpvcv_arenaroot;
+ goto new_body;
case SVt_PVLV:
- SvANY(sv) = new_XPVLV();
- LvTARGOFF(sv) = 0;
- LvTARGLEN(sv) = 0;
- LvTARG(sv) = 0;
- LvTYPE(sv) = 0;
- GvGP(sv) = 0;
- GvNAME(sv) = 0;
- GvNAMELEN(sv) = 0;
- GvSTASH(sv) = 0;
- GvFLAGS(sv) = 0;
- /* Fall through. */
- if (0) {
- case SVt_PVMG:
- SvANY(sv) = new_XPVMG();
- }
- set_magic_common:
- SvMAGIC_set(sv, magic);
- SvSTASH_set(sv, stash);
- /* Fall through. */
- if (0) {
- case SVt_PVNV:
- SvANY(sv) = new_XPVNV();
- }
- SvNV_set(sv, nv);
- /* Fall through. */
- if (0) {
- case SVt_PVIV:
- SvANY(sv) = new_XPVIV();
- if (SvNIOK(sv))
- (void)SvIOK_on(sv);
- SvNOK_off(sv);
- }
- SvIV_set(sv, iv);
- /* Fall through. */
- if (0) {
- case SVt_PV:
- SvANY(sv) = new_XPV();
+ new_body_length = sizeof(XPVLV);
+ new_body_arena = (void **) &PL_xpvlv_root;
+ new_body_arenaroot = (void **) &PL_xpvlv_arenaroot;
+ goto new_body;
+ case SVt_PVMG:
+ new_body_length = sizeof(XPVMG);
+ new_body_arena = (void **) &PL_xpvmg_root;
+ new_body_arenaroot = (void **) &PL_xpvmg_arenaroot;
+ goto new_body;
+ case SVt_PVNV:
+ new_body_length = sizeof(XPVNV);
+ new_body_arena = (void **) &PL_xpvnv_root;
+ new_body_arenaroot = (void **) &PL_xpvnv_arenaroot;
+ goto new_body;
+ case SVt_PVIV:
+ new_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur)
+ - STRUCT_OFFSET(xpviv_allocated, xpv_cur);
+ new_body_length = sizeof(XPVIV) - new_body_offset;
+ new_body_arena = (void **) &PL_xpviv_root;
+ new_body_arenaroot = (void **) &PL_xpviv_arenaroot;
+ /* XXX Is this still needed? Was it ever needed? Surely as there is
+ no route from NV to PVIV, NOK can never be true */
+ if (SvNIOK(sv))
+ (void)SvIOK_on(sv);
+ SvNOK_off(sv);
+ goto new_body_no_NV;
+ case SVt_PV:
+ new_body_offset = STRUCT_OFFSET(XPV, xpv_cur)
+ - STRUCT_OFFSET(xpv_allocated, xpv_cur);
+ new_body_length = sizeof(XPV) - new_body_offset;
+ new_body_arena = (void **) &PL_xpv_root;
+ new_body_arenaroot = (void **) &PL_xpv_arenaroot;
+ new_body_no_NV:
+ /* PV and PVIV don't have an NV slot. */
+ zero_nv = FALSE;
+
+ {
+ new_body:
+ assert(new_body_length);
+#ifndef PURIFY
+ new_body = S_new_body(aTHX_ new_body_arenaroot, new_body_arena,
+ new_body_length, new_body_offset);
+#else
+ /* We always allocate the full-length item with PURIFY */
+ new_body_length += new_body_offset;
+ new_body_offset = 0;
+ new_body = my_safemalloc(new_body_length);
+
+#endif
+ zero:
+ Zero(((char *)new_body) + new_body_offset, new_body_length, char);
+ SvANY(sv) = new_body;
+
+ if (old_body_length) {
+ Copy((char *)old_body + old_body_offset,
+ (char *)new_body + old_body_offset,
+ old_body_length, char);
+ }
+
+ /* FIXME - add a Configure test to determine if NV 0.0 is actually
+ all bits zero. If it is, we can skip this initialisation. */
+ if (zero_nv)
+ SvNV_set(sv, 0);
+
+ if (mt == SVt_PVIO)
+ IoPAGE_LEN(sv) = 60;
+ if (old_type < SVt_RV)
+ SvPV_set(sv, 0);
}
- SvPV_set(sv, pv);
- SvCUR_set(sv, cur);
- SvLEN_set(sv, len);
break;
+ default:
+ Perl_croak(aTHX_ "panic: sv_upgrade to unknown type %lu", mt);
+ }
+
+
+ if (old_body_arena) {
+#ifdef PURIFY
+ my_safefree(old_body);
+#else
+ S_del_body(aTHX_ old_body, old_body_arena, old_body_offset);
+#endif
}
- return TRUE;
}
/*
Perl_sv_2pvbyte(pTHX_ register SV *sv, STRLEN *lp)
{
sv_utf8_downgrade(sv,0);
- return SvPV(sv,*lp);
+ return lp ? SvPV(sv,*lp) : SvPV_nolen(sv);
}
/*
}
if (hibit) {
STRLEN len = SvCUR(sv) + 1; /* Plus the \0 */
- char *recoded = bytes_to_utf8((U8*)s, &len);
+ U8 *recoded = bytes_to_utf8((U8*)s, &len);
SvPV_free(sv); /* No longer using what was there before. */
- SvPV_set(sv, recoded);
+ SvPV_set(sv, (char*)recoded);
SvCUR_set(sv, len - 1);
SvLEN_set(sv, len); /* No longer know the real size. */
}
#endif
{
/* SvIsCOW_shared_hash */
- UV hash = SvSHARED_HASH(sstr);
DEBUG_C(PerlIO_printf(Perl_debug_log,
"Copy on write: Sharing hash\n"));
assert (SvTYPE(dstr) >= SVt_PV);
- /* FIXME - would benefit from share_hek_hek */
SvPV_set(dstr,
- sharepvn(SvPVX_const(sstr),
- (sflags & SVf_UTF8?-cur:cur), hash));
+ HEK_KEY(share_hek_hek(SvSHARED_HEK_FROM_PV(SvPVX_const(sstr)))));
}
SvLEN_set(dstr, len);
SvCUR_set(dstr, cur);
}
if (sflags & SVf_UTF8)
SvUTF8_on(dstr);
- /*SUPPRESS 560*/
if (sflags & SVp_NOK) {
SvNOKp_on(dstr);
if (sflags & SVf_NOK)
if (SvLEN(sstr) == 0) {
/* source is a COW shared hash key. */
- UV hash = SvSHARED_HASH(sstr);
DEBUG_C(PerlIO_printf(Perl_debug_log,
"Fast copy on write: Sharing hash\n"));
- /* FIXME - would benefit from share_hek_hek */
- new_pv = sharepvn(SvPVX_const(sstr), (SvUTF8(sstr)?-cur:cur), hash);
+ new_pv = HEK_KEY(share_hek_hek(SvSHARED_HEK_FROM_PV(SvPVX_const(sstr))));
goto common_exit;
}
SV_COW_NEXT_SV_SET(dstr, SV_COW_NEXT_SV(sstr));
*mid = '\0';
SvCUR_set(bigstr, mid - big);
}
- /*SUPPRESS 560*/
else if ((i = mid - big)) { /* faster from front */
midend -= littlelen;
mid = midend;
if (!(flags & SVp_POK) || !*SvPVX_const(sv)) {
if ((flags & SVTYPEMASK) < SVt_PVIV)
- sv_upgrade(sv, SVt_IV);
+ sv_upgrade(sv, ((flags & SVTYPEMASK) > SVt_IV ? SVt_PVIV : SVt_IV));
(void)SvIOK_only(sv);
SvIV_set(sv, 1);
return;
# define PTR_TABLE_HASH(ptr) (PTR2UV(ptr) >> 2)
#endif
-
-
-STATIC void
-S_more_pte(pTHX)
-{
- struct ptr_tbl_ent* pte;
- struct ptr_tbl_ent* pteend;
- New(0, pte, PERL_ARENA_SIZE/sizeof(struct ptr_tbl_ent), struct ptr_tbl_ent);
- pte->next = PL_pte_arenaroot;
- PL_pte_arenaroot = pte;
-
- pteend = &pte[PERL_ARENA_SIZE / sizeof(struct ptr_tbl_ent) - 1];
- PL_pte_root = ++pte;
- while (pte < pteend) {
- pte->next = pte + 1;
- pte++;
- }
- pte->next = 0;
-}
-
-STATIC struct ptr_tbl_ent*
-S_new_pte(pTHX)
-{
- struct ptr_tbl_ent* pte;
- if (!PL_pte_root)
- S_more_pte(aTHX);
- pte = PL_pte_root;
- PL_pte_root = pte->next;
- return pte;
-}
-
-STATIC void
-S_del_pte(pTHX_ struct ptr_tbl_ent*p)
-{
- p->next = PL_pte_root;
- PL_pte_root = p;
-}
+#define new_pte() new_body(struct ptr_tbl_ent, pte)
+#define del_pte(p) del_body(p, struct ptr_tbl_ent, pte)
/* map an existing pointer using a table */
return;
}
}
- tblent = S_new_pte(aTHX);
+ tblent = new_pte();
tblent->oldval = oldv;
tblent->newval = newv;
tblent->next = *otblent;
if (entry) {
PTR_TBL_ENT_t *oentry = entry;
entry = entry->next;
- S_del_pte(aTHX_ oentry);
+ del_pte(oentry);
}
if (!entry) {
if (++riter > max) {
}
else {
/* Special case - not normally malloced for some reason */
- if (SvREADONLY(sstr) && SvFAKE(sstr)) {
- /* A "shared" PV - clone it as unshared string */
- if(SvPADTMP(sstr)) {
- /* However, some of them live in the pad
- and they should not have these flags
- turned off */
-
- /* FIXME - would benefit from share_hek_hek */
- SvPV_set(dstr, sharepvn(SvPVX_const(sstr), SvCUR(sstr),
- SvUVX(sstr)));
- } else {
-
- SvPV_set(dstr, SAVEPVN(SvPVX_const(sstr), SvCUR(sstr)));
- SvFAKE_off(dstr);
- SvREADONLY_off(dstr);
- }
+ if ((SvREADONLY(sstr) && SvFAKE(sstr))) {
+ /* A "shared" PV - clone it as "shared" PV */
+ SvPV_set(dstr,
+ HEK_KEY(hek_dup(SvSHARED_HEK_FROM_PV(SvPVX_const(sstr)),
+ param)));
}
else {
/* Some other special case - random pointer */
/* create SV map for pointer relocation */
PL_ptr_table = ptr_table_new();
- /* and one for finding shared hash keys quickly */
- PL_shared_hek_table = ptr_table_new();
/* initialize these special pointers as early as possible */
SvANY(&PL_sv_undef) = NULL;
if (!(flags & CLONEf_KEEP_PTR_TABLE)) {
ptr_table_free(PL_ptr_table);
PL_ptr_table = NULL;
- ptr_table_free(PL_shared_hek_table);
- PL_shared_hek_table = NULL;
}
/* Call the ->CLONE method, if it exists, for each of the stashes