pointer to the body (struct xrv, xpv, xpviv...), which contains fields
specific to each type.
-Normally, this allocation is done using arenas, which by default are
-approximately 4K chunks of memory parcelled up into N heads or bodies. The
-first slot in each arena is reserved, and is used to hold a link to the next
-arena. In the case of heads, the unused first slot also contains some flags
-and a note of the number of slots. Snaked through each arena chain is a
-linked list of free items; when this becomes empty, an extra arena is
-allocated and divided up into N items which are threaded into the free list.
+In all but the most memory-paranoid configurations (ex: PURIFY), this
+allocation is done using arenas, which by default are approximately 4K
+chunks of memory parcelled up into N heads or bodies (of same size).
+Sv-bodies are allocated by their sv-type, guaranteeing size
+consistency needed to allocate safely from arrays.
+
+The first slot in each arena is reserved, and is used to hold a link
+to the next arena. In the case of heads, the unused first slot also
+contains some flags and a note of the number of slots. Snaked through
+each arena chain is a linked list of free items; when this becomes
+empty, an extra arena is allocated and divided up into N items which
+are threaded into the free list.
The following global variables are associated with arenas:
PL_sv_arenaroot pointer to list of SV arenas
PL_sv_root pointer to list of free SV structures
- PL_foo_arenaroot pointer to list of foo arenas,
- PL_foo_root pointer to list of free foo bodies
- ... for foo in xiv, xnv, xrv, xpv etc.
+ PL_body_arenaroots[] array of pointers to list of arenas, 1 per svtype
+ PL_body_roots[] array of pointers to list of free bodies of svtype
+ arrays are indexed by the svtype needed
-Note that some of the larger and more rarely used body types (eg xpvio)
-are not allocated using arenas, but are instead just malloc()/free()ed as
-required. Also, if PURIFY is defined, arenas are abandoned altogether,
-with all items individually malloc()ed. In addition, a few SV heads are
-not allocated from an arena, but are instead directly created as static
-or auto variables, eg PL_sv_undef. The size of arenas can be changed from
-the default by setting PERL_ARENA_SIZE appropriately at compile time.
+Note that some of the larger and more rarely used body types (eg
+xpvio) are not allocated using arenas, but are instead just
+malloc()/free()ed as required.
+
+In addition, a few SV heads are not allocated from an arena, but are
+instead directly created as static or auto variables, eg PL_sv_undef.
+The size of arenas can be changed from the default by setting
+PERL_ARENA_SIZE appropriately at compile time.
The SV arena serves the secondary purpose of allowing still-live SVs
to be located and destroyed during final cleanup.
of zero. called repeatedly from perl_destruct()
until there are no SVs left.
-=head2 Summary
+=head2 Arena allocator API Summary
Private API to rest of sv.c
# define FREE_SV_DEBUG_FILE(sv)
#endif
+#ifdef PERL_POISON
+# define SvARENA_CHAIN(sv) ((sv)->sv_u.svu_rv)
+/* Whilst I'd love to do this, it seems that things like to check on
+ unreferenced scalars
+# define POSION_SV_HEAD(sv) Poison(sv, 1, struct STRUCT_SV)
+*/
+# define POSION_SV_HEAD(sv) Poison(&SvANY(sv), 1, void *), \
+ Poison(&SvREFCNT(sv), 1, U32)
+#else
+# define SvARENA_CHAIN(sv) SvANY(sv)
+# define POSION_SV_HEAD(sv)
+#endif
+
#define plant_SV(p) \
STMT_START { \
FREE_SV_DEBUG_FILE(p); \
- SvANY(p) = (void *)PL_sv_root; \
+ POSION_SV_HEAD(p); \
+ SvARENA_CHAIN(p) = (void *)PL_sv_root; \
SvFLAGS(p) = SVTYPEMASK; \
PL_sv_root = (p); \
--PL_sv_count; \
#define uproot_SV(p) \
STMT_START { \
(p) = PL_sv_root; \
- PL_sv_root = (SV*)SvANY(p); \
+ PL_sv_root = (SV*)SvARENA_CHAIN(p); \
++PL_sv_count; \
} STMT_END
svend = &sva[SvREFCNT(sva) - 1];
sv = sva + 1;
while (sv < svend) {
- SvANY(sv) = (void *)(SV*)(sv + 1);
+ SvARENA_CHAIN(sv) = (void *)(SV*)(sv + 1);
#ifdef DEBUGGING
SvREFCNT(sv) = 0;
#endif
SvFLAGS(sv) = SVTYPEMASK;
sv++;
}
- SvANY(sv) = 0;
+ SvARENA_CHAIN(sv) = 0;
#ifdef DEBUGGING
SvREFCNT(sv) = 0;
#endif
=cut
*/
-
#define free_arena(name) \
STMT_START { \
S_free_arena(aTHX_ (void**) PL_ ## name ## _arenaroot); \
{
SV* sva;
SV* svanext;
+ int i;
/* Free arenas here, but be careful about fake ones. (We assume
contiguity of the fake ones with the corresponding real ones.) */
if (!SvFAKE(sva))
Safefree(sva);
}
-
- free_arena(xnv);
- free_arena(xpv);
- free_arena(xpviv);
- free_arena(xpvnv);
- free_arena(xpvcv);
- free_arena(xpvav);
- free_arena(xpvhv);
- free_arena(xpvmg);
- free_arena(xpvgv);
- free_arena(xpvlv);
- free_arena(xpvbm);
+
+ for (i=0; i<SVt_LAST; i++) {
+ S_free_arena(aTHX_ (void**) PL_body_arenaroots[i]);
+ PL_body_arenaroots[i] = 0;
+ PL_body_roots[i] = 0;
+ }
+
free_arena(he);
-#if defined(USE_ITHREADS)
- free_arena(pte);
-#endif
Safefree(PL_nice_chunk);
PL_nice_chunk = Nullch;
"", "", "");
}
+/*
+ Here are mid-level routines that manage the allocation of bodies out
+ of the various arenas. There are 5 kinds of arenas:
+
+ 1. SV-head arenas, which are discussed and handled above
+ 2. regular body arenas
+ 3. arenas for reduced-size bodies
+ 4. Hash-Entry arenas
+ 5. pte arenas (thread related)
+
+ Arena types 2 & 3 are chained by body-type off an array of
+ arena-root pointers, which is indexed by svtype. Some of the
+ larger/less used body types are malloced singly, since a large
+ unused block of them is wasteful. Also, several svtypes don't have
+ bodies; the data fits into the sv-head itself. The arena-root
+ pointer thus has a few unused root-pointers (which may be hijacked
+ later for arena types 4,5)
+
+ 3 differs from 2 as an optimization; some body types have several
+ unused fields in the front of the structure (which are kept in-place
+ for consistency). These bodies can be allocated in smaller chunks,
+ because the leading fields aren't accessed. Pointers to such bodies
+ are decremented to point at the unused 'ghost' memory, knowing that
+ the pointers are used with offsets to the real memory.
+
+ HE, HEK arenas are managed separately, with separate code, but may
+ be merge-able later..
+
+ PTE arenas are not sv-bodies, but they share these mid-level
+ mechanics, so are considered here. The new mid-level mechanics rely
+ on the sv_type of the body being allocated, so we just reserve one
+ of the unused body-slots for PTEs, then use it in those (2) PTE
+ contexts below (line ~10k)
+*/
+
STATIC void *
-S_more_bodies (pTHX_ void **arena_root, void **root, size_t size)
+S_more_bodies (pTHX_ size_t size, svtype sv_type)
{
+ void **arena_root = &PL_body_arenaroots[sv_type];
+ void **root = &PL_body_roots[sv_type];
char *start;
const char *end;
- const size_t count = PERL_ARENA_SIZE/size;
+ const size_t count = PERL_ARENA_SIZE / size;
+
Newx(start, count*size, char);
*((void **) start) = *arena_root;
*arena_root = (void *)start;
/* 1st, the inline version */
-#define new_body_inline(xpv, arena_root, root, size) \
+#define new_body_inline(xpv, size, sv_type) \
STMT_START { \
+ void **r3wt = &PL_body_roots[sv_type]; \
LOCK_SV_MUTEX; \
- xpv = *((void **)(root)) \
- ? *((void **)(root)) : S_more_bodies(aTHX_ arena_root, root, size); \
- *(root) = *(void**)(xpv); \
+ xpv = *((void **)(r3wt)) \
+ ? *((void **)(r3wt)) : S_more_bodies(aTHX_ size, sv_type); \
+ *(r3wt) = *(void**)(xpv); \
UNLOCK_SV_MUTEX; \
} STMT_END
/* now use the inline version in the proper function */
+#ifndef PURIFY
+
+/* This isn't being used with -DPURIFY, so don't declare it. Otherwise
+ compilers issue warnings. */
+
STATIC void *
-S_new_body(pTHX_ void **arena_root, void **root, size_t size)
+S_new_body(pTHX_ size_t size, svtype sv_type)
{
void *xpv;
- new_body_inline(xpv, arena_root, root, size);
+ new_body_inline(xpv, size, sv_type);
return xpv;
}
+#endif
+
/* return a thing to the free list */
#define del_body(thing, root) \
UNLOCK_SV_MUTEX; \
} STMT_END
-/* Conventionally we simply malloc() a big block of memory, then divide it
- up into lots of the thing that we're allocating.
-
- This macro will expand to call to S_new_body. So for XPVBM (with ithreads),
- it would become
-
- S_new_body(my_perl, (void**)&(my_perl->Ixpvbm_arenaroot),
- (void**)&(my_perl->Ixpvbm_root), sizeof(XPVBM), 0)
-*/
-
-#define new_body_type(TYPE,lctype) \
- S_new_body(aTHX_ (void**)&PL_ ## lctype ## _arenaroot, \
- (void**)&PL_ ## lctype ## _root, \
- sizeof(TYPE))
+/*
+ Revisiting type 3 arenas, there are 4 body-types which have some
+ members that are never accessed. They are XPV, XPVIV, XPVAV,
+ XPVHV, which have corresponding types: xpv_allocated,
+ xpviv_allocated, xpvav_allocated, xpvhv_allocated,
-#define del_body_type(p,TYPE,lctype) \
- del_body((void*)p, (void**)&PL_ ## lctype ## _root)
-
-/* But for some types, we cheat. The type starts with some members that are
- never accessed. So we allocate the substructure, starting at the first used
- member, then adjust the pointer back in memory by the size of the bit not
- allocated, so it's as if we allocated the full structure.
- (But things will all go boom if you write to the part that is "not there",
- because you'll be overwriting the last members of the preceding structure
- in memory.)
+ For these types, the arenas are carved up into *_allocated size
+ chunks, we thus avoid wasted memory for those unaccessed members.
+ When bodies are allocated, we adjust the pointer back in memory by
+ the size of the bit not allocated, so it's as if we allocated the
+ full structure. (But things will all go boom if you write to the
+ part that is "not there", because you'll be overwriting the last
+ members of the preceding structure in memory.)
We calculate the correction using the STRUCT_OFFSET macro. For example, if
xpv_allocated is the same structure as XPV then the two OFFSETs sum to zero,
start of the structure. IV bodies don't need it either, because they are
no longer allocated. */
-#define new_body_allocated(TYPE,lctype,member) \
- (void*)((char*)S_new_body(aTHX_ (void**)&PL_ ## lctype ## _arenaroot, \
- (void**)&PL_ ## lctype ## _root, \
- sizeof(lctype ## _allocated)) - \
- STRUCT_OFFSET(TYPE, member) \
- + STRUCT_OFFSET(lctype ## _allocated, member))
+/* The following 2 arrays hide the above details in a pair of
+ lookup-tables, allowing us to be body-type agnostic.
+
+ size maps svtype to its body's allocated size.
+ offset maps svtype to the body-pointer adjustment needed
+
+ NB: elements in the latter are 0 or <0, and are added during
+ allocation, and subtracted during deallocation. It may be clearer
+ to invert the values, and call it shrinkage_by_svtype.
+*/
+struct body_details {
+ size_t size; /* Size to allocate */
+ size_t copy; /* Size of structure to copy (may be shorter) */
+ int offset;
+ bool cant_upgrade; /* Can't upgrade this type */
+ bool zero_nv; /* zero the NV when upgrading from this */
+ bool arena; /* Allocated from an arena */
+};
+
+#define HADNV FALSE
+#define NONV TRUE
+
+#define HASARENA TRUE
+#define NOARENA FALSE
+
+static const struct body_details bodies_by_type[] = {
+ {0, 0, 0, FALSE, NONV, NOARENA},
+ /* IVs are in the head, so the allocation size is 0 */
+ {0, sizeof(IV), -STRUCT_OFFSET(XPVIV, xiv_iv), FALSE, NONV, NOARENA},
+ /* 8 bytes on most ILP32 with IEEE doubles */
+ {sizeof(NV), sizeof(NV), 0, FALSE, HADNV, HASARENA},
+ /* RVs are in the head now */
+ /* However, this slot is overloaded and used by the pte */
+ {0, 0, 0, FALSE, NONV, NOARENA},
+ /* 8 bytes on most ILP32 with IEEE doubles */
+ {sizeof(xpv_allocated),
+ STRUCT_OFFSET(XPV, xpv_len) + sizeof (((XPV*)SvANY((SV*)0))->xpv_len)
+ + STRUCT_OFFSET(xpv_allocated, xpv_cur) - STRUCT_OFFSET(XPV, xpv_cur),
+ + STRUCT_OFFSET(xpv_allocated, xpv_cur) - STRUCT_OFFSET(XPV, xpv_cur)
+ , FALSE, NONV, HASARENA},
+ /* 12 */
+ {sizeof(xpviv_allocated),
+ STRUCT_OFFSET(XPVIV, xiv_u) + sizeof (((XPVIV*)SvANY((SV*)0))->xiv_u)
+ + STRUCT_OFFSET(xpviv_allocated, xpv_cur) - STRUCT_OFFSET(XPVIV, xpv_cur),
+ + STRUCT_OFFSET(xpviv_allocated, xpv_cur) - STRUCT_OFFSET(XPVIV, xpv_cur)
+ , FALSE, NONV, HASARENA},
+ /* 20 */
+ {sizeof(XPVNV),
+ STRUCT_OFFSET(XPVNV, xiv_u) + sizeof (((XPVNV*)SvANY((SV*)0))->xiv_u),
+ 0, FALSE, HADNV, HASARENA},
+ /* 28 */
+ {sizeof(XPVMG),
+ STRUCT_OFFSET(XPVMG, xmg_stash) + sizeof (((XPVMG*)SvANY((SV*)0))->xmg_stash),
+ 0, FALSE, HADNV, HASARENA},
+ /* 36 */
+ {sizeof(XPVBM), sizeof(XPVBM), 0, TRUE, HADNV, HASARENA},
+ /* 48 */
+ {sizeof(XPVGV), sizeof(XPVGV), 0, TRUE, HADNV, HASARENA},
+ /* 64 */
+ {sizeof(XPVLV), sizeof(XPVLV), 0, TRUE, HADNV, HASARENA},
+ /* 20 */
+ {sizeof(xpvav_allocated),
+ STRUCT_OFFSET(XPVAV, xmg_stash)
+ + sizeof (((XPVAV*)SvANY((SV *)0))->xmg_stash)
+ + STRUCT_OFFSET(xpvav_allocated, xav_fill)
+ - STRUCT_OFFSET(XPVAV, xav_fill),
+ STRUCT_OFFSET(xpvav_allocated, xav_fill)
+ - STRUCT_OFFSET(XPVAV, xav_fill), TRUE, HADNV, HASARENA},
+ /* 20 */
+ {sizeof(xpvhv_allocated),
+ STRUCT_OFFSET(XPVHV, xmg_stash)
+ + sizeof (((XPVHV*)SvANY((SV *)0))->xmg_stash)
+ + STRUCT_OFFSET(xpvhv_allocated, xhv_fill)
+ - STRUCT_OFFSET(XPVHV, xhv_fill),
+ STRUCT_OFFSET(xpvhv_allocated, xhv_fill)
+ - STRUCT_OFFSET(XPVHV, xhv_fill), TRUE, HADNV, HASARENA},
+ /* 76 */
+ {sizeof(XPVCV), sizeof(XPVCV), 0, TRUE, HADNV, HASARENA},
+ /* 80 */
+ {sizeof(XPVFM), sizeof(XPVFM), 0, TRUE, HADNV, NOARENA},
+ /* 84 */
+ {sizeof(XPVIO), sizeof(XPVIO), 0, TRUE, HADNV, NOARENA}
+};
+
+#define new_body_type(sv_type) \
+ (void *)((char *)S_new_body(aTHX_ bodies_by_type[sv_type].size, sv_type)\
+ + bodies_by_type[sv_type].offset)
+
+#define del_body_type(p, sv_type) \
+ del_body(p, &PL_body_roots[sv_type])
+
+
+#define new_body_allocated(sv_type) \
+ (void *)((char *)S_new_body(aTHX_ bodies_by_type[sv_type].size, sv_type)\
+ + bodies_by_type[sv_type].offset)
+
+#define del_body_allocated(p, sv_type) \
+ del_body(p - bodies_by_type[sv_type].offset, &PL_body_roots[sv_type])
-#define del_body_allocated(p,TYPE,lctype,member) \
- del_body((void*)((char*)p + STRUCT_OFFSET(TYPE, member) \
- - STRUCT_OFFSET(lctype ## _allocated, member)), \
- (void**)&PL_ ## lctype ## _root)
#define my_safemalloc(s) (void*)safemalloc(s)
+#define my_safecalloc(s) (void*)safecalloc(s, 1)
#define my_safefree(p) safefree((char*)p)
#ifdef PURIFY
#else /* !PURIFY */
-#define new_XNV() new_body_type(NV, xnv)
-#define del_XNV(p) del_body_type(p, NV, xnv)
+#define new_XNV() new_body_type(SVt_NV)
+#define del_XNV(p) del_body_type(p, SVt_NV)
-#define new_XPV() new_body_allocated(XPV, xpv, xpv_cur)
-#define del_XPV(p) del_body_allocated(p, XPV, xpv, xpv_cur)
+#define new_XPV() new_body_allocated(SVt_PV)
+#define del_XPV(p) del_body_allocated(p, SVt_PV)
-#define new_XPVIV() new_body_allocated(XPVIV, xpviv, xpv_cur)
-#define del_XPVIV(p) del_body_allocated(p, XPVIV, xpviv, xpv_cur)
+#define new_XPVIV() new_body_allocated(SVt_PVIV)
+#define del_XPVIV(p) del_body_allocated(p, SVt_PVIV)
-#define new_XPVNV() new_body_type(XPVNV, xpvnv)
-#define del_XPVNV(p) del_body_type(p, XPVNV, xpvnv)
+#define new_XPVNV() new_body_type(SVt_PVNV)
+#define del_XPVNV(p) del_body_type(p, SVt_PVNV)
-#define new_XPVCV() new_body_type(XPVCV, xpvcv)
-#define del_XPVCV(p) del_body_type(p, XPVCV, xpvcv)
+#define new_XPVCV() new_body_type(SVt_PVCV)
+#define del_XPVCV(p) del_body_type(p, SVt_PVCV)
-#define new_XPVAV() new_body_allocated(XPVAV, xpvav, xav_fill)
-#define del_XPVAV(p) del_body_allocated(p, XPVAV, xpvav, xav_fill)
+#define new_XPVAV() new_body_allocated(SVt_PVAV)
+#define del_XPVAV(p) del_body_allocated(p, SVt_PVAV)
-#define new_XPVHV() new_body_allocated(XPVHV, xpvhv, xhv_fill)
-#define del_XPVHV(p) del_body_allocated(p, XPVHV, xpvhv, xhv_fill)
+#define new_XPVHV() new_body_allocated(SVt_PVHV)
+#define del_XPVHV(p) del_body_allocated(p, SVt_PVHV)
-#define new_XPVMG() new_body_type(XPVMG, xpvmg)
-#define del_XPVMG(p) del_body_type(p, XPVMG, xpvmg)
+#define new_XPVMG() new_body_type(SVt_PVMG)
+#define del_XPVMG(p) del_body_type(p, SVt_PVMG)
-#define new_XPVGV() new_body_type(XPVGV, xpvgv)
-#define del_XPVGV(p) del_body_type(p, XPVGV, xpvgv)
+#define new_XPVGV() new_body_type(SVt_PVGV)
+#define del_XPVGV(p) del_body_type(p, SVt_PVGV)
-#define new_XPVLV() new_body_type(XPVLV, xpvlv)
-#define del_XPVLV(p) del_body_type(p, XPVLV, xpvlv)
+#define new_XPVLV() new_body_type(SVt_PVLV)
+#define del_XPVLV(p) del_body_type(p, SVt_PVLV)
-#define new_XPVBM() new_body_type(XPVBM, xpvbm)
-#define del_XPVBM(p) del_body_type(p, XPVBM, xpvbm)
+#define new_XPVBM() new_body_type(SVt_PVBM)
+#define del_XPVBM(p) del_body_type(p, SVt_PVBM)
#endif /* PURIFY */
+/* no arena for you! */
+
+#define new_NOARENA(details) \
+ my_safemalloc((details)->size - (details)->offset)
+#define new_NOARENAZ(details) \
+ my_safecalloc((details)->size - (details)->offset)
+
#define new_XPVFM() my_safemalloc(sizeof(XPVFM))
#define del_XPVFM(p) my_safefree(p)
#define new_XPVIO() my_safemalloc(sizeof(XPVIO))
#define del_XPVIO(p) my_safefree(p)
+
+
/*
=for apidoc sv_upgrade
*/
void
-Perl_sv_upgrade(pTHX_ register SV *sv, U32 mt)
+Perl_sv_upgrade(pTHX_ register SV *sv, U32 new_type)
{
- void** old_body_arena;
- size_t old_body_offset;
- size_t old_body_length; /* Well, the length to copy. */
void* old_body;
-#ifndef NV_ZERO_IS_ALLBITS_ZERO
- /* If NV 0.0 is store as all bits 0 then Zero() already creates a correct
- 0.0 for us. */
- bool zero_nv = TRUE;
-#endif
void* new_body;
- size_t new_body_length;
- size_t new_body_offset;
- void** new_body_arena;
- void** new_body_arenaroot;
const U32 old_type = SvTYPE(sv);
+ const struct body_details *const old_type_details
+ = bodies_by_type + old_type;
+ const struct body_details *new_type_details = bodies_by_type + new_type;
- if (mt != SVt_PV && SvIsCOW(sv)) {
+ if (new_type != SVt_PV && SvIsCOW(sv)) {
sv_force_normal_flags(sv, 0);
}
- if (SvTYPE(sv) == mt)
+ if (old_type == new_type)
return;
- if (SvTYPE(sv) > mt)
+ if (old_type > new_type)
Perl_croak(aTHX_ "sv_upgrade from type %d down to type %d",
- (int)SvTYPE(sv), (int)mt);
+ (int)old_type, (int)new_type);
old_body = SvANY(sv);
- old_body_arena = 0;
- old_body_offset = 0;
- old_body_length = 0;
- new_body_offset = 0;
- new_body_length = ~0;
/* Copying structures onto other structures that have been neatly zeroed
has a subtle gotcha. Consider XPVMG
So we are careful and work out the size of used parts of all the
structures. */
- switch (SvTYPE(sv)) {
+ switch (old_type) {
case SVt_NULL:
break;
case SVt_IV:
- if (mt == SVt_NV)
- mt = SVt_PVNV;
- else if (mt < SVt_PVIV)
- mt = SVt_PVIV;
- old_body_offset = STRUCT_OFFSET(XPVIV, xiv_iv);
- old_body_length = sizeof(IV);
+ if (new_type < SVt_PVIV) {
+ new_type = (new_type == SVt_NV)
+ ? SVt_PVNV : SVt_PVIV;
+ new_type_details = bodies_by_type + new_type;
+ }
break;
case SVt_NV:
- old_body_arena = (void **) &PL_xnv_root;
- old_body_length = sizeof(NV);
-#ifndef NV_ZERO_IS_ALLBITS_ZERO
- zero_nv = FALSE;
-#endif
- if (mt < SVt_PVNV)
- mt = SVt_PVNV;
+ if (new_type < SVt_PVNV) {
+ new_type = SVt_PVNV;
+ new_type_details = bodies_by_type + new_type;
+ }
break;
case SVt_RV:
break;
case SVt_PV:
- old_body_arena = (void **) &PL_xpv_root;
- old_body_offset = STRUCT_OFFSET(XPV, xpv_cur)
- - STRUCT_OFFSET(xpv_allocated, xpv_cur);
- old_body_length = STRUCT_OFFSET(XPV, xpv_len)
- + sizeof (((XPV*)SvANY(sv))->xpv_len)
- - old_body_offset;
- if (mt <= SVt_IV)
- mt = SVt_PVIV;
- else if (mt == SVt_NV)
- mt = SVt_PVNV;
+ assert(new_type > SVt_PV);
+ assert(SVt_IV < SVt_PV);
+ assert(SVt_NV < SVt_PV);
break;
case SVt_PVIV:
- old_body_arena = (void **) &PL_xpviv_root;
- old_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur)
- - STRUCT_OFFSET(xpviv_allocated, xpv_cur);
- old_body_length = STRUCT_OFFSET(XPVIV, xiv_u)
- + sizeof (((XPVIV*)SvANY(sv))->xiv_u)
- - old_body_offset;
break;
case SVt_PVNV:
- old_body_arena = (void **) &PL_xpvnv_root;
- old_body_length = STRUCT_OFFSET(XPVNV, xiv_u)
- + sizeof (((XPVNV*)SvANY(sv))->xiv_u);
-#ifndef NV_ZERO_IS_ALLBITS_ZERO
- zero_nv = FALSE;
-#endif
break;
case SVt_PVMG:
/* Because the XPVMG of PL_mess_sv isn't allocated from the arena,
Given that it only has meaning inside the pad, it shouldn't be set
on anything that can get upgraded. */
assert((SvFLAGS(sv) & SVpad_TYPED) == 0);
- old_body_arena = (void **) &PL_xpvmg_root;
- old_body_length = STRUCT_OFFSET(XPVMG, xmg_stash)
- + sizeof (((XPVMG*)SvANY(sv))->xmg_stash);
-#ifndef NV_ZERO_IS_ALLBITS_ZERO
- zero_nv = FALSE;
-#endif
break;
default:
- Perl_croak(aTHX_ "Can't upgrade that kind of scalar");
+ if (old_type_details->cant_upgrade)
+ Perl_croak(aTHX_ "Can't upgrade that kind of scalar");
}
SvFLAGS(sv) &= ~SVTYPEMASK;
- SvFLAGS(sv) |= mt;
+ SvFLAGS(sv) |= new_type;
- switch (mt) {
+ switch (new_type) {
case SVt_NULL:
Perl_croak(aTHX_ "Can't upgrade to undef");
case SVt_IV:
}
break;
+
+ case SVt_PVIV:
+ /* XXX Is this still needed? Was it ever needed? Surely as there is
+ no route from NV to PVIV, NOK can never be true */
+ assert(!SvNOKp(sv));
+ assert(!SvNOK(sv));
case SVt_PVIO:
- new_body = new_XPVIO();
- new_body_length = sizeof(XPVIO);
- goto zero;
case SVt_PVFM:
- new_body = new_XPVFM();
- new_body_length = sizeof(XPVFM);
- goto zero;
-
case SVt_PVBM:
- new_body_length = sizeof(XPVBM);
- new_body_arena = (void **) &PL_xpvbm_root;
- new_body_arenaroot = (void **) &PL_xpvbm_arenaroot;
- goto new_body;
case SVt_PVGV:
- new_body_length = sizeof(XPVGV);
- new_body_arena = (void **) &PL_xpvgv_root;
- new_body_arenaroot = (void **) &PL_xpvgv_arenaroot;
- goto new_body;
case SVt_PVCV:
- new_body_length = sizeof(XPVCV);
- new_body_arena = (void **) &PL_xpvcv_root;
- new_body_arenaroot = (void **) &PL_xpvcv_arenaroot;
- goto new_body;
case SVt_PVLV:
- new_body_length = sizeof(XPVLV);
- new_body_arena = (void **) &PL_xpvlv_root;
- new_body_arenaroot = (void **) &PL_xpvlv_arenaroot;
- goto new_body;
case SVt_PVMG:
- new_body_length = sizeof(XPVMG);
- new_body_arena = (void **) &PL_xpvmg_root;
- new_body_arenaroot = (void **) &PL_xpvmg_arenaroot;
- goto new_body;
case SVt_PVNV:
- new_body_length = sizeof(XPVNV);
- new_body_arena = (void **) &PL_xpvnv_root;
- new_body_arenaroot = (void **) &PL_xpvnv_arenaroot;
- goto new_body;
- case SVt_PVIV:
- new_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur)
- - STRUCT_OFFSET(xpviv_allocated, xpv_cur);
- new_body_length = sizeof(XPVIV) - new_body_offset;
- new_body_arena = (void **) &PL_xpviv_root;
- new_body_arenaroot = (void **) &PL_xpviv_arenaroot;
- /* XXX Is this still needed? Was it ever needed? Surely as there is
- no route from NV to PVIV, NOK can never be true */
- if (SvNIOK(sv))
- (void)SvIOK_on(sv);
- SvNOK_off(sv);
- goto new_body_no_NV;
case SVt_PV:
- new_body_offset = STRUCT_OFFSET(XPV, xpv_cur)
- - STRUCT_OFFSET(xpv_allocated, xpv_cur);
- new_body_length = sizeof(XPV) - new_body_offset;
- new_body_arena = (void **) &PL_xpv_root;
- new_body_arenaroot = (void **) &PL_xpv_arenaroot;
- new_body_no_NV:
- /* PV and PVIV don't have an NV slot. */
-#ifndef NV_ZERO_IS_ALLBITS_ZERO
- zero_nv = FALSE;
-#endif
- new_body:
- assert(new_body_length);
+ assert(new_type_details->size);
#ifndef PURIFY
- /* This points to the start of the allocated area. */
- new_body_inline(new_body, new_body_arenaroot, new_body_arena,
- new_body_length);
+ if(new_type_details->arena) {
+ /* This points to the start of the allocated area. */
+ new_body_inline(new_body, new_type_details->size, new_type);
+ Zero(new_body, new_type_details->size, char);
+ new_body = ((char *)new_body) + new_type_details->offset;
+ } else {
+ new_body = new_NOARENAZ(new_type_details);
+ }
#else
/* We always allocated the full length item with PURIFY */
- new_body_length += new_body_offset;
- new_body_offset = 0;
- new_body = my_safemalloc(new_body_length);
-
+ new_body = new_NOARENAZ(new_type_details);
#endif
- zero:
- Zero(new_body, new_body_length, char);
- new_body = ((char *)new_body) - new_body_offset;
SvANY(sv) = new_body;
- if (old_body_length) {
- Copy((char *)old_body + old_body_offset,
- (char *)new_body + old_body_offset,
- old_body_length, char);
+ if (old_type_details->copy) {
+ Copy((char *)old_body - old_type_details->offset,
+ (char *)new_body - old_type_details->offset,
+ old_type_details->copy, char);
}
#ifndef NV_ZERO_IS_ALLBITS_ZERO
- if (zero_nv)
+ /* If NV 0.0 is store as all bits 0 then Zero() already creates a correct
+ 0.0 for us. */
+ if (old_type_details->zero_nv)
SvNV_set(sv, 0);
#endif
- if (mt == SVt_PVIO)
+ if (new_type == SVt_PVIO)
IoPAGE_LEN(sv) = 60;
if (old_type < SVt_RV)
SvPV_set(sv, 0);
break;
default:
- Perl_croak(aTHX_ "panic: sv_upgrade to unknown type %lu", mt);
+ Perl_croak(aTHX_ "panic: sv_upgrade to unknown type %lu", new_type);
}
-
- if (old_body_arena) {
+ if (old_type_details->size) {
+ /* If the old body had an allocated size, then we need to free it. */
#ifdef PURIFY
my_safefree(old_body);
#else
- del_body((void*)((char*)old_body + old_body_offset),
- old_body_arena);
+ del_body((void*)((char*)old_body - old_type_details->offset),
+ &PL_body_roots[old_type]);
#endif
}
}
}
/*
- * =for apidoc sv_2pvutf8
- *
- * Return a pointer to the UTF-8-encoded representation of the SV, and set *lp
- * to its length. May cause the SV to be upgraded to UTF-8 as a side-effect.
- *
- * Usually accessed via the C<SvPVutf8> macro.
- *
- * =cut
- * */
+=for apidoc sv_2pvutf8
+
+Return a pointer to the UTF-8-encoded representation of the SV, and set *lp
+to its length. May cause the SV to be upgraded to UTF-8 as a side-effect.
+
+Usually accessed via the C<SvPVutf8> macro.
+
+=cut
+*/
char *
Perl_sv_2pvutf8(pTHX_ register SV *sv, STRLEN *lp)
{
- sv_utf8_upgrade(sv);
- return lp ? SvPV(sv,*lp) : SvPV_nolen(sv);
+ sv_utf8_upgrade(sv);
+ return lp ? SvPV(sv,*lp) : SvPV_nolen(sv);
}
Perl_sv_clear(pTHX_ register SV *sv)
{
dVAR;
- void** old_body_arena;
- size_t old_body_offset;
const U32 type = SvTYPE(sv);
+ const struct body_details *const sv_type_details
+ = bodies_by_type + type;
assert(sv);
assert(SvREFCNT(sv) == 0);
if (type <= SVt_IV)
return;
- old_body_arena = 0;
- old_body_offset = 0;
-
if (SvOBJECT(sv)) {
if (PL_defstash) { /* Still have a symbol table? */
dSP;
Safefree(IoTOP_NAME(sv));
Safefree(IoFMT_NAME(sv));
Safefree(IoBOTTOM_NAME(sv));
- /* PVIOs aren't from arenas */
goto freescalar;
case SVt_PVBM:
- old_body_arena = (void **) &PL_xpvbm_root;
goto freescalar;
case SVt_PVCV:
- old_body_arena = (void **) &PL_xpvcv_root;
case SVt_PVFM:
- /* PVFMs aren't from arenas */
cv_undef((CV*)sv);
goto freescalar;
case SVt_PVHV:
hv_undef((HV*)sv);
- old_body_arena = (void **) &PL_xpvhv_root;
- old_body_offset = STRUCT_OFFSET(XPVHV, xhv_fill);
break;
case SVt_PVAV:
av_undef((AV*)sv);
- old_body_arena = (void **) &PL_xpvav_root;
- old_body_offset = STRUCT_OFFSET(XPVAV, xav_fill);
break;
case SVt_PVLV:
if (LvTYPE(sv) == 'T') { /* for tie: return HE to pool */
}
else if (LvTYPE(sv) != 't') /* unless tie: unrefcnted fake SV** */
SvREFCNT_dec(LvTARG(sv));
- old_body_arena = (void **) &PL_xpvlv_root;
goto freescalar;
case SVt_PVGV:
gp_free((GV*)sv);
have a back reference to us, which needs to be cleared. */
if (GvSTASH(sv))
sv_del_backref((SV*)GvSTASH(sv), sv);
- old_body_arena = (void **) &PL_xpvgv_root;
- goto freescalar;
case SVt_PVMG:
- old_body_arena = (void **) &PL_xpvmg_root;
- goto freescalar;
case SVt_PVNV:
- old_body_arena = (void **) &PL_xpvnv_root;
- goto freescalar;
case SVt_PVIV:
- old_body_arena = (void **) &PL_xpviv_root;
- old_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur);
freescalar:
/* Don't bother with SvOOK_off(sv); as we're only going to free it. */
if (SvOOK(sv)) {
SvPV_set(sv, SvPVX_mutable(sv) - SvIVX(sv));
/* Don't even bother with turning off the OOK flag. */
}
- goto pvrv_common;
case SVt_PV:
- old_body_arena = (void **) &PL_xpv_root;
- old_body_offset = STRUCT_OFFSET(XPV, xpv_cur);
case SVt_RV:
- pvrv_common:
if (SvROK(sv)) {
SV *target = SvRV(sv);
if (SvWEAKREF(sv))
#endif
break;
case SVt_NV:
- old_body_arena = (void **) &PL_xnv_root;
break;
}
SvFLAGS(sv) |= SVTYPEMASK;
#ifndef PURIFY
- if (old_body_arena) {
- del_body(((char *)SvANY(sv) + old_body_offset), old_body_arena);
+ if (sv_type_details->arena) {
+ del_body(((char *)SvANY(sv) - sv_type_details->offset),
+ &PL_body_roots[type]);
+ }
+ else if (sv_type_details->size) {
+ my_safefree(SvANY(sv));
+ }
+#else
+ if (sv_type_details->size) {
+ my_safefree(SvANY(sv));
}
- else
#endif
- if (type > SVt_RV) {
- my_safefree(SvANY(sv));
- }
}
/*
{
q++; /* skip past the rest of the %vd format */
eptr = (const char *) vecstr;
- elen = strlen(eptr);
+ elen = veclen;
vectorize=FALSE;
goto string;
}
if (mg->mg_type == PERL_MAGIC_overload_table &&
AMT_AMAGIC((AMT*)mg->mg_ptr))
{
- AMT *amtp = (AMT*)mg->mg_ptr;
- AMT *namtp = (AMT*)nmg->mg_ptr;
+ AMT * const amtp = (AMT*)mg->mg_ptr;
+ AMT * const namtp = (AMT*)nmg->mg_ptr;
I32 i;
for (i = 1; i < NofAMmeth; i++) {
namtp->table[i] = cv_dup_inc(amtp->table[i], param);
# define PTR_TABLE_HASH(ptr) (PTR2UV(ptr) >> 2)
#endif
-#define del_pte(p) del_body_type(p, struct ptr_tbl_ent, pte)
+/*
+ we use the PTE_SVSLOT 'reservation' made above, both here (in the
+ following define) and at call to new_body_inline made below in
+ Perl_ptr_table_store()
+ */
+
+#define del_pte(p) del_body_type(p, PTE_SVSLOT)
/* map an existing pointer using a table */
return;
}
}
- new_body_inline(tblent, (void**)&PL_pte_arenaroot, (void**)&PL_pte_root,
- sizeof(struct ptr_tbl_ent));
+ new_body_inline(tblent, sizeof(struct ptr_tbl_ent), PTE_SVSLOT);
tblent->oldval = oldsv;
tblent->newval = newsv;
tblent->next = *otblent;
default:
{
/* These are all the types that need complex bodies allocating. */
- size_t new_body_length;
- size_t new_body_offset = 0;
- void **new_body_arena;
- void **new_body_arenaroot;
void *new_body;
+ const svtype sv_type = SvTYPE(sstr);
+ const struct body_details *const sv_type_details
+ = bodies_by_type + sv_type;
- switch (SvTYPE(sstr)) {
+ switch (sv_type) {
default:
Perl_croak(aTHX_ "Bizarre SvTYPE [%" IVdf "]",
(IV)SvTYPE(sstr));
break;
+ case SVt_PVGV:
+ if (GvUNIQUE((GV*)sstr)) {
+ /* Do sharing here, and fall through */
+ }
case SVt_PVIO:
- new_body = new_XPVIO();
- new_body_length = sizeof(XPVIO);
- break;
case SVt_PVFM:
- new_body = new_XPVFM();
- new_body_length = sizeof(XPVFM);
- break;
-
case SVt_PVHV:
- new_body_arena = (void **) &PL_xpvhv_root;
- new_body_arenaroot = (void **) &PL_xpvhv_arenaroot;
- new_body_offset = STRUCT_OFFSET(XPVHV, xhv_fill)
- - STRUCT_OFFSET(xpvhv_allocated, xhv_fill);
- new_body_length = STRUCT_OFFSET(XPVHV, xmg_stash)
- + sizeof (((XPVHV*)SvANY(sstr))->xmg_stash)
- - new_body_offset;
- goto new_body;
case SVt_PVAV:
- new_body_arena = (void **) &PL_xpvav_root;
- new_body_arenaroot = (void **) &PL_xpvav_arenaroot;
- new_body_offset = STRUCT_OFFSET(XPVAV, xav_fill)
- - STRUCT_OFFSET(xpvav_allocated, xav_fill);
- new_body_length = STRUCT_OFFSET(XPVHV, xmg_stash)
- + sizeof (((XPVHV*)SvANY(sstr))->xmg_stash)
- - new_body_offset;
- goto new_body;
case SVt_PVBM:
- new_body_length = sizeof(XPVBM);
- new_body_arena = (void **) &PL_xpvbm_root;
- new_body_arenaroot = (void **) &PL_xpvbm_arenaroot;
- goto new_body;
- case SVt_PVGV:
- if (GvUNIQUE((GV*)sstr)) {
- /* Do sharing here. */
- }
- new_body_length = sizeof(XPVGV);
- new_body_arena = (void **) &PL_xpvgv_root;
- new_body_arenaroot = (void **) &PL_xpvgv_arenaroot;
- goto new_body;
case SVt_PVCV:
- new_body_length = sizeof(XPVCV);
- new_body_arena = (void **) &PL_xpvcv_root;
- new_body_arenaroot = (void **) &PL_xpvcv_arenaroot;
- goto new_body;
case SVt_PVLV:
- new_body_length = sizeof(XPVLV);
- new_body_arena = (void **) &PL_xpvlv_root;
- new_body_arenaroot = (void **) &PL_xpvlv_arenaroot;
- goto new_body;
case SVt_PVMG:
- new_body_length = sizeof(XPVMG);
- new_body_arena = (void **) &PL_xpvmg_root;
- new_body_arenaroot = (void **) &PL_xpvmg_arenaroot;
- goto new_body;
case SVt_PVNV:
- new_body_length = sizeof(XPVNV);
- new_body_arena = (void **) &PL_xpvnv_root;
- new_body_arenaroot = (void **) &PL_xpvnv_arenaroot;
- goto new_body;
case SVt_PVIV:
- new_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur)
- - STRUCT_OFFSET(xpviv_allocated, xpv_cur);
- new_body_length = sizeof(XPVIV) - new_body_offset;
- new_body_arena = (void **) &PL_xpviv_root;
- new_body_arenaroot = (void **) &PL_xpviv_arenaroot;
- goto new_body;
case SVt_PV:
- new_body_offset = STRUCT_OFFSET(XPV, xpv_cur)
- - STRUCT_OFFSET(xpv_allocated, xpv_cur);
- new_body_length = sizeof(XPV) - new_body_offset;
- new_body_arena = (void **) &PL_xpv_root;
- new_body_arenaroot = (void **) &PL_xpv_arenaroot;
- new_body:
- assert(new_body_length);
+ assert(sv_type_details->copy);
#ifndef PURIFY
- new_body_inline(new_body, new_body_arenaroot, new_body_arena,
- new_body_length);
- new_body = (void*)((char*)new_body - new_body_offset);
+ if (sv_type_details->arena) {
+ new_body_inline(new_body, sv_type_details->copy, sv_type);
+ new_body
+ = (void*)((char*)new_body + sv_type_details->offset);
+ } else {
+ new_body = new_NOARENA(sv_type_details);
+ }
#else
/* We always allocated the full length item with PURIFY */
- new_body_length += new_body_offset;
- new_body_offset = 0;
- new_body = my_safemalloc(new_body_length);
+ new_body = new_NOARENA(sv_type_details);
#endif
}
assert(new_body);
SvANY(dstr) = new_body;
- Copy(((char*)SvANY(sstr)) + new_body_offset,
- ((char*)SvANY(dstr)) + new_body_offset,
- new_body_length, char);
+#ifndef PURIFY
+ Copy(((char*)SvANY(sstr)) - sv_type_details->offset,
+ ((char*)SvANY(dstr)) - sv_type_details->offset,
+ sv_type_details->copy, char);
+#else
+ Copy(((char*)SvANY(sstr)),
+ ((char*)SvANY(dstr)),
+ sv_type_details->size - sv_type_details->offset, char);
+#endif
- if (SvTYPE(sstr) != SVt_PVAV && SvTYPE(sstr) != SVt_PVHV)
+ if (sv_type != SVt_PVAV && sv_type != SVt_PVHV)
Perl_rvpv_dup(aTHX_ dstr, sstr, param);
/* The Copy above means that all the source (unduplicated) pointers
pointers in either, but it's possible that there's less cache
missing by always going for the destination.
FIXME - instrument and check that assumption */
- if (SvTYPE(sstr) >= SVt_PVMG) {
+ if (sv_type >= SVt_PVMG) {
if (SvMAGIC(dstr))
SvMAGIC_set(dstr, mg_dup(SvMAGIC(dstr), param));
if (SvSTASH(dstr))
SvSTASH_set(dstr, hv_dup_inc(SvSTASH(dstr), param));
}
- switch (SvTYPE(sstr)) {
+ /* The cast silences a GCC warning about unhandled types. */
+ switch ((int)sv_type) {
case SVt_PV:
break;
case SVt_PVIV:
param->flags = flags;
param->proto_perl = proto_perl;
- /* arena roots */
- PL_xnv_arenaroot = NULL;
- PL_xnv_root = NULL;
- PL_xpv_arenaroot = NULL;
- PL_xpv_root = NULL;
- PL_xpviv_arenaroot = NULL;
- PL_xpviv_root = NULL;
- PL_xpvnv_arenaroot = NULL;
- PL_xpvnv_root = NULL;
- PL_xpvcv_arenaroot = NULL;
- PL_xpvcv_root = NULL;
- PL_xpvav_arenaroot = NULL;
- PL_xpvav_root = NULL;
- PL_xpvhv_arenaroot = NULL;
- PL_xpvhv_root = NULL;
- PL_xpvmg_arenaroot = NULL;
- PL_xpvmg_root = NULL;
- PL_xpvgv_arenaroot = NULL;
- PL_xpvgv_root = NULL;
- PL_xpvlv_arenaroot = NULL;
- PL_xpvlv_root = NULL;
- PL_xpvbm_arenaroot = NULL;
- PL_xpvbm_root = NULL;
+ Zero(&PL_body_arenaroots, 1, PL_body_arenaroots);
+ Zero(&PL_body_roots, 1, PL_body_roots);
+
PL_he_arenaroot = NULL;
PL_he_root = NULL;
-#if defined(USE_ITHREADS)
- PL_pte_arenaroot = NULL;
- PL_pte_root = NULL;
-#endif
+
PL_nice_chunk = NULL;
PL_nice_chunk_size = 0;
PL_sv_count = 0;