#define ASSERT_UTF8_CACHE(cache) NOOP
#endif
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
#define SV_COW_NEXT_SV(sv) INT2PTR(SV *,SvUVX(sv))
#define SV_COW_NEXT_SV_SET(current,next) SvUV_set(current, PTR2UV(next))
/* This is a pessimistic view. Scalar must be purely a read-write PV to copy-
SV* sva;
SV* svanext;
void *arena, *arenanext;
+ int i;
+ void **arenaroots[] = {
+ (void**) &PL_xnv_arenaroot,
+ (void**) &PL_xpv_arenaroot,
+ (void**) &PL_xpviv_arenaroot,
+ (void**) &PL_xpvnv_arenaroot,
+ (void**) &PL_xpvcv_arenaroot,
+ (void**) &PL_xpvav_arenaroot,
+ (void**) &PL_xpvhv_arenaroot,
+ (void**) &PL_xpvmg_arenaroot,
+ (void**) &PL_xpvgv_arenaroot,
+ (void**) &PL_xpvlv_arenaroot,
+ (void**) &PL_xpvbm_arenaroot,
+ (void**) 0
+ };
+ void **roots[] = {
+ (void**) &PL_xnv_root,
+ (void**) &PL_xpv_root,
+ (void**) &PL_xpviv_root,
+ (void**) &PL_xpvnv_root,
+ (void**) &PL_xpvcv_root,
+ (void**) &PL_xpvav_root,
+ (void**) &PL_xpvhv_root,
+ (void**) &PL_xpvmg_root,
+ (void**) &PL_xpvgv_root,
+ (void**) &PL_xpvlv_root,
+ (void**) &PL_xpvbm_root,
+ (void**) 0
+ };
/* Free arenas here, but be careful about fake ones. (We assume
contiguity of the fake ones with the corresponding real ones.) */
if (!SvFAKE(sva))
Safefree(sva);
}
+
+ assert(sizeof(arenaroots) == sizeof(roots));
- for (arena = PL_xnv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xnv_arenaroot = 0;
- PL_xnv_root = 0;
-
- for (arena = PL_xpv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpv_arenaroot = 0;
- PL_xpv_root = 0;
-
- for (arena = PL_xpviv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpviv_arenaroot = 0;
- PL_xpviv_root = 0;
-
- for (arena = PL_xpvnv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvnv_arenaroot = 0;
- PL_xpvnv_root = 0;
-
- for (arena = PL_xpvcv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvcv_arenaroot = 0;
- PL_xpvcv_root = 0;
-
- for (arena = PL_xpvav_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvav_arenaroot = 0;
- PL_xpvav_root = 0;
-
- for (arena = PL_xpvhv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvhv_arenaroot = 0;
- PL_xpvhv_root = 0;
-
- for (arena = PL_xpvmg_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvmg_arenaroot = 0;
- PL_xpvmg_root = 0;
-
- for (arena = PL_xpvgv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvgv_arenaroot = 0;
- PL_xpvgv_root = 0;
+ for (i=0; arenaroots[i]; i++) {
- for (arena = PL_xpvlv_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
- }
- PL_xpvlv_arenaroot = 0;
- PL_xpvlv_root = 0;
-
- for (arena = PL_xpvbm_arenaroot; arena; arena = arenanext) {
- arenanext = *(void **)arena;
- Safefree(arena);
+ arena = *arenaroots[i];
+ for (; arena; arena = arenanext) {
+ arenanext = *(void **)arena;
+ Safefree(arena);
+ }
+ *arenaroots[i] = 0;
+ *roots[i] = 0;
}
- PL_xpvbm_arenaroot = 0;
- PL_xpvbm_root = 0;
{
HE *he;
else {
U32 u;
CV *cv = find_runcv(&u);
- STRLEN len;
- const char *str;
if (!cv || !CvPADLIST(cv))
return Nullsv;;
av = (AV*)(*av_fetch(CvPADLIST(cv), 0, FALSE));
sv = *av_fetch(av, targ, FALSE);
/* SvLEN in a pad name is not to be trusted */
- str = SvPV(sv,len);
- sv_setpvn(name, str, len);
+ sv_setpv(name, SvPV_nolen_const(sv));
}
if (subscript_type == FUV_SUBSCRIPT_HASH) {
sv_insert(varname, 0, 0, " ", 1);
}
Perl_warner(aTHX_ packWARN(WARN_UNINITIALIZED), PL_warn_uninit,
- varname ? SvPV_nolen(varname) : "",
+ varname ? SvPV_nolen_const(varname) : "",
" in ", OP_DESC(PL_op));
}
else
"", "", "");
}
-/* allocate another arena's worth of NV bodies */
-
-STATIC void
-S_more_xnv(pTHX)
-{
- NV* xnv;
- NV* xnvend;
- void *ptr;
- New(711, ptr, PERL_ARENA_SIZE/sizeof(NV), NV);
- *((void **) ptr) = (void *)PL_xnv_arenaroot;
- PL_xnv_arenaroot = ptr;
-
- xnv = (NV*) ptr;
- xnvend = &xnv[PERL_ARENA_SIZE / sizeof(NV) - 1];
- xnv += (sizeof(XPVIV) - 1) / sizeof(NV) + 1; /* fudge by sizeof XPVIV */
- PL_xnv_root = xnv;
- while (xnv < xnvend) {
- *(NV**)xnv = (NV*)(xnv + 1);
- xnv++;
- }
- *(NV**)xnv = 0;
-}
-
-/* allocate another arena's worth of struct xpv */
-
-STATIC void
-S_more_xpv(pTHX)
-{
- xpv_allocated* xpv;
- xpv_allocated* xpvend;
- New(713, xpv, PERL_ARENA_SIZE/sizeof(xpv_allocated), xpv_allocated);
- *((xpv_allocated**)xpv) = PL_xpv_arenaroot;
- PL_xpv_arenaroot = xpv;
-
- xpvend = &xpv[PERL_ARENA_SIZE / sizeof(xpv_allocated) - 1];
- PL_xpv_root = ++xpv;
- while (xpv < xpvend) {
- *((xpv_allocated**)xpv) = xpv + 1;
- xpv++;
- }
- *((xpv_allocated**)xpv) = 0;
-}
-
-/* allocate another arena's worth of struct xpviv */
-
-STATIC void
-S_more_xpviv(pTHX)
-{
- xpviv_allocated* xpviv;
- xpviv_allocated* xpvivend;
- New(713, xpviv, PERL_ARENA_SIZE/sizeof(xpviv_allocated), xpviv_allocated);
- *((xpviv_allocated**)xpviv) = PL_xpviv_arenaroot;
- PL_xpviv_arenaroot = xpviv;
-
- xpvivend = &xpviv[PERL_ARENA_SIZE / sizeof(xpviv_allocated) - 1];
- PL_xpviv_root = ++xpviv;
- while (xpviv < xpvivend) {
- *((xpviv_allocated**)xpviv) = xpviv + 1;
- xpviv++;
- }
- *((xpviv_allocated**)xpviv) = 0;
-}
-
-/* allocate another arena's worth of struct xpvnv */
-
-STATIC void
-S_more_xpvnv(pTHX)
-{
- XPVNV* xpvnv;
- XPVNV* xpvnvend;
- New(715, xpvnv, PERL_ARENA_SIZE/sizeof(XPVNV), XPVNV);
- *((XPVNV**)xpvnv) = PL_xpvnv_arenaroot;
- PL_xpvnv_arenaroot = xpvnv;
-
- xpvnvend = &xpvnv[PERL_ARENA_SIZE / sizeof(XPVNV) - 1];
- PL_xpvnv_root = ++xpvnv;
- while (xpvnv < xpvnvend) {
- *((XPVNV**)xpvnv) = xpvnv + 1;
- xpvnv++;
- }
- *((XPVNV**)xpvnv) = 0;
-}
-
-/* allocate another arena's worth of struct xpvcv */
-
-STATIC void
-S_more_xpvcv(pTHX)
+STATIC void *
+S_more_bodies (pTHX_ void **arena_root, void **root, size_t size)
{
- XPVCV* xpvcv;
- XPVCV* xpvcvend;
- New(716, xpvcv, PERL_ARENA_SIZE/sizeof(XPVCV), XPVCV);
- *((XPVCV**)xpvcv) = PL_xpvcv_arenaroot;
- PL_xpvcv_arenaroot = xpvcv;
-
- xpvcvend = &xpvcv[PERL_ARENA_SIZE / sizeof(XPVCV) - 1];
- PL_xpvcv_root = ++xpvcv;
- while (xpvcv < xpvcvend) {
- *((XPVCV**)xpvcv) = xpvcv + 1;
- xpvcv++;
- }
- *((XPVCV**)xpvcv) = 0;
-}
-
-/* allocate another arena's worth of struct xpvav */
-
-STATIC void
-S_more_xpvav(pTHX)
-{
- xpvav_allocated* xpvav;
- xpvav_allocated* xpvavend;
- New(717, xpvav, PERL_ARENA_SIZE/sizeof(xpvav_allocated),
- xpvav_allocated);
- *((xpvav_allocated**)xpvav) = PL_xpvav_arenaroot;
- PL_xpvav_arenaroot = xpvav;
-
- xpvavend = &xpvav[PERL_ARENA_SIZE / sizeof(xpvav_allocated) - 1];
- PL_xpvav_root = ++xpvav;
- while (xpvav < xpvavend) {
- *((xpvav_allocated**)xpvav) = xpvav + 1;
- xpvav++;
- }
- *((xpvav_allocated**)xpvav) = 0;
-}
-
-/* allocate another arena's worth of struct xpvhv */
-
-STATIC void
-S_more_xpvhv(pTHX)
-{
- xpvhv_allocated* xpvhv;
- xpvhv_allocated* xpvhvend;
- New(718, xpvhv, PERL_ARENA_SIZE/sizeof(xpvhv_allocated),
- xpvhv_allocated);
- *((xpvhv_allocated**)xpvhv) = PL_xpvhv_arenaroot;
- PL_xpvhv_arenaroot = xpvhv;
-
- xpvhvend = &xpvhv[PERL_ARENA_SIZE / sizeof(xpvhv_allocated) - 1];
- PL_xpvhv_root = ++xpvhv;
- while (xpvhv < xpvhvend) {
- *((xpvhv_allocated**)xpvhv) = xpvhv + 1;
- xpvhv++;
- }
- *((xpvhv_allocated**)xpvhv) = 0;
-}
-
-/* allocate another arena's worth of struct xpvmg */
-
-STATIC void
-S_more_xpvmg(pTHX)
-{
- XPVMG* xpvmg;
- XPVMG* xpvmgend;
- New(719, xpvmg, PERL_ARENA_SIZE/sizeof(XPVMG), XPVMG);
- *((XPVMG**)xpvmg) = PL_xpvmg_arenaroot;
- PL_xpvmg_arenaroot = xpvmg;
-
- xpvmgend = &xpvmg[PERL_ARENA_SIZE / sizeof(XPVMG) - 1];
- PL_xpvmg_root = ++xpvmg;
- while (xpvmg < xpvmgend) {
- *((XPVMG**)xpvmg) = xpvmg + 1;
- xpvmg++;
- }
- *((XPVMG**)xpvmg) = 0;
-}
-
-/* allocate another arena's worth of struct xpvgv */
-
-STATIC void
-S_more_xpvgv(pTHX)
-{
- XPVGV* xpvgv;
- XPVGV* xpvgvend;
- New(720, xpvgv, PERL_ARENA_SIZE/sizeof(XPVGV), XPVGV);
- *((XPVGV**)xpvgv) = PL_xpvgv_arenaroot;
- PL_xpvgv_arenaroot = xpvgv;
-
- xpvgvend = &xpvgv[PERL_ARENA_SIZE / sizeof(XPVGV) - 1];
- PL_xpvgv_root = ++xpvgv;
- while (xpvgv < xpvgvend) {
- *((XPVGV**)xpvgv) = xpvgv + 1;
- xpvgv++;
- }
- *((XPVGV**)xpvgv) = 0;
-}
+ char *start;
+ const char *end;
+ size_t count = PERL_ARENA_SIZE/size;
+ New(0, start, count*size, char);
+ *((void **) start) = *arena_root;
+ *arena_root = (void *)start;
-/* allocate another arena's worth of struct xpvlv */
+ end = start + (count-1) * size;
-STATIC void
-S_more_xpvlv(pTHX)
-{
- XPVLV* xpvlv;
- XPVLV* xpvlvend;
- New(720, xpvlv, PERL_ARENA_SIZE/sizeof(XPVLV), XPVLV);
- *((XPVLV**)xpvlv) = PL_xpvlv_arenaroot;
- PL_xpvlv_arenaroot = xpvlv;
+ /* The initial slot is used to link the arenas together, so it isn't to be
+ linked into the list of ready-to-use bodies. */
- xpvlvend = &xpvlv[PERL_ARENA_SIZE / sizeof(XPVLV) - 1];
- PL_xpvlv_root = ++xpvlv;
- while (xpvlv < xpvlvend) {
- *((XPVLV**)xpvlv) = xpvlv + 1;
- xpvlv++;
- }
- *((XPVLV**)xpvlv) = 0;
-}
+ start += size;
-/* allocate another arena's worth of struct xpvbm */
+ *root = (void *)start;
-STATIC void
-S_more_xpvbm(pTHX)
-{
- XPVBM* xpvbm;
- XPVBM* xpvbmend;
- New(721, xpvbm, PERL_ARENA_SIZE/sizeof(XPVBM), XPVBM);
- *((XPVBM**)xpvbm) = PL_xpvbm_arenaroot;
- PL_xpvbm_arenaroot = xpvbm;
-
- xpvbmend = &xpvbm[PERL_ARENA_SIZE / sizeof(XPVBM) - 1];
- PL_xpvbm_root = ++xpvbm;
- while (xpvbm < xpvbmend) {
- *((XPVBM**)xpvbm) = xpvbm + 1;
- xpvbm++;
+ while (start < end) {
+ char *next = start + size;
+ *(void**) start = (void *)next;
+ start = next;
}
- *((XPVBM**)xpvbm) = 0;
-}
-
-/* grab a new NV body from the free list, allocating more if necessary */
-
-STATIC XPVNV*
-S_new_xnv(pTHX)
-{
- NV* xnv;
- LOCK_SV_MUTEX;
- if (!PL_xnv_root)
- S_more_xnv(aTHX);
- xnv = PL_xnv_root;
- PL_xnv_root = *(NV**)xnv;
- UNLOCK_SV_MUTEX;
- return (XPVNV*)((char*)xnv - STRUCT_OFFSET(XPVNV, xnv_nv));
-}
-
-/* return an NV body to the free list */
-
-STATIC void
-S_del_xnv(pTHX_ XPVNV *p)
-{
- NV* xnv = (NV*)((char*)(p) + STRUCT_OFFSET(XPVNV, xnv_nv));
- LOCK_SV_MUTEX;
- *(NV**)xnv = PL_xnv_root;
- PL_xnv_root = xnv;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpv from the free list, allocating more if necessary */
-
-STATIC XPV*
-S_new_xpv(pTHX)
-{
- xpv_allocated* xpv;
- LOCK_SV_MUTEX;
- if (!PL_xpv_root)
- S_more_xpv(aTHX);
- xpv = PL_xpv_root;
- PL_xpv_root = *(xpv_allocated**)xpv;
- UNLOCK_SV_MUTEX;
- /* If xpv_allocated is the same structure as XPV then the two OFFSETs
- sum to zero, and the pointer is unchanged. If the allocated structure
- is smaller (no initial IV actually allocated) then the net effect is
- to subtract the size of the IV from the pointer, to return a new pointer
- as if an initial IV were actually allocated. */
- return (XPV*)((char*)xpv - STRUCT_OFFSET(XPV, xpv_cur)
- + STRUCT_OFFSET(xpv_allocated, xpv_cur));
-}
-
-/* return a struct xpv to the free list */
-
-STATIC void
-S_del_xpv(pTHX_ XPV *p)
-{
- xpv_allocated* xpv
- = (xpv_allocated*)((char*)(p) + STRUCT_OFFSET(XPV, xpv_cur)
- - STRUCT_OFFSET(xpv_allocated, xpv_cur));
- LOCK_SV_MUTEX;
- *(xpv_allocated**)xpv = PL_xpv_root;
- PL_xpv_root = xpv;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpviv from the free list, allocating more if necessary */
-
-STATIC XPVIV*
-S_new_xpviv(pTHX)
-{
- xpviv_allocated* xpviv;
- LOCK_SV_MUTEX;
- if (!PL_xpviv_root)
- S_more_xpviv(aTHX);
- xpviv = PL_xpviv_root;
- PL_xpviv_root = *(xpviv_allocated**)xpviv;
- UNLOCK_SV_MUTEX;
- /* If xpviv_allocated is the same structure as XPVIV then the two OFFSETs
- sum to zero, and the pointer is unchanged. If the allocated structure
- is smaller (no initial IV actually allocated) then the net effect is
- to subtract the size of the IV from the pointer, to return a new pointer
- as if an initial IV were actually allocated. */
- return (XPVIV*)((char*)xpviv - STRUCT_OFFSET(XPVIV, xpv_cur)
- + STRUCT_OFFSET(xpviv_allocated, xpv_cur));
-}
+ *(void **)start = 0;
-/* return a struct xpviv to the free list */
-
-STATIC void
-S_del_xpviv(pTHX_ XPVIV *p)
-{
- xpviv_allocated* xpviv
- = (xpviv_allocated*)((char*)(p) + STRUCT_OFFSET(XPVIV, xpv_cur)
- - STRUCT_OFFSET(xpviv_allocated, xpv_cur));
- LOCK_SV_MUTEX;
- *(xpviv_allocated**)xpviv = PL_xpviv_root;
- PL_xpviv_root = xpviv;
- UNLOCK_SV_MUTEX;
+ return *root;
}
-/* grab a new struct xpvnv from the free list, allocating more if necessary */
+/* grab a new thing from the free list, allocating more if necessary */
-STATIC XPVNV*
-S_new_xpvnv(pTHX)
+STATIC void *
+S_new_body(pTHX_ void **arena_root, void **root, size_t size, size_t offset)
{
- XPVNV* xpvnv;
+ void *xpv;
LOCK_SV_MUTEX;
- if (!PL_xpvnv_root)
- S_more_xpvnv(aTHX);
- xpvnv = PL_xpvnv_root;
- PL_xpvnv_root = *(XPVNV**)xpvnv;
+ xpv = *root ? *root : S_more_bodies(aTHX_ arena_root, root, size);
+ *root = *(void**)xpv;
UNLOCK_SV_MUTEX;
- return xpvnv;
+ return (void*)((char*)xpv - offset);
}
-/* return a struct xpvnv to the free list */
+/* return a thing to the free list */
STATIC void
-S_del_xpvnv(pTHX_ XPVNV *p)
+S_del_body(pTHX_ void *thing, void **root, size_t offset)
{
+ void **real_thing = (void**)((char *)thing + offset);
LOCK_SV_MUTEX;
- *(XPVNV**)p = PL_xpvnv_root;
- PL_xpvnv_root = p;
+ *real_thing = *root;
+ *root = (void*)real_thing;
UNLOCK_SV_MUTEX;
}
-/* grab a new struct xpvcv from the free list, allocating more if necessary */
+/* Conventionally we simply malloc() a big block of memory, then divide it
+ up into lots of the thing that we're allocating.
-STATIC XPVCV*
-S_new_xpvcv(pTHX)
-{
- XPVCV* xpvcv;
- LOCK_SV_MUTEX;
- if (!PL_xpvcv_root)
- S_more_xpvcv(aTHX);
- xpvcv = PL_xpvcv_root;
- PL_xpvcv_root = *(XPVCV**)xpvcv;
- UNLOCK_SV_MUTEX;
- return xpvcv;
-}
+ This macro will expand to call to S_new_body. So for XPVBM (with ithreads),
+ it would become
-/* return a struct xpvcv to the free list */
-
-STATIC void
-S_del_xpvcv(pTHX_ XPVCV *p)
-{
- LOCK_SV_MUTEX;
- *(XPVCV**)p = PL_xpvcv_root;
- PL_xpvcv_root = p;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvav from the free list, allocating more if necessary */
-
-STATIC XPVAV*
-S_new_xpvav(pTHX)
-{
- xpvav_allocated* xpvav;
- LOCK_SV_MUTEX;
- if (!PL_xpvav_root)
- S_more_xpvav(aTHX);
- xpvav = PL_xpvav_root;
- PL_xpvav_root = *(xpvav_allocated**)xpvav;
- UNLOCK_SV_MUTEX;
- return (XPVAV*)((char*)xpvav - STRUCT_OFFSET(XPVAV, xav_fill)
- + STRUCT_OFFSET(xpvav_allocated, xav_fill));
-}
-
-/* return a struct xpvav to the free list */
-
-STATIC void
-S_del_xpvav(pTHX_ XPVAV *p)
-{
- xpvav_allocated* xpvav
- = (xpvav_allocated*)((char*)(p) + STRUCT_OFFSET(XPVAV, xav_fill)
- - STRUCT_OFFSET(xpvav_allocated, xav_fill));
- LOCK_SV_MUTEX;
- *(xpvav_allocated**)xpvav = PL_xpvav_root;
- PL_xpvav_root = xpvav;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvhv from the free list, allocating more if necessary */
-
-STATIC XPVHV*
-S_new_xpvhv(pTHX)
-{
- xpvhv_allocated* xpvhv;
- LOCK_SV_MUTEX;
- if (!PL_xpvhv_root)
- S_more_xpvhv(aTHX);
- xpvhv = PL_xpvhv_root;
- PL_xpvhv_root = *(xpvhv_allocated**)xpvhv;
- UNLOCK_SV_MUTEX;
- return (XPVHV*)((char*)xpvhv - STRUCT_OFFSET(XPVHV, xhv_fill)
- + STRUCT_OFFSET(xpvhv_allocated, xhv_fill));
-}
-
-/* return a struct xpvhv to the free list */
-
-STATIC void
-S_del_xpvhv(pTHX_ XPVHV *p)
-{
- xpvhv_allocated* xpvhv
- = (xpvhv_allocated*)((char*)(p) + STRUCT_OFFSET(XPVHV, xhv_fill)
- - STRUCT_OFFSET(xpvhv_allocated, xhv_fill));
- LOCK_SV_MUTEX;
- *(xpvhv_allocated**)xpvhv = PL_xpvhv_root;
- PL_xpvhv_root = xpvhv;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvmg from the free list, allocating more if necessary */
-
-STATIC XPVMG*
-S_new_xpvmg(pTHX)
-{
- XPVMG* xpvmg;
- LOCK_SV_MUTEX;
- if (!PL_xpvmg_root)
- S_more_xpvmg(aTHX);
- xpvmg = PL_xpvmg_root;
- PL_xpvmg_root = *(XPVMG**)xpvmg;
- UNLOCK_SV_MUTEX;
- return xpvmg;
-}
-
-/* return a struct xpvmg to the free list */
-
-STATIC void
-S_del_xpvmg(pTHX_ XPVMG *p)
-{
- LOCK_SV_MUTEX;
- *(XPVMG**)p = PL_xpvmg_root;
- PL_xpvmg_root = p;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvgv from the free list, allocating more if necessary */
-
-STATIC XPVGV*
-S_new_xpvgv(pTHX)
-{
- XPVGV* xpvgv;
- LOCK_SV_MUTEX;
- if (!PL_xpvgv_root)
- S_more_xpvgv(aTHX);
- xpvgv = PL_xpvgv_root;
- PL_xpvgv_root = *(XPVGV**)xpvgv;
- UNLOCK_SV_MUTEX;
- return xpvgv;
-}
-
-/* return a struct xpvgv to the free list */
-
-STATIC void
-S_del_xpvgv(pTHX_ XPVGV *p)
-{
- LOCK_SV_MUTEX;
- *(XPVGV**)p = PL_xpvgv_root;
- PL_xpvgv_root = p;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvlv from the free list, allocating more if necessary */
-
-STATIC XPVLV*
-S_new_xpvlv(pTHX)
-{
- XPVLV* xpvlv;
- LOCK_SV_MUTEX;
- if (!PL_xpvlv_root)
- S_more_xpvlv(aTHX);
- xpvlv = PL_xpvlv_root;
- PL_xpvlv_root = *(XPVLV**)xpvlv;
- UNLOCK_SV_MUTEX;
- return xpvlv;
-}
-
-/* return a struct xpvlv to the free list */
-
-STATIC void
-S_del_xpvlv(pTHX_ XPVLV *p)
-{
- LOCK_SV_MUTEX;
- *(XPVLV**)p = PL_xpvlv_root;
- PL_xpvlv_root = p;
- UNLOCK_SV_MUTEX;
-}
-
-/* grab a new struct xpvbm from the free list, allocating more if necessary */
-
-STATIC XPVBM*
-S_new_xpvbm(pTHX)
-{
- XPVBM* xpvbm;
- LOCK_SV_MUTEX;
- if (!PL_xpvbm_root)
- S_more_xpvbm(aTHX);
- xpvbm = PL_xpvbm_root;
- PL_xpvbm_root = *(XPVBM**)xpvbm;
- UNLOCK_SV_MUTEX;
- return xpvbm;
-}
-
-/* return a struct xpvbm to the free list */
+ S_new_body(my_perl, (void**)&(my_perl->Ixpvbm_arenaroot),
+ (void**)&(my_perl->Ixpvbm_root), sizeof(XPVBM), 0)
+*/
-STATIC void
-S_del_xpvbm(pTHX_ XPVBM *p)
-{
- LOCK_SV_MUTEX;
- *(XPVBM**)p = PL_xpvbm_root;
- PL_xpvbm_root = p;
- UNLOCK_SV_MUTEX;
-}
+#define new_body(TYPE,lctype) \
+ S_new_body(aTHX_ (void**)&PL_ ## lctype ## _arenaroot, \
+ (void**)&PL_ ## lctype ## _root, \
+ sizeof(TYPE), \
+ 0)
+
+/* But for some types, we cheat. The type starts with some members that are
+ never accessed. So we allocate the substructure, starting at the first used
+ member, then adjust the pointer back in memory by the size of the bit not
+ allocated, so it's as if we allocated the full structure.
+ (But things will all go boom if you write to the part that is "not there",
+ because you'll be overwriting the last members of the preceding structure
+ in memory.)
+
+ We calculate the correction using the STRUCT_OFFSET macro. For example, if
+ xpv_allocated is the same structure as XPV then the two OFFSETs sum to zero,
+ and the pointer is unchanged. If the allocated structure is smaller (no
+ initial NV actually allocated) then the net effect is to subtract the size
+ of the NV from the pointer, to return a new pointer as if an initial NV were
+ actually allocated.
+
+ This is the same trick as was used for NV and IV bodies. Ironically it
+ doesn't need to be used for NV bodies any more, because NV is now at the
+ start of the structure. IV bodies don't need it either, because they are
+ no longer allocated. */
+
+#define new_body_allocated(TYPE,lctype,member) \
+ S_new_body(aTHX_ (void**)&PL_ ## lctype ## _arenaroot, \
+ (void**)&PL_ ## lctype ## _root, \
+ sizeof(lctype ## _allocated), \
+ STRUCT_OFFSET(TYPE, member) \
+ - STRUCT_OFFSET(lctype ## _allocated, member))
+
+
+#define del_body(p,TYPE,lctype) \
+ S_del_body(aTHX_ (void*)p, (void**)&PL_ ## lctype ## _root, 0)
+
+#define del_body_allocated(p,TYPE,lctype,member) \
+ S_del_body(aTHX_ (void*)p, (void**)&PL_ ## lctype ## _root, \
+ STRUCT_OFFSET(TYPE, member) \
+ - STRUCT_OFFSET(lctype ## _allocated, member))
#define my_safemalloc(s) (void*)safemalloc(s)
#define my_safefree(p) safefree((char*)p)
#else /* !PURIFY */
-#define new_XNV() (void*)new_xnv()
-#define del_XNV(p) del_xnv((XPVNV*) p)
+#define new_XNV() new_body(NV, xnv)
+#define del_XNV(p) del_body(p, NV, xnv)
-#define new_XPV() (void*)new_xpv()
-#define del_XPV(p) del_xpv((XPV *)p)
+#define new_XPV() new_body_allocated(XPV, xpv, xpv_cur)
+#define del_XPV(p) del_body_allocated(p, XPV, xpv, xpv_cur)
-#define new_XPVIV() (void*)new_xpviv()
-#define del_XPVIV(p) del_xpviv((XPVIV *)p)
+#define new_XPVIV() new_body_allocated(XPVIV, xpviv, xpv_cur)
+#define del_XPVIV(p) del_body_allocated(p, XPVIV, xpviv, xpv_cur)
-#define new_XPVNV() (void*)new_xpvnv()
-#define del_XPVNV(p) del_xpvnv((XPVNV *)p)
+#define new_XPVNV() new_body(XPVNV, xpvnv)
+#define del_XPVNV(p) del_body(p, XPVNV, xpvnv)
-#define new_XPVCV() (void*)new_xpvcv()
-#define del_XPVCV(p) del_xpvcv((XPVCV *)p)
+#define new_XPVCV() new_body(XPVCV, xpvcv)
+#define del_XPVCV(p) del_body(p, XPVCV, xpvcv)
-#define new_XPVAV() (void*)new_xpvav()
-#define del_XPVAV(p) del_xpvav((XPVAV *)p)
+#define new_XPVAV() new_body_allocated(XPVAV, xpvav, xav_fill)
+#define del_XPVAV(p) del_body_allocated(p, XPVAV, xpvav, xav_fill)
-#define new_XPVHV() (void*)new_xpvhv()
-#define del_XPVHV(p) del_xpvhv((XPVHV *)p)
+#define new_XPVHV() new_body_allocated(XPVHV, xpvhv, xhv_fill)
+#define del_XPVHV(p) del_body_allocated(p, XPVHV, xpvhv, xhv_fill)
-#define new_XPVMG() (void*)new_xpvmg()
-#define del_XPVMG(p) del_xpvmg((XPVMG *)p)
+#define new_XPVMG() new_body(XPVMG, xpvmg)
+#define del_XPVMG(p) del_body(p, XPVMG, xpvmg)
-#define new_XPVGV() (void*)new_xpvgv()
-#define del_XPVGV(p) del_xpvgv((XPVGV *)p)
+#define new_XPVGV() new_body(XPVGV, xpvgv)
+#define del_XPVGV(p) del_body(p, XPVGV, xpvgv)
-#define new_XPVLV() (void*)new_xpvlv()
-#define del_XPVLV(p) del_xpvlv((XPVLV *)p)
+#define new_XPVLV() new_body(XPVLV, xpvlv)
+#define del_XPVLV(p) del_body(p, XPVLV, xpvlv)
-#define new_XPVBM() (void*)new_xpvbm()
-#define del_XPVBM(p) del_xpvbm((XPVBM *)p)
+#define new_XPVBM() new_body(XPVBM, xpvbm)
+#define del_XPVBM(p) del_body(p, XPVBM, xpvbm)
#endif /* PURIFY */
=cut
*/
-bool
+void
Perl_sv_upgrade(pTHX_ register SV *sv, U32 mt)
{
-
- char* pv;
- U32 cur;
- U32 len;
- IV iv;
- NV nv;
- MAGIC* magic;
- HV* stash;
+ void** old_body_arena;
+ size_t old_body_offset;
+ size_t old_body_length; /* Well, the length to copy. */
+ void* old_body;
+ bool zero_nv = TRUE;
+ void* new_body;
+ size_t new_body_length;
+ size_t new_body_offset;
+ void** new_body_arena;
+ void** new_body_arenaroot;
+ U32 old_type = SvTYPE(sv);
if (mt != SVt_PV && SvIsCOW(sv)) {
sv_force_normal_flags(sv, 0);
}
if (SvTYPE(sv) == mt)
- return TRUE;
+ return;
+
+ if (SvTYPE(sv) > mt)
+ Perl_croak(aTHX_ "sv_upgrade from type %d down to type %d",
+ (int)SvTYPE(sv), (int)mt);
+
+
+ old_body = SvANY(sv);
+ old_body_arena = 0;
+ old_body_offset = 0;
+ old_body_length = 0;
+ new_body_offset = 0;
+    new_body_length = ~(size_t)0;	/* sentinel: "not set yet" */
+
+ /* Copying structures onto other structures that have been neatly zeroed
+ has a subtle gotcha. Consider XPVMG
+
+ +------+------+------+------+------+-------+-------+
+ | NV | CUR | LEN | IV | MAGIC | STASH |
+ +------+------+------+------+------+-------+-------+
+ 0 4 8 12 16 20 24 28
+
+ where NVs are aligned to 8 bytes, so that sizeof that structure is
+ actually 32 bytes long, with 4 bytes of padding at the end:
- pv = NULL;
- cur = 0;
- len = 0;
- iv = 0;
- nv = 0.0;
- magic = NULL;
- stash = Nullhv;
+ +------+------+------+------+------+-------+-------+------+
+ | NV | CUR | LEN | IV | MAGIC | STASH | ??? |
+ +------+------+------+------+------+-------+-------+------+
+ 0 4 8 12 16 20 24 28 32
+
+ so what happens if you allocate memory for this structure:
+
+ +------+------+------+------+------+-------+-------+------+------+...
+ | NV | CUR | LEN | IV | MAGIC | STASH | GP | NAME |
+ +------+------+------+------+------+-------+-------+------+------+...
+ 0 4 8 12 16 20 24 28 32 36
+
+ zero it, then copy sizeof(XPVMG) bytes on top of it? Not quite what you
+ expect, because you copy the area marked ??? onto GP. Now, ??? may have
+ started out as zero once, but it's quite possible that it isn't. So now,
+ rather than a nicely zeroed GP, you have it pointing somewhere random.
+ Bugs ensue.
+
+ (In fact, GP ends up pointing at a previous GP structure, because the
+       principal cause of the padding in XPVMG getting garbage is a copy of
+ sizeof(XPVMG) bytes from a XPVGV structure in sv_unglob)
+
+ So we are careful and work out the size of used parts of all the
+ structures. */
switch (SvTYPE(sv)) {
case SVt_NULL:
break;
case SVt_IV:
- iv = SvIVX(sv);
if (mt == SVt_NV)
mt = SVt_PVNV;
else if (mt < SVt_PVIV)
mt = SVt_PVIV;
+ old_body_offset = STRUCT_OFFSET(XPVIV, xiv_iv);
+ old_body_length = sizeof(IV);
break;
case SVt_NV:
- nv = SvNVX(sv);
- del_XNV(SvANY(sv));
+ old_body_arena = (void **) &PL_xnv_root;
+ old_body_length = sizeof(NV);
+ zero_nv = FALSE;
+
if (mt < SVt_PVNV)
mt = SVt_PVNV;
break;
case SVt_RV:
- pv = (char*)SvRV(sv);
break;
case SVt_PV:
- pv = SvPVX_mutable(sv);
- cur = SvCUR(sv);
- len = SvLEN(sv);
- del_XPV(SvANY(sv));
+ old_body_arena = (void **) &PL_xpv_root;
+ old_body_offset = STRUCT_OFFSET(XPV, xpv_cur)
+ - STRUCT_OFFSET(xpv_allocated, xpv_cur);
+ old_body_length = STRUCT_OFFSET(XPV, xpv_len)
+ + sizeof (((XPV*)SvANY(sv))->xpv_len)
+ - old_body_offset;
if (mt <= SVt_IV)
mt = SVt_PVIV;
else if (mt == SVt_NV)
mt = SVt_PVNV;
break;
case SVt_PVIV:
- pv = SvPVX_mutable(sv);
- cur = SvCUR(sv);
- len = SvLEN(sv);
- iv = SvIVX(sv);
- del_XPVIV(SvANY(sv));
+ old_body_arena = (void **) &PL_xpviv_root;
+ old_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur)
+ - STRUCT_OFFSET(xpviv_allocated, xpv_cur);
+ old_body_length = STRUCT_OFFSET(XPVIV, xiv_u)
+ + sizeof (((XPVIV*)SvANY(sv))->xiv_u)
+ - old_body_offset;
break;
case SVt_PVNV:
- pv = SvPVX_mutable(sv);
- cur = SvCUR(sv);
- len = SvLEN(sv);
- iv = SvIVX(sv);
- nv = SvNVX(sv);
- del_XPVNV(SvANY(sv));
+ old_body_arena = (void **) &PL_xpvnv_root;
+ old_body_length = STRUCT_OFFSET(XPVNV, xiv_u)
+ + sizeof (((XPVNV*)SvANY(sv))->xiv_u);
+ zero_nv = FALSE;
break;
case SVt_PVMG:
/* Because the XPVMG of PL_mess_sv isn't allocated from the arena,
Given that it only has meaning inside the pad, it shouldn't be set
on anything that can get upgraded. */
assert((SvFLAGS(sv) & SVpad_TYPED) == 0);
- pv = SvPVX_mutable(sv);
- cur = SvCUR(sv);
- len = SvLEN(sv);
- iv = SvIVX(sv);
- nv = SvNVX(sv);
- magic = SvMAGIC(sv);
- stash = SvSTASH(sv);
- del_XPVMG(SvANY(sv));
+ old_body_arena = (void **) &PL_xpvmg_root;
+ old_body_length = STRUCT_OFFSET(XPVMG, xmg_stash)
+ + sizeof (((XPVMG*)SvANY(sv))->xmg_stash);
+ zero_nv = FALSE;
break;
default:
Perl_croak(aTHX_ "Can't upgrade that kind of scalar");
case SVt_NULL:
Perl_croak(aTHX_ "Can't upgrade to undef");
case SVt_IV:
+ assert(old_type == SVt_NULL);
SvANY(sv) = (XPVIV*)((char*)&(sv->sv_u.svu_iv) - STRUCT_OFFSET(XPVIV, xiv_iv));
- SvIV_set(sv, iv);
+ SvIV_set(sv, 0);
break;
case SVt_NV:
+ assert(old_type == SVt_NULL);
SvANY(sv) = new_XNV();
- SvNV_set(sv, nv);
+ SvNV_set(sv, 0);
break;
case SVt_RV:
+ assert(old_type == SVt_NULL);
SvANY(sv) = &sv->sv_u.svu_rv;
- SvRV_set(sv, (SV*)pv);
+ SvRV_set(sv, 0);
break;
case SVt_PVHV:
SvANY(sv) = new_XPVHV();
HvMAX(sv) = 0;
HvTOTALKEYS(sv) = 0;
- /* Fall through... */
- if (0) {
- case SVt_PVAV:
- SvANY(sv) = new_XPVAV();
- AvMAX(sv) = -1;
- AvFILLp(sv) = -1;
- AvALLOC(sv) = 0;
- AvREAL_only(sv);
- }
- /* to here. */
- /* XXX? Only SVt_NULL is ever upgraded to AV or HV? */
- assert(!pv);
- /* FIXME. Should be able to remove all this if()... if the above
- assertion is genuinely always true. */
- if(SvOOK(sv)) {
- pv -= iv;
- SvFLAGS(sv) &= ~SVf_OOK;
- }
- Safefree(pv);
+ goto hv_av_common;
+
+ case SVt_PVAV:
+ SvANY(sv) = new_XPVAV();
+ AvMAX(sv) = -1;
+ AvFILLp(sv) = -1;
+ AvALLOC(sv) = 0;
+ AvREAL_only(sv);
+
+ hv_av_common:
+ /* SVt_NULL isn't the only thing upgraded to AV or HV.
+ The target created by newSVrv also is, and it can have magic.
+ However, it never has SvPVX set.
+ */
+ if (old_type >= SVt_RV) {
+ assert(SvPVX_const(sv) == 0);
+ }
+
+ /* Could put this in the else clause below, as PVMG must have SvPVX
+ 0 already (the assertion above) */
SvPV_set(sv, (char*)0);
- SvMAGIC_set(sv, magic);
- SvSTASH_set(sv, stash);
+
+ if (old_type >= SVt_PVMG) {
+ SvMAGIC_set(sv, ((XPVMG*)old_body)->xmg_magic);
+ SvSTASH_set(sv, ((XPVMG*)old_body)->xmg_stash);
+ } else {
+ SvMAGIC_set(sv, 0);
+ SvSTASH_set(sv, 0);
+ }
break;
case SVt_PVIO:
- SvANY(sv) = new_XPVIO();
- Zero(SvANY(sv), 1, XPVIO);
- IoPAGE_LEN(sv) = 60;
- goto set_magic_common;
+ new_body = new_XPVIO();
+ new_body_length = sizeof(XPVIO);
+ goto zero;
case SVt_PVFM:
- SvANY(sv) = new_XPVFM();
- Zero(SvANY(sv), 1, XPVFM);
- goto set_magic_common;
+ new_body = new_XPVFM();
+ new_body_length = sizeof(XPVFM);
+ goto zero;
+
case SVt_PVBM:
- SvANY(sv) = new_XPVBM();
- BmRARE(sv) = 0;
- BmUSEFUL(sv) = 0;
- BmPREVIOUS(sv) = 0;
- goto set_magic_common;
+ new_body_length = sizeof(XPVBM);
+ new_body_arena = (void **) &PL_xpvbm_root;
+ new_body_arenaroot = (void **) &PL_xpvbm_arenaroot;
+ goto new_body;
case SVt_PVGV:
- SvANY(sv) = new_XPVGV();
- GvGP(sv) = 0;
- GvNAME(sv) = 0;
- GvNAMELEN(sv) = 0;
- GvSTASH(sv) = 0;
- GvFLAGS(sv) = 0;
- goto set_magic_common;
+ new_body_length = sizeof(XPVGV);
+ new_body_arena = (void **) &PL_xpvgv_root;
+ new_body_arenaroot = (void **) &PL_xpvgv_arenaroot;
+ goto new_body;
case SVt_PVCV:
- SvANY(sv) = new_XPVCV();
- Zero(SvANY(sv), 1, XPVCV);
- goto set_magic_common;
+ new_body_length = sizeof(XPVCV);
+ new_body_arena = (void **) &PL_xpvcv_root;
+ new_body_arenaroot = (void **) &PL_xpvcv_arenaroot;
+ goto new_body;
case SVt_PVLV:
- SvANY(sv) = new_XPVLV();
- LvTARGOFF(sv) = 0;
- LvTARGLEN(sv) = 0;
- LvTARG(sv) = 0;
- LvTYPE(sv) = 0;
- GvGP(sv) = 0;
- GvNAME(sv) = 0;
- GvNAMELEN(sv) = 0;
- GvSTASH(sv) = 0;
- GvFLAGS(sv) = 0;
- /* Fall through. */
- if (0) {
- case SVt_PVMG:
- SvANY(sv) = new_XPVMG();
- }
- set_magic_common:
- SvMAGIC_set(sv, magic);
- SvSTASH_set(sv, stash);
- /* Fall through. */
- if (0) {
- case SVt_PVNV:
- SvANY(sv) = new_XPVNV();
- }
- SvNV_set(sv, nv);
- /* Fall through. */
- if (0) {
- case SVt_PVIV:
- SvANY(sv) = new_XPVIV();
- if (SvNIOK(sv))
- (void)SvIOK_on(sv);
- SvNOK_off(sv);
- }
- SvIV_set(sv, iv);
- /* Fall through. */
- if (0) {
- case SVt_PV:
- SvANY(sv) = new_XPV();
+ new_body_length = sizeof(XPVLV);
+ new_body_arena = (void **) &PL_xpvlv_root;
+ new_body_arenaroot = (void **) &PL_xpvlv_arenaroot;
+ goto new_body;
+ case SVt_PVMG:
+ new_body_length = sizeof(XPVMG);
+ new_body_arena = (void **) &PL_xpvmg_root;
+ new_body_arenaroot = (void **) &PL_xpvmg_arenaroot;
+ goto new_body;
+ case SVt_PVNV:
+ new_body_length = sizeof(XPVNV);
+ new_body_arena = (void **) &PL_xpvnv_root;
+ new_body_arenaroot = (void **) &PL_xpvnv_arenaroot;
+ goto new_body;
+ case SVt_PVIV:
+ new_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur)
+ - STRUCT_OFFSET(xpviv_allocated, xpv_cur);
+ new_body_length = sizeof(XPVIV) - new_body_offset;
+ new_body_arena = (void **) &PL_xpviv_root;
+ new_body_arenaroot = (void **) &PL_xpviv_arenaroot;
+ /* XXX Is this still needed? Was it ever needed? Surely as there is
+ no route from NV to PVIV, NOK can never be true */
+ if (SvNIOK(sv))
+ (void)SvIOK_on(sv);
+ SvNOK_off(sv);
+ goto new_body_no_NV;
+ case SVt_PV:
+ new_body_offset = STRUCT_OFFSET(XPV, xpv_cur)
+ - STRUCT_OFFSET(xpv_allocated, xpv_cur);
+ new_body_length = sizeof(XPV) - new_body_offset;
+ new_body_arena = (void **) &PL_xpv_root;
+ new_body_arenaroot = (void **) &PL_xpv_arenaroot;
+ new_body_no_NV:
+ /* PV and PVIV don't have an NV slot. */
+ zero_nv = FALSE;
+
+ {
+ new_body:
+ assert(new_body_length);
+#ifndef PURIFY
+ new_body = S_new_body(aTHX_ new_body_arenaroot, new_body_arena,
+ new_body_length, new_body_offset);
+#else
+ /* We always allocated the full length item with PURIFY */
+ new_body_length += new_body_offset;
+ new_body_offset = 0;
+ new_body = my_safemalloc(new_body_length);
+
+#endif
+ zero:
+ Zero(((char *)new_body) + new_body_offset, new_body_length, char);
+ SvANY(sv) = new_body;
+
+ if (old_body_length) {
+ Copy((char *)old_body + old_body_offset,
+ (char *)new_body + old_body_offset,
+ old_body_length, char);
+ }
+
+ /* FIXME - add a Configure test to determine if NV 0.0 is actually
+ all bits zero. If it is, we can skip this initialisation. */
+ if (zero_nv)
+ SvNV_set(sv, 0);
+
+ if (mt == SVt_PVIO)
+ IoPAGE_LEN(sv) = 60;
+ if (old_type < SVt_RV)
+ SvPV_set(sv, 0);
}
- SvPV_set(sv, pv);
- SvCUR_set(sv, cur);
- SvLEN_set(sv, len);
break;
+ default:
+	Perl_croak(aTHX_ "panic: sv_upgrade to unknown type %lu",
+		   (unsigned long)mt);
+ }
+
+
+ if (old_body_arena) {
+#ifdef PURIFY
+ my_safefree(old_body);
+#else
+ S_del_body(aTHX_ old_body, old_body_arena, old_body_offset);
+#endif
}
- return TRUE;
}
/*
sv_unref(sv);
if (SvTYPE(sv) < SVt_PV) {
sv_upgrade(sv, SVt_PV);
- s = SvPVX(sv);
+ s = SvPVX_mutable(sv);
}
else if (SvOOK(sv)) { /* pv is offset? */
sv_backoff(sv);
- s = SvPVX(sv);
+ s = SvPVX_mutable(sv);
if (newlen > SvLEN(sv))
newlen += 10 * (newlen - SvCUR(sv)); /* avoid copy each time */
#ifdef HAS_64K_LIMIT
newlen = PERL_STRLEN_ROUNDUP(newlen);
if (SvLEN(sv) && s) {
#ifdef MYMALLOC
- const STRLEN l = malloced_size((void*)SvPVX(sv));
+ const STRLEN l = malloced_size((void*)SvPVX_const(sv));
if (newlen <= l) {
SvLEN_set(sv, l);
return s;
len = SvCUR(sv);
}
else if (SvPOKp(sv))
- sbegin = SvPV(sv, len);
+ sbegin = SvPV_const(sv, len);
else
return SvFLAGS(sv) & (SVf_NOK|SVp_NOK|SVf_IOK|SVp_IOK);
return grok_number(sbegin, len, NULL);
SV *tsv, *origsv;
char tbuf[64]; /* Must fit sprintf/Gconvert of longest IV/NV */
char *tmpbuf = tbuf;
- STRLEN n_a;
-
- if (!lp) {
- /* Saves needing to do lots of if (!lp) checks below */
- lp = &n_a;
- }
if (!sv) {
- *lp = 0;
+ if (lp)
+ *lp = 0;
return (char *)"";
}
if (SvGMAGICAL(sv)) {
if (flags & SV_GMAGIC)
mg_get(sv);
if (SvPOKp(sv)) {
- *lp = SvCUR(sv);
+ if (lp)
+ *lp = SvCUR(sv);
+ if (flags & SV_MUTABLE_RETURN)
+ return SvPVX_mutable(sv);
if (flags & SV_CONST_RETURN)
return (char *)SvPVX_const(sv);
return SvPVX(sv);
if (ckWARN(WARN_UNINITIALIZED) && !PL_localizing)
report_uninit(sv);
}
- *lp = 0;
+ if (lp)
+ *lp = 0;
return (char *)"";
}
}
register const char *typestr;
if (SvAMAGIC(sv) && (tmpstr=AMG_CALLun(sv,string)) &&
(!SvROK(tmpstr) || (SvRV(tmpstr) != SvRV(sv)))) {
- char *pv = SvPV(tmpstr, *lp);
+ /* Unwrap this: */
+ /* char *pv = lp ? SvPV(tmpstr, *lp) : SvPV_nolen(tmpstr); */
+
+ char *pv;
+ if ((SvFLAGS(tmpstr) & (SVf_POK)) == SVf_POK) {
+ if (flags & SV_CONST_RETURN) {
+ pv = (char *) SvPVX_const(tmpstr);
+ } else {
+ pv = (flags & SV_MUTABLE_RETURN)
+ ? SvPVX_mutable(tmpstr) : SvPVX(tmpstr);
+ }
+ if (lp)
+ *lp = SvCUR(tmpstr);
+ } else {
+ pv = sv_2pv_flags(tmpstr, lp, flags);
+ }
if (SvUTF8(tmpstr))
SvUTF8_on(sv);
else
SvUTF8_on(origsv);
else
SvUTF8_off(origsv);
- *lp = mg->mg_len;
+ if (lp)
+ *lp = mg->mg_len;
return mg->mg_ptr;
}
/* Fall through */
Perl_sv_setpvf(aTHX_ tsv, "%s(0x%"UVxf")", typestr, PTR2UV(sv));
goto tokensaveref;
}
- *lp = strlen(typestr);
+ if (lp)
+ *lp = strlen(typestr);
return (char *)typestr;
}
if (SvREADONLY(sv) && !SvOK(sv)) {
if (ckWARN(WARN_UNINITIALIZED))
report_uninit(sv);
- *lp = 0;
+ if (lp)
+ *lp = 0;
return (char *)"";
}
}
ptr = uiv_2buf(buf, 0, SvUVX(sv), 1, &ebuf);
else
ptr = uiv_2buf(buf, SvIVX(sv), 0, 0, &ebuf);
- SvGROW(sv, (STRLEN)(ebuf - ptr + 1)); /* inlined from sv_setpvn */
+ /* inlined from sv_setpvn */
+ SvGROW_mutable(sv, (STRLEN)(ebuf - ptr + 1));
Move(ptr,SvPVX_mutable(sv),ebuf - ptr,char);
SvCUR_set(sv, ebuf - ptr);
s = SvEND(sv);
if (SvTYPE(sv) < SVt_PVNV)
sv_upgrade(sv, SVt_PVNV);
/* The +20 is pure guesswork. Configure test needed. --jhi */
- SvGROW(sv, NV_DIG + 20);
- s = SvPVX_mutable(sv);
+ s = SvGROW_mutable(sv, NV_DIG + 20);
olderrno = errno; /* some Xenix systems wipe out errno here */
#ifdef apollo
if (SvNVX(sv) == 0.0)
if (ckWARN(WARN_UNINITIALIZED)
&& !PL_localizing && !(SvFLAGS(sv) & SVs_PADTMP))
report_uninit(sv);
+ if (lp)
*lp = 0;
if (SvTYPE(sv) < SVt_PV)
/* Typically the caller expects that sv_any is not NULL now. */
sv_upgrade(sv, SVt_PV);
return (char *)"";
}
- *lp = s - SvPVX_const(sv);
- SvCUR_set(sv, *lp);
+ {
+ STRLEN len = s - SvPVX_const(sv);
+ if (lp)
+ *lp = len;
+ SvCUR_set(sv, len);
+ }
SvPOK_on(sv);
DEBUG_c(PerlIO_printf(Perl_debug_log, "0x%"UVxf" 2pv(%s)\n",
PTR2UV(sv),SvPVX_const(sv)));
if (flags & SV_CONST_RETURN)
return (char *)SvPVX_const(sv);
+ if (flags & SV_MUTABLE_RETURN)
+ return SvPVX_mutable(sv);
return SvPVX(sv);
tokensave:
if (!tsv)
tsv = newSVpv(tmpbuf, 0);
sv_2mortal(tsv);
- *lp = SvCUR(tsv);
+ if (lp)
+ *lp = SvCUR(tsv);
return SvPVX(tsv);
}
else {
}
#endif
SvUPGRADE(sv, SVt_PV);
- *lp = len;
- s = SvGROW(sv, len + 1);
+ if (lp)
+ *lp = len;
+ s = SvGROW_mutable(sv, len + 1);
SvCUR_set(sv, len);
SvPOKp_on(sv);
return strcpy(s, t);
Perl_sv_2pvbyte(pTHX_ register SV *sv, STRLEN *lp)
{
sv_utf8_downgrade(sv,0);
- return SvPV(sv,*lp);
+ return lp ? SvPV(sv,*lp) : SvPV_nolen(sv);
}
/*
* had a FLAG in SVs to signal if there are any hibit
* chars in the PV. Given that there isn't such a flag
* make the loop as fast as possible. */
- U8 *s = (U8 *) SvPVX(sv);
- U8 *e = (U8 *) SvEND(sv);
- U8 *t = s;
+ const U8 *s = (U8 *) SvPVX_const(sv);
+ const U8 *e = (U8 *) SvEND(sv);
+ const U8 *t = s;
int hibit = 0;
while (t < e) {
}
if (hibit) {
STRLEN len = SvCUR(sv) + 1; /* Plus the \0 */
- s = bytes_to_utf8((U8*)s, &len);
+ U8 *recoded = bytes_to_utf8((U8*)s, &len);
SvPV_free(sv); /* No longer using what was there before. */
- SvPV_set(sv, (char*)s);
+ SvPV_set(sv, (char*)recoded);
SvCUR_set(sv, len - 1);
SvLEN_set(sv, len); /* No longer know the real size. */
}
Perl_sv_utf8_decode(pTHX_ register SV *sv)
{
if (SvPOKp(sv)) {
- U8 *c;
- U8 *e;
+ const U8 *c;
+ const U8 *e;
/* The octets may have got themselves encoded - get them back as
* bytes
/* it is actually just a matter of turning the utf8 flag on, but
* we want to make sure everything inside is valid utf8 first.
*/
- c = (U8 *) SvPVX(sv);
+ c = (const U8 *) SvPVX_const(sv);
if (!is_utf8_string(c, SvCUR(sv)+1))
return FALSE;
- e = (U8 *) SvEND(sv);
+ e = (const U8 *) SvEND(sv);
while (c < e) {
U8 ch = *c++;
if (!UTF8_IS_INVARIANT(ch)) {
}
break;
case SVt_PVFM:
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
if ((SvFLAGS(sstr) & CAN_COW_MASK) == CAN_COW_FLAGS) {
if (dtype < SVt_PVIV)
sv_upgrade(dstr, SVt_PVIV);
}
if (!intro)
cv_ckproto(cv, (GV*)dstr,
- SvPOK(sref) ? SvPVX(sref) : Nullch);
+ SvPOK(sref)
+ ? SvPVX_const(sref) : Nullch);
}
GvCV(dstr) = (CV*)sref;
GvCVGEN(dstr) = 0; /* Switch off cacheness. */
if (
/* We're not already COW */
((sflags & (SVf_FAKE | SVf_READONLY)) != (SVf_FAKE | SVf_READONLY)
-#ifndef PERL_COPY_ON_WRITE
+#ifndef PERL_OLD_COPY_ON_WRITE
/* or we are, but dstr isn't a suitable target. */
|| (SvFLAGS(dstr) & CAN_COW_MASK) != CAN_COW_FLAGS
#endif
SvLEN(sstr) && /* and really is a string */
/* and won't be needed again, potentially */
!(PL_op && PL_op->op_type == OP_AASSIGN))
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
&& !((sflags & CAN_COW_MASK) == CAN_COW_FLAGS
&& (SvFLAGS(dstr) & CAN_COW_MASK) == CAN_COW_FLAGS
&& SvTYPE(sstr) >= SVt_PVIV)
SvCUR_set(dstr, len);
*SvEND(dstr) = '\0';
} else {
- /* If PERL_COPY_ON_WRITE is not defined, then isSwipe will always
+ /* If PERL_OLD_COPY_ON_WRITE is not defined, then isSwipe will always
be true in here. */
/* Either it's a shared hash key, or it's suitable for
copy-on-write or we can swipe the string. */
sv_dump(sstr);
sv_dump(dstr);
}
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
if (!isSwipe) {
/* I believe I should acquire a global SV mutex if
it's a COW sv (not a shared hash key) to stop
/* making another shared SV. */
STRLEN cur = SvCUR(sstr);
STRLEN len = SvLEN(sstr);
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
if (len) {
assert (SvTYPE(dstr) >= SVt_PVIV);
/* SvIsCOW_normal */
/* splice us in between source and next-after-source. */
SV_COW_NEXT_SV_SET(dstr, SV_COW_NEXT_SV(sstr));
SV_COW_NEXT_SV_SET(sstr, dstr);
- SvPV_set(dstr, SvPVX(sstr));
+ SvPV_set(dstr, SvPVX_mutable(sstr));
} else
#endif
{
/* SvIsCOW_shared_hash */
- UV hash = SvSHARED_HASH(sstr);
DEBUG_C(PerlIO_printf(Perl_debug_log,
"Copy on write: Sharing hash\n"));
- assert (SvTYPE(dstr) >= SVt_PVIV);
+ assert (SvTYPE(dstr) >= SVt_PV);
SvPV_set(dstr,
- sharepvn(SvPVX_const(sstr),
- (sflags & SVf_UTF8?-cur:cur), hash));
- SvUV_set(dstr, hash);
- }
+ HEK_KEY(share_hek_hek(SvSHARED_HEK_FROM_PV(SvPVX_const(sstr)))));
+ }
SvLEN_set(dstr, len);
SvCUR_set(dstr, cur);
SvREADONLY_on(dstr);
}
else
{ /* Passes the swipe test. */
- SvPV_set(dstr, SvPVX(sstr));
+ SvPV_set(dstr, SvPVX_mutable(sstr));
SvLEN_set(dstr, SvLEN(sstr));
SvCUR_set(dstr, SvCUR(sstr));
}
if (sflags & SVf_UTF8)
SvUTF8_on(dstr);
- /*SUPPRESS 560*/
if (sflags & SVp_NOK) {
SvNOKp_on(dstr);
if (sflags & SVf_NOK)
SvSETMAGIC(dstr);
}
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
SV *
Perl_sv_setsv_cow(pTHX_ SV *dstr, SV *sstr)
{
if (SvLEN(sstr) == 0) {
/* source is a COW shared hash key. */
- UV hash = SvSHARED_HASH(sstr);
DEBUG_C(PerlIO_printf(Perl_debug_log,
"Fast copy on write: Sharing hash\n"));
- SvUV_set(dstr, hash);
- new_pv = sharepvn(SvPVX_const(sstr), (SvUTF8(sstr)?-cur:cur), hash);
+ new_pv = HEK_KEY(share_hek_hek(SvSHARED_HEK_FROM_PV(SvPVX_const(sstr))));
goto common_exit;
}
SV_COW_NEXT_SV_SET(dstr, SV_COW_NEXT_SV(sstr));
SV_COW_NEXT_SV_SET(dstr, sstr);
}
SV_COW_NEXT_SV_SET(sstr, dstr);
- new_pv = SvPVX(sstr);
+ new_pv = SvPVX_mutable(sstr);
common_exit:
SvPV_set(dstr, new_pv);
}
SvUPGRADE(sv, SVt_PV);
- SvGROW(sv, len + 1);
- dptr = SvPVX(sv);
+ dptr = SvGROW(sv, len + 1);
Move(ptr,dptr,len,char);
dptr[len] = '\0';
SvCUR_set(sv, len);
SvSETMAGIC(sv);
}
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
/* Need to do this *after* making the SV normal, as we need the buffer
pointer to remain valid until after we've copied it. If we let go too early,
another thread could invalidate it by unsharing last of the same hash key
(which it can do by means other than releasing copy-on-write Svs)
or by changing the other copy-on-write SVs in the loop. */
STATIC void
-S_sv_release_COW(pTHX_ register SV *sv, const char *pvx, STRLEN cur, STRLEN len,
- U32 hash, SV *after)
+S_sv_release_COW(pTHX_ register SV *sv, const char *pvx, STRLEN len, SV *after)
{
if (len) { /* this SV was SvIsCOW_normal(sv) */
/* we need to find the SV pointing to us. */
SV_COW_NEXT_SV_SET(current, after);
}
} else {
- unsharepvn(pvx, SvUTF8(sv) ? -(I32)cur : cur, hash);
+ unshare_hek(SvSHARED_HEK_FROM_PV(pvx));
}
}
void
Perl_sv_force_normal_flags(pTHX_ register SV *sv, U32 flags)
{
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
if (SvREADONLY(sv)) {
/* At this point I believe I should acquire a global SV mutex. */
if (SvFAKE(sv)) {
- const char *pvx = SvPVX_const(sv);
- const STRLEN len = SvLEN(sv);
- const STRLEN cur = SvCUR(sv);
- const U32 hash = SvSHARED_HASH(sv);
- SV *const next = SV_COW_NEXT_SV(sv); /* next COW sv in the loop. */
+ const char *pvx = SvPVX_const(sv);
+ const STRLEN len = SvLEN(sv);
+ const STRLEN cur = SvCUR(sv);
+ SV * const next = SV_COW_NEXT_SV(sv); /* next COW sv in the loop. */
if (DEBUG_C_TEST) {
PerlIO_printf(Perl_debug_log,
"Copy on write: Force normal %ld\n",
SvCUR_set(sv, cur);
*SvEND(sv) = '\0';
}
- sv_release_COW(sv, pvx, cur, len, hash, next);
+ sv_release_COW(sv, pvx, len, next);
if (DEBUG_C_TEST) {
sv_dump(sv);
}
if (SvREADONLY(sv)) {
if (SvFAKE(sv)) {
const char *pvx = SvPVX_const(sv);
- const int is_utf8 = SvUTF8(sv);
const STRLEN len = SvCUR(sv);
- const U32 hash = SvSHARED_HASH(sv);
SvFAKE_off(sv);
SvREADONLY_off(sv);
SvPV_set(sv, Nullch);
SvGROW(sv, len + 1);
Move(pvx,SvPVX_const(sv),len,char);
*SvEND(sv) = '\0';
- unsharepvn(pvx, is_utf8 ? -(I32)len : len, hash);
+ unshare_hek(SvSHARED_HEK_FROM_PV(pvx));
}
else if (IN_PERL_RUNTIME)
Perl_croak(aTHX_ PL_no_modify);
if (!SvOOK(sv)) {
if (!SvLEN(sv)) { /* make copy of shared string */
const char *pvx = SvPVX_const(sv);
- STRLEN len = SvCUR(sv);
+ const STRLEN len = SvCUR(sv);
SvGROW(sv, len + 1);
Move(pvx,SvPVX_const(sv),len,char);
*SvEND(sv) = '\0';
SV* csv = sv_2mortal(newSVpvn(spv, slen));
sv_utf8_upgrade(csv);
- spv = SvPV(csv, slen);
+ spv = SvPV_const(csv, slen);
}
else
sv_utf8_upgrade_nomg(dsv);
const MGVTBL *vtable = 0;
MAGIC* mg;
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
if (SvIsCOW(sv))
sv_force_normal_flags(sv, 0);
#endif
(void)SvPOK_only_UTF8(bigstr);
if (offset + len > curlen) {
SvGROW(bigstr, offset+len+1);
- Zero(SvPVX_const(bigstr)+curlen, offset+len-curlen, char);
+ Zero(SvPVX(bigstr)+curlen, offset+len-curlen, char);
SvCUR_set(bigstr, offset+len);
}
*mid = '\0';
SvCUR_set(bigstr, mid - big);
}
- /*SUPPRESS 560*/
else if ((i = mid - big)) { /* faster from front */
midend -= littlelen;
mid = midend;
sv->sv_flags = nsv->sv_flags;
sv->sv_any = nsv->sv_any;
sv->sv_refcnt = nsv->sv_refcnt;
+ sv->sv_u = nsv->sv_u;
#else
StructCopy(nsv,sv,SV);
#endif
}
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
if (SvIsCOW_normal(nsv)) {
/* We need to follow the pointers around the loop to make the
previous SV point to sv, rather than nsv. */
freescalar:
/* Don't bother with SvOOK_off(sv); as we're only going to free it. */
if (SvOOK(sv)) {
- SvPV_set(sv, SvPVX(sv) - SvIVX(sv));
+ SvPV_set(sv, SvPVX_mutable(sv) - SvIVX(sv));
/* Don't even bother with turning off the OOK flag. */
}
/* FALL THROUGH */
else
SvREFCNT_dec(SvRV(sv));
}
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
else if (SvPVX_const(sv)) {
if (SvIsCOW(sv)) {
/* I believe I need to grab the global SV mutex here and
PerlIO_printf(Perl_debug_log, "Copy on write: clear\n");
sv_dump(sv);
}
- sv_release_COW(sv, SvPVX_const(sv), SvCUR(sv), SvLEN(sv),
- SvUVX(sv), SV_COW_NEXT_SV(sv));
+ sv_release_COW(sv, SvPVX_const(sv), SvLEN(sv),
+ SV_COW_NEXT_SV(sv));
/* And drop it here. */
SvFAKE_off(sv);
} else if (SvLEN(sv)) {
else if (SvPVX_const(sv) && SvLEN(sv))
Safefree(SvPVX_const(sv));
else if (SvPVX_const(sv) && SvREADONLY(sv) && SvFAKE(sv)) {
- unsharepvn(SvPVX_const(sv),
- SvUTF8(sv) ? -(I32)SvCUR(sv) : SvCUR(sv),
- SvUVX(sv));
+ unshare_hek(SvSHARED_HEK_FROM_PV(SvPVX_const(sv)));
SvFAKE_off(sv);
}
#endif
*
*/
STATIC bool
-S_utf8_mg_pos_init(pTHX_ SV *sv, MAGIC **mgp, STRLEN **cachep, I32 i, I32 offsetp, U8 *s, U8 *start)
+S_utf8_mg_pos_init(pTHX_ SV *sv, MAGIC **mgp, STRLEN **cachep, I32 i,
+ I32 offsetp, const U8 *s, const U8 *start)
{
bool found = FALSE;
*
*/
STATIC bool
-S_utf8_mg_pos(pTHX_ SV *sv, MAGIC **mgp, STRLEN **cachep, I32 i, I32 *offsetp, I32 uoff, U8 **sp, U8 *start, U8 *send)
+S_utf8_mg_pos(pTHX_ SV *sv, MAGIC **mgp, STRLEN **cachep, I32 i, I32 *offsetp, I32 uoff, const U8 **sp, const U8 *start, const U8 *send)
{
bool found = FALSE;
void
Perl_sv_pos_u2b(pTHX_ register SV *sv, I32* offsetp, I32* lenp)
{
- U8 *start;
+ const U8 *start;
STRLEN len;
if (!sv)
return;
- start = (U8*)SvPV(sv, len);
+ start = (U8*)SvPV_const(sv, len);
if (len) {
STRLEN boffset = 0;
STRLEN *cache = 0;
- U8 *s = start;
- I32 uoffset = *offsetp;
- U8 *send = s + len;
- MAGIC *mg = 0;
- bool found = FALSE;
+ const U8 *s = start;
+ I32 uoffset = *offsetp;
+ const U8 *send = s + len;
+ MAGIC *mg = 0;
+ bool found = FALSE;
if (utf8_mg_pos(sv, &mg, &cache, 0, offsetp, *offsetp, &s, start, send))
found = TRUE;
void
Perl_sv_pos_b2u(pTHX_ register SV* sv, I32* offsetp)
{
- U8* s;
+ const U8* s;
STRLEN len;
if (!sv)
return;
- s = (U8*)SvPV(sv, len);
+ s = (const U8*)SvPV_const(sv, len);
if ((I32)len < *offsetp)
Perl_croak(aTHX_ "panic: sv_pos_b2u: bad byte offset");
else {
- U8* send = s + *offsetp;
+ const U8* send = s + *offsetp;
MAGIC* mg = NULL;
STRLEN *cache = NULL;
STRLEN backw = cache[1] - *offsetp;
if (!(forw < 2 * backw)) {
- U8 *p = s + cache[1];
+ const U8 *p = s + cache[1];
STRLEN ubackw = 0;
cache[1] -= backw;
if (SvUTF8(sv1)) {
svrecode = newSVpvn(pv2, cur2);
sv_recode_to_utf8(svrecode, PL_encoding);
- pv2 = SvPV(svrecode, cur2);
+ pv2 = SvPV_const(svrecode, cur2);
}
else {
svrecode = newSVpvn(pv1, cur1);
sv_recode_to_utf8(svrecode, PL_encoding);
- pv1 = SvPV(svrecode, cur1);
+ pv1 = SvPV_const(svrecode, cur1);
}
/* Now both are in UTF-8. */
if (cur1 != cur2) {
if (PL_encoding) {
svrecode = newSVpvn(pv2, cur2);
sv_recode_to_utf8(svrecode, PL_encoding);
- pv2 = SvPV(svrecode, cur2);
+ pv2 = SvPV_const(svrecode, cur2);
}
else {
pv2 = tpv = (char*)bytes_to_utf8((const U8*)pv2, &cur2);
if (PL_encoding) {
svrecode = newSVpvn(pv1, cur1);
sv_recode_to_utf8(svrecode, PL_encoding);
- pv1 = SvPV(svrecode, cur1);
+ pv1 = SvPV_const(svrecode, cur1);
}
else {
pv1 = tpv = (char*)bytes_to_utf8((const U8*)pv1, &cur1);
mg = SvMAGICAL(sv) ? mg_find(sv, PERL_MAGIC_collxfrm) : (MAGIC *) NULL;
if (!mg || !mg->mg_ptr || *(U32*)mg->mg_ptr != PL_collation_ix) {
- char *s, *xf;
+ const char *s;
+ char *xf;
STRLEN len, xlen;
if (mg)
Safefree(mg->mg_ptr);
- s = SvPV(sv, len);
+ s = SvPV_const(sv, len);
if ((xf = mem_collxfrm(s, len, &xlen))) {
if (SvREADONLY(sv)) {
SAVEFREEPV(xf);
Perl_croak(aTHX_ "Wide character in $/");
}
}
- rsptr = SvPV(PL_rs, rslen);
+ rsptr = SvPV_const(PL_rs, rslen);
}
}
if (!(flags & SVp_POK) || !*SvPVX_const(sv)) {
if ((flags & SVTYPEMASK) < SVt_PVIV)
- sv_upgrade(sv, SVt_IV);
+ sv_upgrade(sv, ((flags & SVTYPEMASK) > SVt_IV ? SVt_PVIV : SVt_IV));
(void)SvIOK_only(sv);
SvIV_set(sv, 1);
return;
if (!hash)
PERL_HASH(hash, src, len);
new_SV(sv);
- sv_upgrade(sv, SVt_PVIV);
+ sv_upgrade(sv, SVt_PV);
SvPV_set(sv, sharepvn(src, is_utf8?-len:len, hash));
SvCUR_set(sv, len);
- SvUV_set(sv, hash);
SvLEN_set(sv, 0);
SvREADONLY_on(sv);
SvFAKE_on(sv);
char *
Perl_sv_pv(pTHX_ SV *sv)
{
- STRLEN n_a;
-
if (SvPOK(sv))
return SvPVX(sv);
- return sv_2pv(sv, &n_a);
+ return sv_2pv(sv, 0);
}
/*
sv_force_normal_flags(sv, 0);
if (SvPOK(sv)) {
- *lp = SvCUR(sv);
+ if (lp)
+ *lp = SvCUR(sv);
}
else {
char *s;
-
+ STRLEN len;
+
if (SvREADONLY(sv) && !(flags & SV_MUTABLE_RETURN)) {
if (PL_op)
Perl_croak(aTHX_ "Can't coerce readonly %s to string in %s",
OP_NAME(PL_op));
}
else
- s = sv_2pv_flags(sv, lp, flags);
+ s = sv_2pv_flags(sv, &len, flags);
+ if (lp)
+ *lp = len;
+
if (s != SvPVX_const(sv)) { /* Almost, but not quite, sv_setpvn() */
- const STRLEN len = *lp;
-
if (SvROK(sv))
sv_unref(sv);
SvUPGRADE(sv, SVt_PV); /* Never FALSE */
Perl_sv_tainted(pTHX_ SV *sv)
{
if (SvTYPE(sv) >= SVt_PVMG && SvMAGIC(sv)) {
- MAGIC *mg = mg_find(sv, PERL_MAGIC_taint);
+ MAGIC * const mg = mg_find(sv, PERL_MAGIC_taint);
if (mg && ((mg->mg_len & 1) || ((mg->mg_len & 2) && mg->mg_obj == sv)))
return TRUE;
}
const char *eptr = Nullch;
STRLEN elen = 0;
SV *vecsv = Nullsv;
- U8 *vecstr = Null(U8*);
+ const U8 *vecstr = Null(U8*);
STRLEN veclen = 0;
char c = 0;
int i;
else
vecsv = (evix ? evix <= svmax : svix < svmax) ?
svargs[evix ? evix-1 : svix++] : &PL_sv_undef;
- dotstr = SvPVx(vecsv, dotstrlen);
+ dotstr = SvPV_const(vecsv, dotstrlen);
if (DO_UTF8(vecsv))
is_utf8 = TRUE;
}
if (args) {
vecsv = va_arg(*args, SV*);
- vecstr = (U8*)SvPVx(vecsv,veclen);
+ vecstr = (U8*)SvPV_const(vecsv,veclen);
vec_utf8 = DO_UTF8(vecsv);
}
else if (efix ? efix <= svmax : svix < svmax) {
vecsv = svargs[efix ? efix-1 : svix++];
- vecstr = (U8*)SvPVx(vecsv,veclen);
+ vecstr = (U8*)SvPV_const(vecsv,veclen);
vec_utf8 = DO_UTF8(vecsv);
/* if this is a version object, we need to return the
* stringified representation (which the SvPVX_const has
if ( *q == 'd' && sv_derived_from(vecsv,"version") )
{
q++; /* skip past the rest of the %vd format */
- eptr = (char *) vecstr;
+ eptr = (const char *) vecstr;
elen = strlen(eptr);
vectorize=FALSE;
goto string;
else {
SV *nsv = sv_2mortal(newSVpvn(eptr, elen));
sv_utf8_upgrade(nsv);
- eptr = SvPVX(nsv);
+ eptr = SvPVX_const(nsv);
elen = SvCUR(nsv);
}
SvGROW(sv, SvCUR(sv) + elen + 1);
ret->subbeg = SAVEPVN(r->subbeg, r->sublen);
else
ret->subbeg = Nullch;
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
ret->saved_copy = Nullsv;
#endif
# define PTR_TABLE_HASH(ptr) (PTR2UV(ptr) >> 2)
#endif
-
-
-STATIC void
-S_more_pte(pTHX)
-{
- struct ptr_tbl_ent* pte;
- struct ptr_tbl_ent* pteend;
- New(0, pte, PERL_ARENA_SIZE/sizeof(struct ptr_tbl_ent), struct ptr_tbl_ent);
- pte->next = PL_pte_arenaroot;
- PL_pte_arenaroot = pte;
-
- pteend = &pte[PERL_ARENA_SIZE / sizeof(struct ptr_tbl_ent) - 1];
- PL_pte_root = ++pte;
- while (pte < pteend) {
- pte->next = pte + 1;
- pte++;
- }
- pte->next = 0;
-}
-
-STATIC struct ptr_tbl_ent*
-S_new_pte(pTHX)
-{
- struct ptr_tbl_ent* pte;
- if (!PL_pte_root)
- S_more_pte(aTHX);
- pte = PL_pte_root;
- PL_pte_root = pte->next;
- return pte;
-}
-
-STATIC void
-S_del_pte(pTHX_ struct ptr_tbl_ent*p)
-{
- p->next = PL_pte_root;
- PL_pte_root = p;
-}
+#define new_pte() new_body(struct ptr_tbl_ent, pte)
+#define del_pte(p) del_body(p, struct ptr_tbl_ent, pte)
/* map an existing pointer using a table */
return;
}
}
- tblent = S_new_pte(aTHX);
+ tblent = new_pte();
tblent->oldval = oldv;
tblent->newval = newv;
tblent->next = *otblent;
if (entry) {
PTR_TBL_ENT_t *oentry = entry;
entry = entry->next;
- S_del_pte(aTHX_ oentry);
+ del_pte(oentry);
}
if (!entry) {
if (++riter > max) {
}
else {
/* Special case - not normally malloced for some reason */
- if (SvREADONLY(sstr) && SvFAKE(sstr)) {
- /* A "shared" PV - clone it as unshared string */
- if(SvPADTMP(sstr)) {
- /* However, some of them live in the pad
- and they should not have these flags
- turned off */
-
- SvPV_set(dstr, sharepvn(SvPVX_const(sstr), SvCUR(sstr),
- SvUVX(sstr)));
- SvUV_set(dstr, SvUVX(sstr));
- } else {
-
- SvPV_set(dstr, SAVEPVN(SvPVX_const(sstr), SvCUR(sstr)));
- SvFAKE_off(dstr);
- SvREADONLY_off(dstr);
- }
+ if ((SvREADONLY(sstr) && SvFAKE(sstr))) {
+ /* A "shared" PV - clone it as "shared" PV */
+ SvPV_set(dstr,
+ HEK_KEY(hek_dup(SvSHARED_HEK_FROM_PV(SvPVX_const(sstr)),
+ param)));
}
else {
/* Some other special case - random pointer */
void (*dptr) (void*);
void (*dxptr) (pTHX_ void*);
OP *o;
- /* Unions for circumventing strict ANSI C89 casting rules. */
- union { void *vptr; void (*dptr)(void*); } u1, u2;
- union { void *vptr; void (*dxptr)(pTHX_ void*); } u3, u4;
Newz(54, nss, max, ANY);
ptr = POPPTR(ss,ix);
TOPPTR(nss,ix) = any_dup(ptr, proto_perl); /* XXX quite arbitrary */
dptr = POPDPTR(ss,ix);
- u1.dptr = dptr;
- u2.vptr = any_dup(u1.vptr, proto_perl);
- TOPDPTR(nss,ix) = u2.dptr;
+ TOPDPTR(nss,ix) = DPTR2FPTR(void (*)(void*),
+ any_dup(FPTR2DPTR(void *, dptr),
+ proto_perl));
break;
case SAVEt_DESTRUCTOR_X:
ptr = POPPTR(ss,ix);
TOPPTR(nss,ix) = any_dup(ptr, proto_perl); /* XXX quite arbitrary */
dxptr = POPDXPTR(ss,ix);
- u3.dxptr = dxptr;
- u4.vptr = any_dup(u3.vptr, proto_perl);;
- TOPDXPTR(nss,ix) = u4.dxptr;
+ TOPDXPTR(nss,ix) = DPTR2FPTR(void (*)(pTHX_ void*),
+ any_dup(FPTR2DPTR(void *, dxptr),
+ proto_perl));
break;
case SAVEt_REGCONTEXT:
case SAVEt_ALLOC:
/* create SV map for pointer relocation */
PL_ptr_table = ptr_table_new();
- /* and one for finding shared hash keys quickly */
- PL_shared_hek_table = ptr_table_new();
/* initialize these special pointers as early as possible */
SvANY(&PL_sv_undef) = NULL;
PL_reg_curpm = (PMOP*)NULL;
PL_reg_oldsaved = Nullch;
PL_reg_oldsavedlen = 0;
-#ifdef PERL_COPY_ON_WRITE
+#ifdef PERL_OLD_COPY_ON_WRITE
PL_nrs = Nullsv;
#endif
PL_reg_maxiter = 0;
if (!(flags & CLONEf_KEEP_PTR_TABLE)) {
ptr_table_free(PL_ptr_table);
PL_ptr_table = NULL;
- ptr_table_free(PL_shared_hek_table);
- PL_shared_hek_table = NULL;
}
/* Call the ->CLONE method, if it exists, for each of the stashes
if (SvPOK(sv) && !SvUTF8(sv) && !IN_BYTES && SvROK(encoding)) {
SV *uni;
STRLEN len;
- char *s;
+ const char *s;
dSP;
ENTER;
SAVETMPS;
SPAGAIN;
uni = POPs;
PUTBACK;
- s = SvPV(uni, len);
+ s = SvPV_const(uni, len);
if (s != SvPVX_const(sv)) {
SvGROW(sv, len + 1);
- Move(s, SvPVX_const(sv), len, char);
+ Move(s, SvPVX(sv), len + 1, char);
SvCUR_set(sv, len);
- SvPVX(sv)[len] = 0;
}
FREETMPS;
LEAVE;