* --jhi
*/
#define ASSERT_UTF8_CACHE(cache) \
- STMT_START { if (cache) { assert((cache)[0] <= (cache)[1]); } } STMT_END
+ STMT_START { if (cache) { assert((cache)[0] <= (cache)[1]); } } STMT_END
#else
#define ASSERT_UTF8_CACHE(cache) NOOP
#endif
=head1 Allocation and deallocation of SVs.
-An SV (or AV, HV, etc.) is allocated in two parts: the head (struct sv,
-av, hv...) contains type and reference count information, as well as a
-pointer to the body (struct xrv, xpv, xpviv...), which contains fields
-specific to each type.
+An SV (or AV, HV, etc.) is allocated in two parts: the head (struct
+sv, av, hv...) contains type and reference count information, and for
+many types, a pointer to the body (struct xrv, xpv, xpviv...), which
+contains fields specific to each type. Some types store all they need
+in the head, so don't have a body.
-In all but the most memory-paranoid configuations (ex: PURIFY), this
-allocation is done using arenas, which by default are approximately 4K
-chunks of memory parcelled up into N heads or bodies (of same size).
+In all but the most memory-paranoid configurations (ex: PURIFY), heads
+and bodies are allocated out of arenas, which by default are
+approximately 4K chunks of memory parcelled up into N heads or bodies.
Sv-bodies are allocated by their sv-type, guaranteeing size
consistency needed to allocate safely from arrays.
-The first slot in each arena is reserved, and is used to hold a link
-to the next arena. In the case of heads, the unused first slot also
-contains some flags and a note of the number of slots. Snaked through
-each arena chain is a linked list of free items; when this becomes
-empty, an extra arena is allocated and divided up into N items which
-are threaded into the free list.
+For SV-heads, the first slot in each arena is reserved, and holds a
+link to the next arena, some flags, and a note of the number of slots.
+Snaked through each arena chain is a linked list of free items; when
+this becomes empty, an extra arena is allocated and divided up into N
+items which are threaded into the free list.
+
+SV-bodies are similar, but they use arena-sets by default, which
+separate the link and info from the arena itself, and reclaim the 1st
+slot in the arena. SV-bodies are further described later.
The following global variables are associated with arenas:
PL_sv_arenaroot pointer to list of SV arenas
PL_sv_root pointer to list of free SV structures
- PL_body_arenaroots[] array of pointers to list of arenas, 1 per svtype
- PL_body_roots[] array of pointers to list of free bodies of svtype
- arrays are indexed by the svtype needed
-
-Note that some of the larger and more rarely used body types (eg
-xpvio) are not allocated using arenas, but are instead just
-malloc()/free()ed as required.
+ PL_body_arenas head of linked-list of body arenas
+ PL_body_roots[] array of pointers to list of free bodies of svtype
+ arrays are indexed by the svtype needed
-In addition, a few SV heads are not allocated from an arena, but are
-instead directly created as static or auto variables, eg PL_sv_undef.
+A few special SV heads are not allocated from an arena, but are
+instead directly created in the interpreter structure, eg PL_sv_undef.
The size of arenas can be changed from the default by setting
PERL_ARENA_SIZE appropriately at compile time.
more_sv() / sv_add_arena() to add an extra arena if the free list is empty.
SVs in the free list have their SvTYPE field set to all ones.
-Similarly, there are macros new_XIV()/del_XIV(), new_XNV()/del_XNV() etc
-that allocate and return individual body types. Normally these are mapped
-to the arena-manipulating functions new_xiv()/del_xiv() etc, but may be
-instead mapped directly to malloc()/free() if PURIFY is defined. The
-new/del functions remove from, or add to, the appropriate PL_foo_root
-list, and call more_xiv() etc to add a new arena if the list is empty.
-
At the time of very final cleanup, sv_free_arenas() is called from
perl_destruct() to physically free all the arenas allocated since the
start of the interpreter.
sv_report_used(), sv_clean_objs(), sv_clean_all(), sv_free_arenas()
-
=cut
============================================================================ */
-
-
/*
* "A time to plant, and a time to uproot what was planted..."
*/
#define uproot_SV(p) \
STMT_START { \
(p) = PL_sv_root; \
- PL_sv_root = (SV*)SvARENA_CHAIN(p); \
+ PL_sv_root = (SV*)SvARENA_CHAIN(p); \
++PL_sv_count; \
} STMT_END
}
else {
char *chunk; /* must use New here to match call to */
- Newx(chunk,PERL_ARENA_SIZE,char); /* Safefree() in sv_free_arenas() */
+ Newx(chunk,PERL_ARENA_SIZE,char); /* Safefree() in sv_free_arenas() */
sv_add_arena(chunk, PERL_ARENA_SIZE, 0);
}
uproot_SV(sv);
SvFLAGS(sv) |= SVf_BREAK;
if (PL_comppad == (AV*)sv) {
PL_comppad = NULL;
- PL_curpad = Null(SV**);
+ PL_curpad = NULL;
}
SvREFCNT_dec(sv);
}
The cost is 1 arena-set malloc per ~320 arena-mallocs, + the unused
memory in the last arena-set (1/2 on average). In trade, we get
back the 1st slot in each arena (ie 1.7% of a CV-arena, less for
- others)
-
- union arena is declared with a fixed size, but is intended to vary
- by type, allowing their use for big, rare body-types where theres
- currently too much wastage (unused arena slots)
+ smaller types). The recovery of the wasted space allows use of
+ small arenas for large, rare body types.
*/
-#define ARENASETS 1
-
struct arena_desc {
char *arena; /* the raw storage, allocated aligned */
size_t size; /* its size ~4k typ */
union arena* arp;
/* allocate and attach arena */
- Newx(arp, PERL_ARENA_SIZE, char);
+ Newx(arp, arena_size, char);
arp->next = PL_body_arenas;
PL_body_arenas = arp;
return arp;
#else
struct arena_desc* adesc;
- struct arena_set *newroot, *aroot = (struct arena_set*) PL_body_arenas;
+ struct arena_set *newroot, **aroot = (struct arena_set**) &PL_body_arenas;
int curr;
- if (!arena_size)
- arena_size = PERL_ARENA_SIZE;
+ /* shouldn't need this
+ if (!arena_size) arena_size = PERL_ARENA_SIZE;
+ */
/* may need new arena-set to hold new arena */
- if (!aroot || aroot->curr >= aroot->set_size) {
+ if (!*aroot || (*aroot)->curr >= (*aroot)->set_size) {
Newxz(newroot, 1, struct arena_set);
newroot->set_size = ARENAS_PER_SET;
- newroot->next = aroot;
- aroot = newroot;
- DEBUG_m(PerlIO_printf(Perl_debug_log, "new arenaset %p\n", aroot));
+ newroot->next = *aroot;
+ *aroot = newroot;
+ DEBUG_m(PerlIO_printf(Perl_debug_log, "new arenaset %p\n", *aroot));
}
/* ok, now have arena-set with at least 1 empty/available arena-desc */
- curr = aroot->curr++;
- adesc = &aroot->set[curr];
+ curr = (*aroot)->curr++;
+ adesc = &((*aroot)->set[curr]);
assert(!adesc->arena);
- /* old fixed-size way
- Newxz(adesc->arena, 1, union arena);
- adesc->size = sizeof(union arena);
- */
- /* new buggy way */
Newxz(adesc->arena, arena_size, char);
adesc->size = arena_size;
-
- /* adesc->count = sizeof(struct arena)/size; */
-
- DEBUG_m(PerlIO_printf(Perl_debug_log, "arena %d added: %p\n", curr, aroot));
+ DEBUG_m(PerlIO_printf(Perl_debug_log, "arena %d added: %p size %d\n",
+ curr, adesc->arena, arena_size));
return adesc->arena;
#endif
}
-STATIC void *
-S_more_bodies (pTHX_ size_t size, svtype sv_type)
-{
- dVAR;
- void ** const root = &PL_body_roots[sv_type];
- char *start;
- const char *end;
- const size_t count = PERL_ARENA_SIZE / size;
-
- start = (char*) Perl_get_arena(aTHX_ PERL_ARENA_SIZE); /* get a raw arena */
-
- end = start + (count-1) * size;
-
-#if !ARENASETS
- /* The initial slot is used to link the arenas together, so it isn't to be
- linked into the list of ready-to-use bodies. */
- start += size;
-#endif
-
- *root = (void *)start;
-
- while (start < end) {
- char * const next = start + size;
- *(void**) start = (void *)next;
- start = next;
- }
- *(void **)start = 0;
-
- return *root;
-}
-
-/* grab a new thing from the free list, allocating more if necessary */
-
-/* 1st, the inline version */
-
-#define new_body_inline(xpv, size, sv_type) \
- STMT_START { \
- void ** const r3wt = &PL_body_roots[sv_type]; \
- LOCK_SV_MUTEX; \
- xpv = *((void **)(r3wt)) \
- ? *((void **)(r3wt)) : S_more_bodies(aTHX_ size, sv_type); \
- *(r3wt) = *(void**)(xpv); \
- UNLOCK_SV_MUTEX; \
- } STMT_END
-
-/* now use the inline version in the proper function */
-
-#ifndef PURIFY
-
-/* This isn't being used with -DPURIFY, so don't declare it. Otherwise
- compilers issue warnings. */
-
-STATIC void *
-S_new_body(pTHX_ size_t size, svtype sv_type)
-{
- dVAR;
- void *xpv;
- new_body_inline(xpv, size, sv_type);
- return xpv;
-}
-
-#endif
/* return a thing to the free list */
} STMT_END
/*
- Revisiting type 3 arenas, there are 4 body-types which have some
- members that are never accessed. They are XPV, XPVIV, XPVAV,
- XPVHV, which have corresponding types: xpv_allocated,
- xpviv_allocated, xpvav_allocated, xpvhv_allocated,
-
- For these types, the arenas are carved up into *_allocated size
- chunks, we thus avoid wasted memory for those unaccessed members.
- When bodies are allocated, we adjust the pointer back in memory by
- the size of the bit not allocated, so it's as if we allocated the
- full structure. (But things will all go boom if you write to the
- part that is "not there", because you'll be overwriting the last
- members of the preceding structure in memory.)
-
- We calculate the correction using the STRUCT_OFFSET macro. For example, if
- xpv_allocated is the same structure as XPV then the two OFFSETs sum to zero,
- and the pointer is unchanged. If the allocated structure is smaller (no
- initial NV actually allocated) then the net effect is to subtract the size
- of the NV from the pointer, to return a new pointer as if an initial NV were
- actually allocated.
-
- This is the same trick as was used for NV and IV bodies. Ironically it
- doesn't need to be used for NV bodies any more, because NV is now at the
- start of the structure. IV bodies don't need it either, because they are
- no longer allocated. */
-
-/* The following 2 arrays hide the above details in a pair of
- lookup-tables, allowing us to be body-type agnostic.
-
- size maps svtype to its body's allocated size.
- offset maps svtype to the body-pointer adjustment needed
-
- NB: elements in latter are 0 or <0, and are added during
- allocation, and subtracted during deallocation. It may be clearer
- to invert the values, and call it shrinkage_by_svtype.
+
+=head1 SV-Body Allocation
+
+Allocation of SV-bodies is similar to SV-heads, differing as follows:
+the allocation mechanism is used for many body types, so is somewhat
+more complicated; it uses arena-sets, and has no need for still-live
+SV detection.
+
+At the outermost level, (new|del)_X*V macros return bodies of the
+appropriate type. These macros call either (new|del)_body_type or
+(new|del)_body_allocated macro pairs, depending on specifics of the
+type. Most body types use the former pair, the latter pair is used to
+allocate body types with "ghost fields".
+
+"ghost fields" are fields that are unused in certain types, and
+consequently don't need to actually exist. They are declared because
+they're part of a "base type", which allows use of functions as
+methods. The simplest examples are AVs and HVs, 2 aggregate types
+which don't use the fields which support SCALAR semantics.
+
+For these types, the arenas are carved up into *_allocated size
+chunks, we thus avoid wasted memory for those unaccessed members.
+When bodies are allocated, we adjust the pointer back in memory by the
+size of the bit not allocated, so it's as if we allocated the full
+structure. (But things will all go boom if you write to the part that
+is "not there", because you'll be overwriting the last members of the
+preceding structure in memory.)
+
+We calculate the correction using the STRUCT_OFFSET macro. For
+example, if xpv_allocated is the same structure as XPV then the two
+OFFSETs sum to zero, and the pointer is unchanged. If the allocated
+structure is smaller (no initial NV actually allocated) then the net
+effect is to subtract the size of the NV from the pointer, to return a
+new pointer as if an initial NV were actually allocated.
+
+This is the same trick as was used for NV and IV bodies. Ironically it
+doesn't need to be used for NV bodies any more, because NV is now at
+the start of the structure. IV bodies don't need it either, because
+they are no longer allocated.
+
+In turn, the new_body_* allocators call S_new_body(), which invokes
+new_body_inline macro, which takes a lock, and takes a body off the
+linked list at PL_body_roots[sv_type], calling S_more_bodies() if
+necessary to refresh an empty list. Then the lock is released, and
+the body is returned.
+
+S_more_bodies calls get_arena(), and carves it up into an array of N
+bodies, which it strings into a linked list. It looks up arena-size
+and body-size from the body_details table described below, thus
+supporting the multiple body-types.
+
+If PURIFY is defined, or PERL_ARENA_SIZE=0, arenas are not used, and
+the (new|del)_X*V macros are mapped directly to malloc/free.
+
+*/
+
+/*
+
+For each sv-type, struct body_details bodies_by_type[] carries
+parameters which control these aspects of SV handling:
+
+Arena_size determines whether arenas are used for this body type, and if
+so, how big they are. PURIFY or PERL_ARENA_SIZE=0 set this field to
+zero, forcing individual mallocs and frees.
+
+Body_size determines how big a body is, and therefore how many fit into
+each arena. Offset carries the body-pointer adjustment needed for
+*_allocated body types, and is used in *_allocated macros.
+
+But its main purpose is to parameterize info needed in
+Perl_sv_upgrade(). The info here dramatically simplifies the function
+vs the implementation in 5.8.7, making it table-driven. All fields
+are used for this, except for arena_size.
+
+For the sv-types that have no bodies, arenas are not used, so those
+PL_body_roots[sv_type] are unused, and can be overloaded. In
+something of a special case, SVt_NULL is borrowed for HE arenas;
+PL_body_roots[SVt_NULL] is filled by S_more_he, but the
+bodies_by_type[SVt_NULL] slot is not used, as the table is not
+available in hv.c.
+
+PTEs also use arenas, but are never seen in Perl_sv_upgrade.
+Nonetheless, they get their own slot in bodies_by_type[SVt_NULL], so
+they can just use the same allocation semantics. At first, PTEs were
+also overloaded to a non-body sv-type, but this yielded hard-to-find
+malloc bugs, so was simplified by claiming a new slot. This choice
+has no consequence at this time.
+
*/
struct body_details {
- size_t size; /* Size to allocate */
+ size_t body_size; /* Size to allocate */
size_t copy; /* Size of structure to copy (may be shorter) */
size_t offset;
- bool cant_upgrade; /* Can upgrade this type */
+ bool cant_upgrade; /* Cannot upgrade this type */
bool zero_nv; /* zero the NV when upgrading from this */
bool arena; /* Allocated from an arena */
+ size_t arena_size; /* Size of arena to allocate */
};
#define HADNV FALSE
#define NONV TRUE
+
#ifdef PURIFY
/* With -DPURFIY we allocate everything directly, and don't use arenas.
This seems a rather elegant way to simplify some of the code below. */
#endif
#define NOARENA FALSE
+/* Size the arenas to exactly fit a given number of bodies. A count
+ of 0 fits the max number of bodies into a PERL_ARENA_SIZE block,
+ simplifying the default. If count > 0, the arena is sized to fit
+ only that many bodies, allowing arenas to be used for large, rare
+ bodies (XPVFM, XPVIO) without undue waste. The arena size is
+ limited by PERL_ARENA_SIZE, so we can safely oversize the
+ declarations.
+ */
+#define FIT_ARENA(count, body_size) \
+ (!count || count * body_size > PERL_ARENA_SIZE) \
+ ? (int)(PERL_ARENA_SIZE / body_size) * body_size : count * body_size
+
/* A macro to work out the offset needed to subtract from a pointer to (say)
typedef struct {
+ sizeof (((type*)SvANY((SV*)0))->last_member)
static const struct body_details bodies_by_type[] = {
- {0, 0, 0, FALSE, NONV, NOARENA},
- /* IVs are in the head, so the allocation size is 0 */
- {0, sizeof(IV), STRUCT_OFFSET(XPVIV, xiv_iv), FALSE, NONV, NOARENA},
+ { sizeof(HE), 0, 0, FALSE, NONV, NOARENA, FIT_ARENA(0, sizeof(HE)) },
+
+ /* IVs are in the head, so the allocation size is 0.
+ However, the slot is overloaded for PTEs. */
+ { sizeof(struct ptr_tbl_ent), /* This is used for PTEs. */
+ sizeof(IV), /* This is used to copy out the IV body. */
+ STRUCT_OFFSET(XPVIV, xiv_iv), FALSE, NONV,
+ NOARENA /* IVS don't need an arena */,
+ /* But PTEs need to know the size of their arena */
+ FIT_ARENA(0, sizeof(struct ptr_tbl_ent))
+ },
+
/* 8 bytes on most ILP32 with IEEE doubles */
- {sizeof(NV), sizeof(NV), 0, FALSE, HADNV, HASARENA},
- /* RVs are in the head now */
- /* However, this slot is overloaded and used by the pte */
- {0, 0, 0, FALSE, NONV, NOARENA},
+ { sizeof(NV), sizeof(NV), 0, FALSE, HADNV, HASARENA,
+ FIT_ARENA(0, sizeof(NV)) },
+
+ /* RVs are in the head now. */
+ { 0, 0, 0, FALSE, NONV, NOARENA, 0 },
+
/* 8 bytes on most ILP32 with IEEE doubles */
- {sizeof(xpv_allocated),
- copy_length(XPV, xpv_len)
- - relative_STRUCT_OFFSET(xpv_allocated, XPV, xpv_cur),
- + relative_STRUCT_OFFSET(xpv_allocated, XPV, xpv_cur),
- FALSE, NONV, HASARENA},
+ { sizeof(xpv_allocated),
+ copy_length(XPV, xpv_len)
+ - relative_STRUCT_OFFSET(xpv_allocated, XPV, xpv_cur),
+ + relative_STRUCT_OFFSET(xpv_allocated, XPV, xpv_cur),
+ FALSE, NONV, HASARENA, FIT_ARENA(0, sizeof(xpv_allocated)) },
+
/* 12 */
- {sizeof(xpviv_allocated),
- copy_length(XPVIV, xiv_u)
- - relative_STRUCT_OFFSET(xpviv_allocated, XPVIV, xpv_cur),
- + relative_STRUCT_OFFSET(xpviv_allocated, XPVIV, xpv_cur),
- FALSE, NONV, HASARENA},
+ { sizeof(xpviv_allocated),
+ copy_length(XPVIV, xiv_u)
+ - relative_STRUCT_OFFSET(xpviv_allocated, XPVIV, xpv_cur),
+ + relative_STRUCT_OFFSET(xpviv_allocated, XPVIV, xpv_cur),
+ FALSE, NONV, HASARENA, FIT_ARENA(0, sizeof(xpviv_allocated)) },
+
/* 20 */
- {sizeof(XPVNV), copy_length(XPVNV, xiv_u), 0, FALSE, HADNV, HASARENA},
+ { sizeof(XPVNV), copy_length(XPVNV, xiv_u), 0, FALSE, HADNV,
+ HASARENA, FIT_ARENA(0, sizeof(XPVNV)) },
+
/* 28 */
- {sizeof(XPVMG), copy_length(XPVMG, xmg_stash), 0, FALSE, HADNV, HASARENA},
+ { sizeof(XPVMG), copy_length(XPVMG, xmg_stash), 0, FALSE, HADNV,
+ HASARENA, FIT_ARENA(0, sizeof(XPVMG)) },
+
/* 36 */
- {sizeof(XPVBM), sizeof(XPVBM), 0, TRUE, HADNV, HASARENA},
+ { sizeof(XPVBM), sizeof(XPVBM), 0, TRUE, HADNV,
+ HASARENA, FIT_ARENA(0, sizeof(XPVBM)) },
+
/* 48 */
- {sizeof(XPVGV), sizeof(XPVGV), 0, TRUE, HADNV, HASARENA},
+ { sizeof(XPVGV), sizeof(XPVGV), 0, TRUE, HADNV,
+ HASARENA, FIT_ARENA(0, sizeof(XPVGV)) },
+
/* 64 */
- {sizeof(XPVLV), sizeof(XPVLV), 0, TRUE, HADNV, HASARENA},
- /* 20 */
- {sizeof(xpvav_allocated),
- copy_length(XPVAV, xmg_stash)
- - relative_STRUCT_OFFSET(xpvav_allocated, XPVAV, xav_fill),
- + relative_STRUCT_OFFSET(xpvav_allocated, XPVAV, xav_fill),
- TRUE, HADNV, HASARENA},
- /* 20 */
- {sizeof(xpvhv_allocated),
- copy_length(XPVHV, xmg_stash)
- - relative_STRUCT_OFFSET(xpvhv_allocated, XPVHV, xhv_fill),
- + relative_STRUCT_OFFSET(xpvhv_allocated, XPVHV, xhv_fill),
- TRUE, HADNV, HASARENA},
+ { sizeof(XPVLV), sizeof(XPVLV), 0, TRUE, HADNV,
+ HASARENA, FIT_ARENA(0, sizeof(XPVLV)) },
+
+ { sizeof(xpvav_allocated),
+ copy_length(XPVAV, xmg_stash)
+ - relative_STRUCT_OFFSET(xpvav_allocated, XPVAV, xav_fill),
+ + relative_STRUCT_OFFSET(xpvav_allocated, XPVAV, xav_fill),
+ TRUE, HADNV, HASARENA, FIT_ARENA(0, sizeof(xpvav_allocated)) },
+
+ { sizeof(xpvhv_allocated),
+ copy_length(XPVHV, xmg_stash)
+ - relative_STRUCT_OFFSET(xpvhv_allocated, XPVHV, xhv_fill),
+ + relative_STRUCT_OFFSET(xpvhv_allocated, XPVHV, xhv_fill),
+ TRUE, HADNV, HASARENA, FIT_ARENA(0, sizeof(xpvhv_allocated)) },
+
/* 76 */
- {sizeof(XPVCV), sizeof(XPVCV), 0, TRUE, HADNV, HASARENA},
- /* 80 */
- {sizeof(XPVFM), sizeof(XPVFM), 0, TRUE, HADNV, NOARENA},
- /* 84 */
- {sizeof(XPVIO), sizeof(XPVIO), 0, TRUE, HADNV, NOARENA}
+ { sizeof(XPVCV), sizeof(XPVCV), 0, TRUE, HADNV,
+ HASARENA, FIT_ARENA(0, sizeof(XPVCV)) },
+
+ { sizeof(xpvfm_allocated),
+ sizeof(xpvfm_allocated)
+ - relative_STRUCT_OFFSET(xpvfm_allocated, XPVFM, xpv_cur),
+ + relative_STRUCT_OFFSET(xpvfm_allocated, XPVFM, xpv_cur),
+ TRUE, HADNV, NOARENA, FIT_ARENA(20, sizeof(xpvfm_allocated)) },
+
+ /* XPVIO is 84 bytes, fits 48x */
+ { sizeof(XPVIO), sizeof(XPVIO), 0, TRUE, HADNV,
+ HASARENA, FIT_ARENA(24, sizeof(XPVIO)) },
};
-#define new_body_type(sv_type) \
- (void *)((char *)S_new_body(aTHX_ bodies_by_type[sv_type].size, sv_type)\
- - bodies_by_type[sv_type].offset)
+#define new_body_type(sv_type) \
+ (void *)((char *)S_new_body(aTHX_ sv_type))
#define del_body_type(p, sv_type) \
del_body(p, &PL_body_roots[sv_type])
#define new_body_allocated(sv_type) \
- (void *)((char *)S_new_body(aTHX_ bodies_by_type[sv_type].size, sv_type)\
+ (void *)((char *)S_new_body(aTHX_ sv_type) \
- bodies_by_type[sv_type].offset)
#define del_body_allocated(p, sv_type) \
/* no arena for you! */
#define new_NOARENA(details) \
- my_safemalloc((details)->size + (details)->offset)
+ my_safemalloc((details)->body_size + (details)->offset)
#define new_NOARENAZ(details) \
- my_safecalloc((details)->size + (details)->offset)
+ my_safecalloc((details)->body_size + (details)->offset)
+
+/* Replenish the free-body list for sv_type: grab a fresh arena sized
+ per bodies_by_type[sv_type], carve it into body_size chunks, and
+ thread them into a linked free list rooted at PL_body_roots[sv_type].
+ Returns the head of the new free list. */
+STATIC void *
+S_more_bodies (pTHX_ svtype sv_type)
+{
+ dVAR;
+ void ** const root = &PL_body_roots[sv_type];
+ const struct body_details *bdp = &bodies_by_type[sv_type];
+ const size_t body_size = bdp->body_size;
+ char *start;
+ const char *end;
+
+ assert(bdp->arena_size);
+ start = (char*) Perl_get_arena(aTHX_ bdp->arena_size);
+
+ /* end is the last slot that fits wholly inside the arena */
+ end = start + bdp->arena_size - body_size;
+
+#if !ARENASETS
+ /* The initial slot is used to link the arenas together, so it isn't to be
+ linked into the list of ready-to-use bodies. */
+ start += body_size;
+#else
+ /* computed count doesn't reflect the 1st slot reservation */
+ DEBUG_m(PerlIO_printf(Perl_debug_log,
+ "arena %p end %p arena-size %d type %d size %d ct %d\n",
+ start, end, bdp->arena_size, sv_type, body_size,
+ bdp->arena_size / body_size));
+#endif
+
+ *root = (void *)start;
+
+ /* chain each body slot to the next: a free slot's first word holds
+ the link pointer */
+ while (start < end) {
+ char * const next = start + body_size;
+ *(void**) start = (void *)next;
+ start = next;
+ }
+ /* NULL-terminate the free list at the last slot */
+ *(void **)start = 0;
+
+ return *root;
+}
+
+/* grab a new thing from the free list, allocating more if necessary.
+ The inline version is used for speed in hot routines, and the
+ function using it serves the rest (unless PURIFY).
+*/
+#define new_body_inline(xpv, sv_type) \
+ STMT_START { \
+ void ** const r3wt = &PL_body_roots[sv_type]; \
+ LOCK_SV_MUTEX; \
+ xpv = *((void **)(r3wt)) \
+ ? *((void **)(r3wt)) : S_more_bodies(aTHX_ sv_type); \
+ *(r3wt) = *(void**)(xpv); \
+ UNLOCK_SV_MUTEX; \
+ } STMT_END
+
+#ifndef PURIFY
+
+/* Out-of-line body allocator: wraps new_body_inline for the non-hot
+ callers (the macro itself is used directly in hot paths). Not
+ compiled under PURIFY, where bodies are malloc'd individually and
+ this would otherwise draw unused-function warnings. */
+STATIC void *
+S_new_body(pTHX_ svtype sv_type)
+{
+ dVAR;
+ void *xpv;
+ new_body_inline(xpv, sv_type);
+ return xpv;
+}
+
+#endif
/*
=for apidoc sv_upgrade
void* old_body;
void* new_body;
const U32 old_type = SvTYPE(sv);
+ const struct body_details *new_type_details;
const struct body_details *const old_type_details
= bodies_by_type + old_type;
- const struct body_details *new_type_details = bodies_by_type + new_type;
if (new_type != SVt_PV && SvIsCOW(sv)) {
sv_force_normal_flags(sv, 0);
if (new_type < SVt_PVIV) {
new_type = (new_type == SVt_NV)
? SVt_PVNV : SVt_PVIV;
- new_type_details = bodies_by_type + new_type;
}
break;
case SVt_NV:
if (new_type < SVt_PVNV) {
new_type = SVt_PVNV;
- new_type_details = bodies_by_type + new_type;
}
break;
case SVt_RV:
break;
default:
if (old_type_details->cant_upgrade)
- Perl_croak(aTHX_ "Can't upgrade that kind of scalar");
+ Perl_croak(aTHX_ "Can't upgrade %s (%" UVuf ") to %" UVuf,
+ sv_reftype(sv, 0), (UV) old_type, (UV) new_type);
}
+ new_type_details = bodies_by_type + new_type;
SvFLAGS(sv) &= ~SVTYPEMASK;
SvFLAGS(sv) |= new_type;
SvRV_set(sv, 0);
return;
case SVt_PVHV:
- SvANY(sv) = new_XPVHV();
- HvFILL(sv) = 0;
- HvMAX(sv) = 0;
- HvTOTALKEYS(sv) = 0;
-
- goto hv_av_common;
-
case SVt_PVAV:
- SvANY(sv) = new_XPVAV();
- AvMAX(sv) = -1;
- AvFILLp(sv) = -1;
- AvALLOC(sv) = 0;
- AvREAL_only(sv);
+ assert(new_type_details->body_size);
+
+#ifndef PURIFY
+ assert(new_type_details->arena);
+ assert(new_type_details->arena_size);
+ /* This points to the start of the allocated area. */
+ new_body_inline(new_body, new_type);
+ Zero(new_body, new_type_details->body_size, char);
+ new_body = ((char *)new_body) - new_type_details->offset;
+#else
+ /* We always allocated the full length item with PURIFY. To do this
+ we fake things so that arena is false for all 16 types.. */
+ new_body = new_NOARENAZ(new_type_details);
+#endif
+ SvANY(sv) = new_body;
+ if (new_type == SVt_PVAV) {
+ AvMAX(sv) = -1;
+ AvFILLp(sv) = -1;
+ AvREAL_only(sv);
+ }
- hv_av_common:
/* SVt_NULL isn't the only thing upgraded to AV or HV.
The target created by newSVrv also is, and it can have magic.
However, it never has SvPVX set.
if (old_type >= SVt_PVMG) {
SvMAGIC_set(sv, ((XPVMG*)old_body)->xmg_magic);
SvSTASH_set(sv, ((XPVMG*)old_body)->xmg_stash);
- } else {
- SvMAGIC_set(sv, NULL);
- SvSTASH_set(sv, NULL);
}
break;
case SVt_PVNV:
case SVt_PV:
- assert(new_type_details->size);
+ assert(new_type_details->body_size);
/* We always allocated the full length item with PURIFY. To do this
we fake things so that arena is false for all 16 types.. */
if(new_type_details->arena) {
/* This points to the start of the allocated area. */
- new_body_inline(new_body, new_type_details->size, new_type);
- Zero(new_body, new_type_details->size, char);
+ new_body_inline(new_body, new_type);
+ Zero(new_body, new_type_details->body_size, char);
new_body = ((char *)new_body) - new_type_details->offset;
} else {
new_body = new_NOARENAZ(new_type_details);
(unsigned long)new_type);
}
- if (old_type_details->size) {
- /* If the old body had an allocated size, then we need to free it. */
+ if (old_type_details->arena) {
+ /* If there was an old body, then we need to free it.
+ Note that there is an assumption that all bodies of types that
+ can be upgraded came from arenas. Only the more complex non-
+ upgradable types are allowed to be directly malloc()ed. */
#ifdef PURIFY
my_safefree(old_body);
#else
if ((NV)(SvIVX(sv)) == SvNVX(sv)) {
SvIOK_on(sv);
} else {
- /* Integer is imprecise. NOK, IOKp */
+ /*EMPTY*/; /* Integer is imprecise. NOK, IOKp */
}
/* UV will not work better than IV */
} else {
if ((NV)(SvUVX(sv)) == SvNVX(sv)) {
SvIOK_on(sv);
} else {
- /* Integer is imprecise. NOK, IOKp, is UV */
+ /*EMPTY*/; /* Integer is imprecise. NOK, IOKp, is UV */
}
}
SvIsUV_on(sv);
mg_get(sv);
if (SvNOKp(sv))
return SvNVX(sv);
- if (SvPOKp(sv) && SvLEN(sv)) {
+ if ((SvPOKp(sv) && SvLEN(sv)) && !SvIOKp(sv)) {
if (!SvIOKp(sv) && ckWARN(WARN_NUMERIC) &&
!grok_number(SvPVX_const(sv), SvCUR(sv), NULL))
not_a_number(sv);
Gconvert(SvNVX(sv), NV_DIG, 0, tbuf);
len = strlen(tbuf);
}
- if (SvROK(sv)) { /* XXX Skip this when sv_pvn_force calls */
- /* Sneaky stuff here */
- SV * const tsv = newSVpvn(tbuf, len);
-
- sv_2mortal(tsv);
- if (lp)
- *lp = SvCUR(tsv);
- return SvPVX(tsv);
- }
- else {
+ assert(!SvROK(sv));
+ {
dVAR;
#ifdef FIXNEGATIVEZERO
*/
static void
-S_glob_assign(pTHX_ SV *dstr, SV *sstr, const int dtype)
+S_glob_assign_glob(pTHX_ SV *dstr, SV *sstr, const int dtype)
{
if (dtype != SVt_PVGV) {
const char * const name = GvNAME(sstr);
}
static void
-S_pvgv_assign(pTHX_ SV *dstr, SV *sstr) {
+S_glob_assign_ref(pTHX_ SV *dstr, SV *sstr) {
SV * const sref = SvREFCNT_inc(SvRV(sstr));
SV *dref = NULL;
const int intro = GvINTRO(dstr);
+ SV **location;
+ U8 import_flag = 0;
+ const U32 stype = SvTYPE(sref);
+
#ifdef GV_UNIQUE_CHECK
if (GvUNIQUE((GV*)dstr)) {
GvEGV(dstr) = (GV*)dstr;
}
GvMULTI_on(dstr);
- switch (SvTYPE(sref)) {
- case SVt_PVAV:
- if (intro)
- SAVEGENERICSV(GvAV(dstr));
- else
- dref = (SV*)GvAV(dstr);
- GvAV(dstr) = (AV*)sref;
- if (!GvIMPORTED_AV(dstr)
- && CopSTASH_ne(PL_curcop, GvSTASH(dstr)))
- {
- GvIMPORTED_AV_on(dstr);
- }
- break;
- case SVt_PVHV:
- if (intro)
- SAVEGENERICSV(GvHV(dstr));
- else
- dref = (SV*)GvHV(dstr);
- GvHV(dstr) = (HV*)sref;
- if (!GvIMPORTED_HV(dstr)
- && CopSTASH_ne(PL_curcop, GvSTASH(dstr)))
- {
- GvIMPORTED_HV_on(dstr);
- }
- break;
+ switch (stype) {
case SVt_PVCV:
+ location = (SV **) &GvCV(dstr);
+ import_flag = GVf_IMPORTED_CV;
+ goto common;
+ case SVt_PVHV:
+ location = (SV **) &GvHV(dstr);
+ import_flag = GVf_IMPORTED_HV;
+ goto common;
+ case SVt_PVAV:
+ location = (SV **) &GvAV(dstr);
+ import_flag = GVf_IMPORTED_AV;
+ goto common;
+ case SVt_PVIO:
+ location = (SV **) &GvIOp(dstr);
+ goto common;
+ case SVt_PVFM:
+ location = (SV **) &GvFORM(dstr);
+ default:
+ location = &GvSV(dstr);
+ import_flag = GVf_IMPORTED_SV;
+ common:
if (intro) {
- if (GvCVGEN(dstr) && GvCV(dstr) != (CV*)sref) {
- SvREFCNT_dec(GvCV(dstr));
- GvCV(dstr) = NULL;
- GvCVGEN(dstr) = 0; /* Switch off cacheness. */
- PL_sub_generation++;
+ if (stype == SVt_PVCV) {
+ if (GvCVGEN(dstr) && GvCV(dstr) != (CV*)sref) {
+ SvREFCNT_dec(GvCV(dstr));
+ GvCV(dstr) = NULL;
+ GvCVGEN(dstr) = 0; /* Switch off cacheness. */
+ PL_sub_generation++;
+ }
}
- SAVEGENERICSV(GvCV(dstr));
+ SAVEGENERICSV(*location);
}
else
- dref = (SV*)GvCV(dstr);
- if (GvCV(dstr) != (CV*)sref) {
- CV* const cv = GvCV(dstr);
+ dref = *location;
+ if (stype == SVt_PVCV && *location != sref) {
+ CV* const cv = (CV*)*location;
if (cv) {
if (!GvCVGEN((GV*)dstr) &&
(CvROOT(cv) || CvXSUB(cv)))
it was a const and its value changed. */
if (CvCONST(cv) && CvCONST((CV*)sref)
&& cv_const_sv(cv) == cv_const_sv((CV*)sref)) {
+ /*EMPTY*/
/* They are 2 constant subroutines generated from
the same constant. This probably means that
they are really the "same" proxy subroutine
cv_ckproto(cv, (GV*)dstr,
SvPOK(sref) ? SvPVX_const(sref) : NULL);
}
- GvCV(dstr) = (CV*)sref;
GvCVGEN(dstr) = 0; /* Switch off cacheness. */
GvASSUMECV_on(dstr);
PL_sub_generation++;
}
- if (!GvIMPORTED_CV(dstr) && CopSTASH_ne(PL_curcop, GvSTASH(dstr))) {
- GvIMPORTED_CV_on(dstr);
- }
- break;
- case SVt_PVIO:
- if (intro)
- SAVEGENERICSV(GvIOp(dstr));
- else
- dref = (SV*)GvIOp(dstr);
- GvIOp(dstr) = (IO*)sref;
- break;
- case SVt_PVFM:
- if (intro)
- SAVEGENERICSV(GvFORM(dstr));
- else
- dref = (SV*)GvFORM(dstr);
- GvFORM(dstr) = (CV*)sref;
- break;
- default:
- if (intro)
- SAVEGENERICSV(GvSV(dstr));
- else
- dref = (SV*)GvSV(dstr);
- GvSV(dstr) = sref;
- if (!GvIMPORTED_SV(dstr) && CopSTASH_ne(PL_curcop, GvSTASH(dstr))) {
- GvIMPORTED_SV_on(dstr);
+ *location = sref;
+ if (import_flag && !(GvFLAGS(dstr) & import_flag)
+ && CopSTASH_ne(PL_curcop, GvSTASH(dstr))) {
+ GvFLAGS(dstr) |= import_flag;
}
break;
}
case SVt_RV:
if (dtype < SVt_RV)
sv_upgrade(dstr, SVt_RV);
- else if (dtype == SVt_PVGV &&
- SvROK(sstr) && SvTYPE(SvRV(sstr)) == SVt_PVGV) {
- sstr = SvRV(sstr);
- if (sstr == dstr) {
- if (GvIMPORTED(dstr) != GVf_IMPORTED
- && CopSTASH_ne(PL_curcop, GvSTASH(dstr)))
- {
- GvIMPORTED_on(dstr);
- }
- GvMULTI_on(dstr);
- return;
- }
- S_glob_assign(aTHX_ dstr, sstr, dtype);
- return;
- }
break;
case SVt_PVFM:
#ifdef PERL_OLD_COPY_ON_WRITE
case SVt_PVGV:
if (dtype <= SVt_PVGV) {
- S_glob_assign(aTHX_ dstr, sstr, dtype);
+ S_glob_assign_glob(aTHX_ dstr, sstr, dtype);
return;
}
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
default:
if (SvGMAGICAL(sstr) && (flags & SV_GMAGIC)) {
if ((int)SvTYPE(sstr) != stype) {
stype = SvTYPE(sstr);
if (stype == SVt_PVGV && dtype <= SVt_PVGV) {
- S_glob_assign(aTHX_ dstr, sstr, dtype);
+ S_glob_assign_glob(aTHX_ dstr, sstr, dtype);
return;
}
}
sflags = SvFLAGS(sstr);
if (sflags & SVf_ROK) {
+ if (dtype == SVt_PVGV &&
+ SvROK(sstr) && SvTYPE(SvRV(sstr)) == SVt_PVGV) {
+ sstr = SvRV(sstr);
+ if (sstr == dstr) {
+ if (GvIMPORTED(dstr) != GVf_IMPORTED
+ && CopSTASH_ne(PL_curcop, GvSTASH(dstr)))
+ {
+ GvIMPORTED_on(dstr);
+ }
+ GvMULTI_on(dstr);
+ return;
+ }
+ S_glob_assign_glob(aTHX_ dstr, sstr, dtype);
+ return;
+ }
+
if (dtype >= SVt_PV) {
if (dtype == SVt_PVGV) {
- S_pvgv_assign(aTHX_ dstr, sstr);
+ S_glob_assign_ref(aTHX_ dstr, sstr);
return;
}
if (SvPVX_const(dstr)) {
SvIV_set(dstr, SvIVX(sstr));
}
if (sflags & SVp_NOK) {
- SvFLAGS(dstr) |= sflags & (SVf_NOK|SVp_NOK);
SvNV_set(dstr, SvNVX(sstr));
}
}
/* sv_magic() refuses to add a magic of the same 'how' as an
existing one
*/
- if (how == PERL_MAGIC_taint)
+ if (how == PERL_MAGIC_taint) {
mg->mg_len |= 1;
+ /* Any scalar which already had taint magic on which someone
+ (erroneously?) did SvIOK_on() or similar will now be
+ incorrectly sporting public "OK" flags. */
+ SvFLAGS(sv) &= ~(SVf_IOK|SVf_NOK|SVf_POK);
+ }
return;
}
}
}
if (!SvMAGIC(sv)) {
SvMAGICAL_off(sv);
- SvFLAGS(sv) |= (SvFLAGS(sv) & (SVp_NOK|SVp_POK)) >> PRIVSHIFT;
+ SvFLAGS(sv) |= (SvFLAGS(sv) & (SVp_IOK|SVp_NOK|SVp_POK)) >> PRIVSHIFT;
SvMAGIC_set(sv, NULL);
}
assert(sv);
assert(SvREFCNT(sv) == 0);
- if (type <= SVt_IV)
+ if (type <= SVt_IV) {
+ /* See the comment in sv.h about the collusion between this early
+ return and the overloading of the NULL and IV slots in the size
+ table. */
return;
+ }
if (SvOBJECT(sv)) {
if (PL_defstash) { /* Still have a symbol table? */
del_body(((char *)SvANY(sv) + sv_type_details->offset),
&PL_body_roots[type]);
}
- else if (sv_type_details->size) {
+ else if (sv_type_details->body_size) {
my_safefree(SvANY(sv));
}
}
*/
raw_compare:
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
#endif /* USE_LOCALE_COLLATE */
dVAR;
register SV *sv;
new_SV(sv);
- sv_vsetpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*));
+ sv_vsetpvfn(sv, pat, strlen(pat), args, NULL, 0, NULL);
return sv;
}
* declaration! */
newSUB(start_subparse(FALSE, 0),
newSVOP(OP_CONST, 0, tmpsv),
- Nullop,
- Nullop);
+ NULL, NULL);
LEAVE;
if (!GvCVu(gv))
Perl_croak(aTHX_ "Unable to create sub named \"%"SVf"\"",
void
Perl_sv_vsetpvf(pTHX_ SV *sv, const char* pat, va_list* args)
{
- sv_vsetpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*));
+ sv_vsetpvfn(sv, pat, strlen(pat), args, NULL, 0, NULL);
}
/*
void
Perl_sv_vsetpvf_mg(pTHX_ SV *sv, const char* pat, va_list* args)
{
- sv_vsetpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*));
+ sv_vsetpvfn(sv, pat, strlen(pat), args, NULL, 0, NULL);
SvSETMAGIC(sv);
}
void
Perl_sv_vcatpvf(pTHX_ SV *sv, const char* pat, va_list* args)
{
- sv_vcatpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*));
+ sv_vcatpvfn(sv, pat, strlen(pat), args, NULL, 0, NULL);
}
/*
void
Perl_sv_vcatpvf_mg(pTHX_ SV *sv, const char* pat, va_list* args)
{
- sv_vcatpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*));
+ sv_vcatpvfn(sv, pat, strlen(pat), args, NULL, 0, NULL);
SvSETMAGIC(sv);
}
case '7': case '8': case '9':
var = *(*pattern)++ - '0';
while (isDIGIT(**pattern)) {
- I32 tmp = var * 10 + (*(*pattern)++ - '0');
+ const I32 tmp = var * 10 + (*(*pattern)++ - '0');
if (tmp < var)
Perl_croak(aTHX_ "Integer overflow in format string for %s", (PL_op ? OP_NAME(PL_op) : "sv_vcatpvfn"));
var = tmp;
const char *eptr = NULL;
STRLEN elen = 0;
SV *vecsv = NULL;
- const U8 *vecstr = Null(U8*);
+ const U8 *vecstr = NULL;
STRLEN veclen = 0;
char c = 0;
int i;
#endif
#if defined(HAS_QUAD) || defined(HAS_LONG_DOUBLE)
case 'L': /* Ld */
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
#ifdef HAS_QUAD
case 'q': /* qd */
#endif
break;
}
#endif
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
case 'h':
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
case 'V':
intsize = *q++;
break;
#else
intsize = 'l';
#endif
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
case 'd':
case 'i':
#if vdNUMBER
#else
intsize = 'l';
#endif
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
case 'u':
base = 10;
goto uns_integer;
#else
intsize = 'l';
#endif
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
case 'o':
base = 8;
goto uns_integer;
case 'F':
c = 'f'; /* maybe %F isn't supported here */
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
case 'e': case 'E':
case 'f':
case 'g': case 'G':
break;
/* [perl #20339] - we should accept and ignore %lf rather than die */
case 'l':
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
default:
#if defined(USE_LONG_DOUBLE)
intsize = args ? 0 : 'q';
#if defined(HAS_LONG_DOUBLE)
break;
#else
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
#endif
case 'h':
goto unknown;
/* map an existing pointer using a table */
STATIC PTR_TBL_ENT_t *
-S_ptr_table_find(pTHX_ PTR_TBL_t *tbl, const void *sv) {
+S_ptr_table_find(PTR_TBL_t *tbl, const void *sv) {
PTR_TBL_ENT_t *tblent;
const UV hash = PTR_TABLE_HASH(sv);
assert(tbl);
void *
Perl_ptr_table_fetch(pTHX_ PTR_TBL_t *tbl, const void *sv)
{
- PTR_TBL_ENT_t const *const tblent = S_ptr_table_find(aTHX_ tbl, sv);
+ PTR_TBL_ENT_t const *const tblent = ptr_table_find(tbl, sv);
return tblent ? tblent->newval : (void *) 0;
}
void
Perl_ptr_table_store(pTHX_ PTR_TBL_t *tbl, const void *oldsv, void *newsv)
{
- PTR_TBL_ENT_t *tblent = S_ptr_table_find(aTHX_ tbl, oldsv);
+ PTR_TBL_ENT_t *tblent = S_ptr_table_find(tbl, oldsv);
if (tblent) {
tblent->newval = newsv;
} else {
const UV entry = PTR_TABLE_HASH(oldsv) & tbl->tbl_max;
- new_body_inline(tblent, sizeof(struct ptr_tbl_ent), PTE_SVSLOT);
+ new_body_inline(tblent, PTE_SVSLOT);
+
tblent->oldval = oldsv;
tblent->newval = newsv;
tblent->next = tbl->tbl_ary[entry];
}
}
else {
- /* Copy the Null */
+ /* Copy the NULL */
if (SvTYPE(dstr) == SVt_RV)
SvRV_set(dstr, NULL);
else
switch (sv_type) {
default:
- Perl_croak(aTHX_ "Bizarre SvTYPE [%" IVdf "]",
- (IV)SvTYPE(sstr));
+ Perl_croak(aTHX_ "Bizarre SvTYPE [%" IVdf "]", (IV)SvTYPE(sstr));
break;
case SVt_PVGV:
if (GvUNIQUE((GV*)sstr)) {
- /* Do sharing here, and fall through */
+ /*EMPTY*/; /* Do sharing here, and fall through */
}
case SVt_PVIO:
case SVt_PVFM:
case SVt_PVNV:
case SVt_PVIV:
case SVt_PV:
- assert(sv_type_details->size);
+ assert(sv_type_details->body_size);
if (sv_type_details->arena) {
- new_body_inline(new_body, sv_type_details->size, sv_type);
+ new_body_inline(new_body, sv_type);
new_body
= (void*)((char*)new_body - sv_type_details->offset);
} else {
#else
Copy(((char*)SvANY(sstr)),
((char*)SvANY(dstr)),
- sv_type_details->size + sv_type_details->offset, char);
+ sv_type_details->body_size + sv_type_details->offset, char);
#endif
if (sv_type != SVt_PVAV && sv_type != SVt_PVHV)
else
IoOFP(dstr) = fp_dup(IoOFP(dstr), IoTYPE(dstr), param);
/* PL_rsfp_filters entries have fake IoDIRP() */
- if (IoDIRP(dstr) && !(IoFLAGS(dstr) & IOf_FAKE_DIRP))
- IoDIRP(dstr) = dirp_dup(IoDIRP(dstr));
if(IoFLAGS(dstr) & IOf_FAKE_DIRP) {
/* I have no idea why fake dirp (rsfps)
should be treated differently but otherwise
IoTOP_GV(dstr) = gv_dup(IoTOP_GV(dstr), param);
IoFMT_GV(dstr) = gv_dup(IoFMT_GV(dstr), param);
IoBOTTOM_GV(dstr) = gv_dup(IoBOTTOM_GV(dstr), param);
+ if (IoDIRP(dstr)) {
+ IoDIRP(dstr) = dirp_dup(IoDIRP(dstr));
+ } else {
+ /*EMPTY*/;
+ /* IoDIRP(dstr) is already a copy of IoDIRP(sstr) */
+ }
}
IoTOP_NAME(dstr) = SAVEPV(IoTOP_NAME(dstr));
IoFMT_NAME(dstr) = SAVEPV(IoFMT_NAME(dstr));
av_push(param->stashes, dstr);
}
break;
- case SVt_PVFM:
case SVt_PVCV:
+ if (!(param->flags & CLONEf_COPY_STACKS)) {
+ CvDEPTH(dstr) = 0;
+ }
+ case SVt_PVFM:
/* NOTE: not refcounted */
CvSTASH(dstr) = hv_dup(CvSTASH(dstr), param);
OP_REFCNT_LOCK;
- CvROOT(dstr) = OpREFCNT_inc(CvROOT(dstr));
+ if (!CvISXSUB(dstr))
+ CvROOT(dstr) = OpREFCNT_inc(CvROOT(dstr));
OP_REFCNT_UNLOCK;
if (CvCONST(dstr)) {
CvXSUBANY(dstr).any_ptr = GvUNIQUE(CvGV(dstr)) ?
* duped GV may never be freed. A bit of a hack! DAPM */
CvGV(dstr) = (param->flags & CLONEf_JOIN_IN) ?
NULL : gv_dup(CvGV(dstr), param) ;
- if (!(param->flags & CLONEf_COPY_STACKS)) {
- CvDEPTH(dstr) = 0;
- }
PAD_DUP(CvPADLIST(dstr), CvPADLIST(sstr), param);
CvOUTSIDE(dstr) =
CvWEAKOUTSIDE(sstr)
? cv_dup( CvOUTSIDE(dstr), param)
: cv_dup_inc(CvOUTSIDE(dstr), param);
- if (!CvXSUB(dstr))
+ if (!CvISXSUB(dstr))
CvFILE(dstr) = SAVEPV(CvFILE(dstr));
break;
}
OpREFCNT_inc(o);
break;
default:
- TOPPTR(nss,ix) = Nullop;
+ TOPPTR(nss,ix) = NULL;
break;
}
}
else
- TOPPTR(nss,ix) = Nullop;
+ TOPPTR(nss,ix) = NULL;
break;
case SAVEt_FREEPV:
c = (char*)POPPTR(ss,ix);
IV i;
CLONE_PARAMS clone_params;
- CLONE_PARAMS* param = &clone_params;
+ CLONE_PARAMS* const param = &clone_params;
- PerlInterpreter *my_perl = (PerlInterpreter*)(*ipM->pMalloc)(ipM, sizeof(PerlInterpreter));
+ PerlInterpreter * const my_perl = (PerlInterpreter*)(*ipM->pMalloc)(ipM, sizeof(PerlInterpreter));
/* for each stash, determine whether its objects should be cloned */
S_visit(proto_perl, do_mark_cloneable_stash, SVt_PVHV, SVTYPEMASK);
PERL_SET_THX(my_perl);
# ifdef DEBUGGING
Poison(my_perl, 1, PerlInterpreter);
- PL_op = Nullop;
- PL_curcop = (COP *)Nullop;
+ PL_op = NULL;
+ PL_curcop = NULL;
PL_markstack = 0;
PL_scopestack = 0;
PL_savestack = 0;
IV i;
CLONE_PARAMS clone_params;
CLONE_PARAMS* param = &clone_params;
- PerlInterpreter *my_perl = (PerlInterpreter*)PerlMem_malloc(sizeof(PerlInterpreter));
+ PerlInterpreter * const my_perl = (PerlInterpreter*)PerlMem_malloc(sizeof(PerlInterpreter));
/* for each stash, determine whether its objects should be cloned */
S_visit(proto_perl, do_mark_cloneable_stash, SVt_PVHV, SVTYPEMASK);
PERL_SET_THX(my_perl);
# ifdef DEBUGGING
Poison(my_perl, 1, PerlInterpreter);
- PL_op = Nullop;
- PL_curcop = (COP *)Nullop;
+ PL_op = NULL;
+ PL_curcop = NULL;
PL_markstack = 0;
PL_scopestack = 0;
PL_savestack = 0;
PL_localizing = proto_perl->Tlocalizing;
PL_errors = sv_dup_inc(proto_perl->Terrors, param);
- PL_hv_fetch_ent_mh = Nullhe;
+ PL_hv_fetch_ent_mh = NULL;
PL_modcount = proto_perl->Tmodcount;
- PL_lastgotoprobe = Nullop;
+ PL_lastgotoprobe = NULL;
PL_dumpindent = proto_perl->Tdumpindent;
PL_sortcop = (OP*)any_dup(proto_perl->Tsortcop, proto_perl);
? '@' : '%',
o->op_targ, NULL, 0, FUV_SUBSCRIPT_WITHIN);
}
-
break;
case OP_AASSIGN:
case OP_CHOMP:
if (SvROK(PL_rs) && uninit_sv == SvRV(PL_rs))
return sv_2mortal(newSVpvs("${$/}"));
- /* FALL THROUGH */
+ /*FALLTHROUGH*/
default:
do_op:
/* if all except one arg are constant, or have no side-effects,
* or are optimized away, then it's unambiguous */
- o2 = Nullop;
+ o2 = NULL;
for (kid=o; kid; kid = kid->op_sibling) {
if (kid &&
( (kid->op_type == OP_CONST && SvOK(cSVOPx_sv(kid)))
)
continue;
if (o2) { /* more than one found */
- o2 = Nullop;
+ o2 = NULL;
break;
}
o2 = kid;