PP(pp_rv2av)
{
dSP; dTOPss;
- const I32 gimme = GIMME_V;
+ const U8 gimme = GIMME_V;
static const char an_array[] = "an ARRAY";
static const char a_hash[] = "a HASH";
const bool is_pp_rv2av = PL_op->op_type == OP_RV2AV
SV *sv;
AV *ary;
- I32 gimme;
+ U8 gimme;
HV *hash;
SSize_t i;
int magic;
const char *truebase; /* Start of string */
REGEXP *rx = PM_GETRE(pm);
bool rxtainted;
- const I32 gimme = GIMME_V;
+ const U8 gimme = GIMME_V;
STRLEN len;
const I32 oldsave = PL_savestack_ix;
I32 had_zerolen = 0;
PerlIO *fp;
IO * const io = GvIO(PL_last_in_gv);
const I32 type = PL_op->op_type;
- const I32 gimme = GIMME_V;
+ const U8 gimme = GIMME_V;
if (io) {
const MAGIC *const mg = SvTIED_mg((const SV *)io, PERL_MAGIC_tiedscalar);
SV **itersvp;
SV *retsv;
+ SV *sv;
+ AV *av;
+ IV ix;
+ IV inc;
+
cx = CX_CUR();
itersvp = CxITERVAR(cx);
+ assert(itersvp);
switch (CxTYPE(cx)) {
goto retno;
oldsv = *itersvp;
- if (LIKELY(SvREFCNT(oldsv) == 1 && !SvMAGICAL(oldsv))) {
+ /* NB: on the first iteration, oldsv will have a ref count of at
+ * least 2 (one extra from blk_loop.itersave), so the GV or pad
+ * slot will get localised; on subsequent iterations the RC==1
+ * optimisation may kick in and the SV will be reused. */
+ if (oldsv && LIKELY(SvREFCNT(oldsv) == 1 && !SvMAGICAL(oldsv))) {
/* safe to reuse old SV */
sv_setsv(oldsv, cur);
}
* completely new SV for closures/references to work as
* they used to */
*itersvp = newSVsv(cur);
- SvREFCNT_dec_NN(oldsv);
+ SvREFCNT_dec(oldsv);
}
if (strEQ(SvPVX_const(cur), max))
sv_setiv(cur, 0); /* terminate next time */
goto retno;
oldsv = *itersvp;
- /* don't risk potential race */
- if (LIKELY(SvREFCNT(oldsv) == 1 && !SvMAGICAL(oldsv))) {
+ /* see NB comment above */
+ if (oldsv && LIKELY(SvREFCNT(oldsv) == 1 && !SvMAGICAL(oldsv))) {
/* safe to reuse old SV */
- sv_setiv(oldsv, cur);
+
+ if ( (SvFLAGS(oldsv) & (SVTYPEMASK|SVf_THINKFIRST|SVf_IVisUV))
+ == SVt_IV)
+ {
+ /* Cheap SvIOK_only().
+ * Assert that flags which SvIOK_only() would test or
+ * clear can't be set, because we're SVt_IV */
+ assert(!(SvFLAGS(oldsv) &
+ (SVf_OOK|SVf_UTF8|(SVf_OK & ~(SVf_IOK|SVp_IOK)))));
+ SvFLAGS(oldsv) |= (SVf_IOK|SVp_IOK);
+ /* SvIV_set() where sv_any points to head */
+ oldsv->sv_u.svu_iv = cur;
+
+ }
+ else
+ sv_setiv(oldsv, cur);
}
else
{
* completely new SV for closures/references to work as they
* used to */
*itersvp = newSViv(cur);
- SvREFCNT_dec_NN(oldsv);
+ SvREFCNT_dec(oldsv);
}
if (UNLIKELY(cur == IV_MAX)) {
break;
}
- {
- SV *sv;
- AV *av;
- IV ix;
- IV inc;
-
case CXt_LOOP_LIST: /* for (1,2,3) */
assert(OPpITER_REVERSED == 2); /* so inc becomes -1 or 1 */
*itersvp = sv;
SvREFCNT_dec(oldsv);
break;
- }
default:
DIE(aTHX_ "panic: pp_iter, type=%u", CxTYPE(cx));
*++PL_stack_sp =retsv;
return PL_op->op_next;
-
-
}
/*
* searching for places in this sub that uses a particular var:
* iters maxiters r_flags oldsave rxtainted orig dstr targ
* s m strend rx once */
- PUSHSUBST(cx);
+ CX_PUSHSUBST(cx);
RETURNOP(cPMOP->op_pmreplrootu.op_pmreplroot);
}
first = TRUE;
/* All done yet? */
if (UNLIKELY(PL_stack_base + *PL_markstack_ptr > SP)) {
I32 items;
- const I32 gimme = GIMME_V;
+ const U8 gimme = GIMME_V;
LEAVE_with_name("grep"); /* exit outer scope */
(void)POPMARK; /* pop src */
}
}
-/* leavesub_adjust_stacks():
+/* leave_adjust_stacks():
+ *
+ * Process a scope's return args (in the range from_sp+1 .. PL_stack_sp),
+ * positioning them at to_sp+1 onwards, and do the equivalent of a
+ * FREEMPS and TAINT_NOT.
*
- * Process the sub's return args (in the range base_sp+1 .. PL_stack_sp),
- * and do the equivalent of a FREEMPS (and TAINT_NOT).
* Not intended to be called in void context.
*
- * The main things done to process the return args are:
+ * When leaving a sub, eval, do{} or other scope, the things that need
+ * doing to process the return args are:
* * in scalar context, only return the last arg (or PL_sv_undef if none);
- * * make a TEMP copy of every return arg, except where we can optimise
- * the copy away without it being semantically visible;
- * * make sure the arg isn't prematurely freed; in the case of an arg
- * not copied, this may involve mortalising it. For example, in
+ * * for the types of return that return copies of their args (such
+ * as rvalue sub return), make a mortal copy of every return arg,
+ * except where we can optimise the copy away without it being
+ * semantically visible;
+ * * make sure that the arg isn't prematurely freed; in the case of an
+ * arg not copied, this may involve mortalising it. For example, in
* C<sub f { my $x = ...; $x }>, $x would be freed when we do
* CX_LEAVE_SCOPE(cx) unless it's protected or copied.
*
+ * What condition to use when deciding whether to pass the arg through
+ * or make a copy, is determined by the 'pass' arg; its valid values are:
+ * 0: rvalue sub/eval exit
+ * 1: other rvalue scope exit
+ * 2: :lvalue sub exit in rvalue context
+ * 3: :lvalue sub exit in lvalue context and other lvalue scope exits
+ *
* There is a big issue with doing a FREETMPS. We would like to free any
- * temps created by the last statement the sub executed, rather than
+ * temps created by the last statement which the sub executed, rather than
* leaving them for the caller. In a situation where a sub call isn't
* soon followed by a nextstate (e.g. nested recursive calls, a la
* fibonacci()), temps can accumulate, causing memory and performance
* issues.
*
* On the other hand, we don't want to free any TEMPs which are keeping
- * alive any return args that we skip copying; nor do we wish to undo any
- * mortalising or mortal copying we do here.
+ * alive any return args that we skipped copying; nor do we wish to undo
+ * any mortalising done here.
*
* The solution is to split the temps stack frame into two, with a cut
* point delineating the two halves. We arrange that by the end of this
* function, all the temps stack frame entries we wish to keep are in the
- * range PL_tmps_floor+1.. tmps_base-1, while the ones we free now are in
+ * range PL_tmps_floor+1.. tmps_base-1, while the ones to free now are in
* the range tmps_base .. PL_tmps_ix. During the course of this
* function, tmps_base starts off as PL_tmps_floor+1, then increases
* whenever we find or create a temp that we know should be kept. In
* To determine whether a TEMP is keeping a return arg alive, every
* arg that is kept rather than copied and which has the SvTEMP flag
* set, has the flag temporarily unset, to mark it. At the end we scan
- * stack temps stack frame above the cut for entries without SvTEMP and
+ * the temps stack frame above the cut for entries without SvTEMP and
* keep them, while turning SvTEMP on again. Note that if we die before
- * the SvTEMPs are enabled again, its safe: at worst, subsequent use of
+ * the SvTEMP flags are set again, it's safe: at worst, subsequent use of
* those SVs may be slightly less efficient.
*
* In practice various optimisations for some common cases mean we can
* avoid most of the scanning and swapping about with the temps stack.
*/
-STATIC void
-S_leavesub_adjust_stacks(pTHX_ SV **base_sp, I32 gimme)
+void
+Perl_leave_adjust_stacks(pTHX_ SV **from_sp, SV **to_sp, U8 gimme, int pass)
{
+ dVAR;
dSP;
- SV **from_sp; /* where we're copying args from */
SSize_t tmps_base; /* lowest index into tmps stack that needs freeing now */
SSize_t nargs;
+ PERL_ARGS_ASSERT_LEAVE_ADJUST_STACKS;
+
TAINT_NOT;
if (gimme == G_ARRAY) {
- from_sp = base_sp + 1;
- nargs = SP - base_sp;
+ nargs = SP - from_sp;
+ from_sp++;
}
else {
assert(gimme == G_SCALAR);
- if (UNLIKELY(base_sp >= SP)) {
+ if (UNLIKELY(from_sp >= SP)) {
/* no return args */
- assert(base_sp == SP);
+ assert(from_sp == SP);
EXTEND(SP, 1);
*++SP = &PL_sv_undef;
- base_sp = SP;
+ to_sp = SP;
nargs = 0;
}
else {
EXTEND_MORTAL(nargs); /* one big extend for worst-case scenario */
tmps_basep = PL_tmps_stack + tmps_base;
- /* whether any SVs have have SvTEMP temporarily turned off,
- * indicating that they need saving below the cut */
/* process each return arg */
SV *sv = *from_sp++;
assert(PL_tmps_ix + nargs < PL_tmps_max);
+#ifdef DEBUGGING
+ /* PADTMPs with container set magic shouldn't appear in the
+ * wild. This assert is more important for pp_leavesublv(),
+ * but by testing for it here, we're more likely to catch
+ * bad cases (what with :lvalue subs not being widely
+ * deployed). The two issues are that for something like
+ * sub :lvalue { $tied{foo} }
+ * or
+ * sub :lvalue { substr($foo,1,2) }
+ * pp_leavesublv() will croak if the sub returns a PADTMP,
+ * and currently functions like pp_substr() return a mortal
+ * rather than using their PADTMP when returning a PVLV.
+ * This is because the PVLV will hold a ref to $foo,
+ * so $foo would get delayed in being freed while
+ * the PADTMP SV remained in the PAD.
+ * So if this assert fails it means either:
+ * 1) there is pp code similar to pp_substr that is
+ * returning a PADTMP instead of a mortal, and probably
+ * needs fixing, or
+ * 2) pp_leavesublv is making unwarranted assumptions
+ * about always croaking on a PADTMP
+ */
+ if (SvPADTMP(sv) && SvSMAGICAL(sv)) {
+ MAGIC *mg;
+ for (mg = SvMAGIC(sv); mg; mg = mg->mg_moremagic) {
+ assert(PERL_MAGIC_TYPE_IS_VALUE_MAGIC(mg->mg_type));
+ }
+ }
+#endif
- if (SvTEMP(sv) && !SvMAGICAL(sv) && SvREFCNT(sv) == 1) {
- /* can optimise away the copy */
- *++base_sp = sv;
-
- /* Since this SV is an SvTEMP with a ref count of 1, we
- * don't need to re-mortalise it; instead we just need to
- * ensure that its existing entry in the temps stack frame
- * ends up below the cut and so avoids being freed this
- * time round. We mark it as needing to be kept by
- * temporarily unsetting SvTEMP; then at the end, we
- * shuffle any !SvTEMP entries on the tmps stack back
- * below the cut.
- * However, there's a significant chance that there's a
- * 1:1 correspondence between the first few (or all)
- * elements in the return args stack frame and those in
- * the temps stack frame;
- * e,g. sub f { ....; map {...} .... },
- * or e.g. if we're exiting multiple scopes and one of the
- * inner scopes has already made mortal copies of each
- * return arg.
- *
- * If so, this arg sv will correspond to the next item
- * above the cut, and so can be kept merely by moving the
- * cut boundary up one, rather than messing with SvTEMP.
- * If all args arre 1:1 then we can avoid the sorting
- * stage below completely.
- */
- if (sv == *tmps_basep)
- tmps_basep++;
- else
- SvTEMP_off(sv);
+ if (
+ pass == 0 ? (SvTEMP(sv) && !SvMAGICAL(sv) && SvREFCNT(sv) == 1)
+ : pass == 1 ? ((SvTEMP(sv) || SvPADTMP(sv)) && !SvMAGICAL(sv) && SvREFCNT(sv) == 1)
+ : pass == 2 ? (!SvPADTMP(sv))
+ : 1)
+ {
+ /* pass through: skip copy for logic or optimisation
+ * reasons; instead mortalise it, except that ... */
+ *++to_sp = sv;
+
+ if (SvTEMP(sv)) {
+ /* ... since this SV is an SvTEMP, we don't need to
+ * re-mortalise it; instead we just need to ensure
+ * that its existing entry in the temps stack frame
+ * ends up below the cut and so avoids being freed
+ * this time round. We mark it as needing to be kept
+ * by temporarily unsetting SvTEMP; then at the end,
+ * we shuffle any !SvTEMP entries on the tmps stack
+ * back below the cut.
+ * However, there's a significant chance that there's
+ * a 1:1 correspondence between the first few (or all)
+ * elements in the return args stack frame and those
+ * in the temps stack frame; e.g.:
+ * sub f { ....; map {...} .... },
+ * or if we're exiting multiple scopes and one of the
+ * inner scopes has already made mortal copies of each
+ * return arg.
+ *
+ * If so, this arg sv will correspond to the next item
+ * on the tmps stack above the cut, and so can be kept
+ * merely by moving the cut boundary up one, rather
+ * than messing with SvTEMP. If all args are 1:1 then
+ * we can avoid the sorting stage below completely.
+ *
+ * If there are no items above the cut on the tmps
+ * stack, then the SvTEMP must come from an item
+ * below the cut, so there's nothing to do.
+ */
+ if (tmps_basep <= &PL_tmps_stack[PL_tmps_ix]) {
+ if (sv == *tmps_basep)
+ tmps_basep++;
+ else
+ SvTEMP_off(sv);
+ }
+ }
+ else if (!SvPADTMP(sv)) {
+ /* mortalise arg to avoid it being freed during save
+ * stack unwinding. Pad tmps don't need mortalising as
+ * they're never freed. This is the equivalent of
+ * sv_2mortal(SvREFCNT_inc(sv)), except that:
+ * * it assumes that the temps stack has already been
+ * extended;
+ * * it puts the new item at the cut rather than at
+ * ++PL_tmps_ix, moving the previous occupant there
+ * instead.
+ */
+ if (!SvIMMORTAL(sv)) {
+ SvREFCNT_inc_simple_void_NN(sv);
+ SvTEMP_on(sv);
+ /* Note that if there's nothing above the cut,
+ * this copies the garbage one slot above
+ * PL_tmps_ix onto itself. This is harmless (the
+ * stack's already been extended), but might in
+ * theory trigger warnings from tools like ASan
+ */
+ PL_tmps_stack[++PL_tmps_ix] = *tmps_basep;
+ *tmps_basep++ = sv;
+ }
+ }
}
else {
/* Make a mortal copy of the SV.
PL_tmps_stack[++PL_tmps_ix] = *tmps_basep;
/* put it on the tmps stack early so it gets freed if we die */
*tmps_basep++ = newsv;
- *++base_sp = newsv;
+ *++to_sp = newsv;
if (SvTYPE(sv) <= SVt_IV) {
/* arg must be one of undef, IV/UV, or RV: skip
old_base = tmps_basep - PL_tmps_stack;
SvGETMAGIC(sv);
sv_setsv_flags(newsv, sv, SV_DO_COW_SVSETSV);
- /* the mg_get or sv_setv might have created new temps
+ /* the mg_get or sv_setsv might have created new temps
* or realloced the tmps stack; regrow and reload */
EXTEND_MORTAL(nargs);
tmps_basep = PL_tmps_stack + old_base;
tmps_base = tmps_basep - PL_tmps_stack;
}
- PL_stack_sp = base_sp;
+ PL_stack_sp = to_sp;
/* unrolled FREETMPS() but using tmps_base-1 rather than PL_tmps_floor */
while (PL_tmps_ix >= tmps_base) {
PP(pp_leavesub)
{
- I32 gimme;
+ U8 gimme;
PERL_CONTEXT *cx;
SV **oldsp;
OP *retop;
if (gimme == G_VOID)
PL_stack_sp = oldsp;
else
- S_leavesub_adjust_stacks(aTHX_ oldsp, gimme);
+ leave_adjust_stacks(oldsp, oldsp, gimme, 0);
CX_LEAVE_SCOPE(cx);
- POPSUB(cx); /* Stack values are safe: release CV and @_ ... */
- POPBLOCK(cx);
+ cx_popsub(cx); /* Stack values are safe: release CV and @_ ... */
+ cx_popblock(cx);
retop = cx->blk_sub.retop;
CX_POP(cx);
}
/* At this point we want to save PL_savestack_ix, either by doing a
- * PUSHSUB, or for XS, doing an ENTER. But we don't yet know the final
+ * cx_pushsub(), or for XS, doing an ENTER. But we don't yet know the final
* CV we will be using (so we don't know whether its XS, so we can't
- * PUSHSUB or ENTER yet), and determining cv may itself push stuff on
+ * cx_pushsub() or ENTER yet), and determining cv may itself push stuff on
* the save stack. So remember where we are currently on the save
* stack, and later update the CX or scopestack entry accordingly. */
old_savestack_ix = PL_savestack_ix;
/* these two fields are in a union. If they ever become separate,
* we have to test for both of them being null below */
+ assert(cv);
assert((void*)&CvROOT(cv) == (void*)&CvXSUB(cv));
while (UNLIKELY(!CvROOT(cv))) {
GV* autogv;
PADLIST *padlist;
I32 depth;
bool hasargs;
- I32 gimme;
+ U8 gimme;
/* keep PADTMP args alive throughout the call (we need to do this
* because @_ isn't refcounted). Note that we create the mortals
}
gimme = GIMME_V;
- PUSHBLOCK(cx, CXt_SUB, MARK);
+ cx = cx_pushblock(CXt_SUB, gimme, MARK, old_savestack_ix);
hasargs = cBOOL(PL_op->op_flags & OPf_STACKED);
- PUSHSUB(cx);
- cx->blk_sub.retop = PL_op->op_next;
- cx->blk_oldsaveix = old_savestack_ix;
+ cx_pushsub(cx, cv, PL_op->op_next, hasargs);
padlist = CvPADLIST(cv);
- if (UNLIKELY((depth = ++CvDEPTH(cv)) >= 2)) {
- PERL_STACK_OVERFLOW_CHECK();
+ if (UNLIKELY((depth = ++CvDEPTH(cv)) >= 2))
pad_push(padlist, depth);
- }
PAD_SET_CUR_NOSAVE(padlist, depth);
if (LIKELY(hasargs)) {
AV *const av = MUTABLE_AV(PAD_SVl(0));
/* it's the responsibility of whoever leaves a sub to ensure
* that a clean, empty AV is left in pad[0]. This is normally
- * done by POPSUB() */
+ * done by cx_popsub() */
assert(!AvREAL(av) && AvFILLp(av) == -1);
items = SP - MARK;
}
else {
SSize_t markix = TOPMARK;
+ bool is_scalar;
ENTER;
/* pretend we did the ENTER earlier */
PUTBACK;
if (UNLIKELY(((PL_op->op_private
- & PUSHSUB_GET_LVALUE_MASK(Perl_is_lvalue_sub)
+ & CX_PUSHSUB_GET_LVALUE_MASK(Perl_is_lvalue_sub)
) & OPpENTERSUB_LVAL_MASK) == OPpLVAL_INTRO &&
!CvLVALUE(cv)))
DIE(aTHX_ "Can't modify non-lvalue subroutine call of &%"SVf,
}
/* Do we need to open block here? XXXX */
+ /* calculate gimme here as PL_op might get changed and then not
+ * restored until the LEAVE further down */
+ is_scalar = (GIMME_V == G_SCALAR);
+
/* CvXSUB(cv) must not be NULL because newXS() refuses NULL xsub address */
assert(CvXSUB(cv));
CvXSUB(cv)(aTHX_ cv);
/* Enforce some sanity in scalar context. */
- if (GIMME_V == G_SCALAR) {
+ if (is_scalar) {
SV **svp = PL_stack_base + markix + 1;
if (svp != PL_stack_sp) {
*svp = svp > PL_stack_sp ? &PL_sv_undef : *PL_stack_sp;