/*    pp_sort.c
 *
 *    Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
 *    2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
 *
 *    You may distribute under the terms of either the GNU General Public
 *    License or the Artistic License, as specified in the README file.
 */

/*
 * ...they shuffled back towards the rear of the line. 'No, not at the
 * rear!' the slave-driver shouted. 'Three files up. And stay there...
 *
 *     [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
 */
/* This file contains pp ("push/pop") functions that
 * execute the opcodes that make up a perl program. A typical pp function
 * expects to find its arguments on the stack, and usually pushes its
 * results onto the stack, hence the 'pp' terminology. Each OP structure
 * contains a pointer to the relevant pp_foo() function.
 *
 * This particular file just contains pp_sort(), which is complex
 * enough to merit its own file! See the other pp*.c files for the rest of
 * the pp_ functions.
 */
#define PERL_IN_PP_SORT_C
#include "perl.h"

#define sv_cmp_static Perl_sv_cmp
#define sv_cmp_locale_static Perl_sv_cmp_locale

#define SMALLSORT (200)
/* Flags for sortsv_flags */
#define SORTf_DESC     1
#define SORTf_STABLE   2
#define SORTf_UNSTABLE 8
/*
 * The mergesort implementation is by Peter M. McIlroy <pmcilroy@lucent.com>.
 *
 * The original code was written in conjunction with BSD Computer Software
 * Research Group at University of California, Berkeley.
 *
 * See also: "Optimistic Sorting and Information Theoretic Complexity"
 *           Peter McIlroy
 *           SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
 *           pp 467-474, Austin, Texas, 25-27 January 1993.
 *
 * The integration into Perl is by John P. Linderman <jpl.jpl@gmail.com>.
 *
 * The code can be distributed under the same terms as Perl itself.
 */
typedef char * aptr;            /* pointer for arithmetic on sizes */
typedef SV * gptr;              /* pointers in our lists */
/* Binary merge internal sort, with a few special mods
** for the special perl environment it now finds itself in.
**
** Things that were once options have been hotwired
** to values suitable for this use. In particular, we'll always
** initialize looking for natural runs, we'll always produce stable
** output, and we'll always do Peter McIlroy's binary merge.
*/
/* Pointer types for arithmetic and storage and convenience casts */

#define APTR(P)  ((aptr)(P))
#define GPTP(P)  ((gptr *)(P))
#define GPPP(P)  ((gptr **)(P))

/* byte offset from pointer P to (larger) pointer Q */
#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))

#define PSIZE sizeof(gptr)
/* If PSIZE is power of 2, make PSHIFT that power, if that helps */

#ifdef PSHIFT
#define PNELEM(P, Q)  (BYTEOFF(P,Q) >> (PSHIFT))
#define PNBYTE(N)     ((N) << (PSHIFT))
#define PINDEX(P, N)  (GPTP(APTR(P) + PNBYTE(N)))
#else
/* Leave optimization to compiler */
#define PNELEM(P, Q)  (GPTP(Q) - GPTP(P))
#define PNBYTE(N)     ((N) * (PSIZE))
#define PINDEX(P, N)  (GPTP(P) + (N))
#endif
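/* For example, with 8-byte pointers one could define PSHIFT as 3, making
 * PNELEM(P, Q) a right shift by 3 rather than a division by 8 (purely an
 * illustration; PSHIFT is not defined here). */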
/* Pointer into other corresponding to pointer into this */
#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))
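/* Copy *src through *(lim-1) to dst, advancing both pointers as it goes.
 * Note the bottom-tested loop: it assumes there is at least one element
 * to copy, i.e. src < lim on entry. */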
#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
/* Runs are identified by a pointer in the auxiliary list.
** The pointer is at the start of the list,
** and it points to the start of the next list.
** NEXT is used as an lvalue, too.
*/
#define NEXT(P) (*GPPP(P))
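/* For example, if the first two runs have lengths 3 and 5, then
 * NEXT(list2) == list2 + 3 and NEXT(list2 + 3) == list2 + 8. */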
/* PTHRESH is the minimum number of pairs with the same sense to justify
** checking for a run and extending it. Note that PTHRESH counts PAIRS,
** not just elements, so PTHRESH == 8 means a run of 16.
*/
#define PTHRESH (8)
/* RTHRESH is the number of elements in a run that must compare low
** to the low element from the opposing run before we justify
** doing a binary rampup instead of single stepping.
** In random input, N in a row low should only happen with
** probability 2^(1-N), so we can risk that we are dealing
** with orderly input without paying much when we aren't.
*/
/*
** Overview of algorithm and variables.
** The array of elements at list1 will be organized into runs of length 2,
** or runs of length >= 2 * PTHRESH. We only try to form long runs when
** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
**
** Unless otherwise specified, pair pointers address the first of two elements.
**
** b and b+1 are a pair that compare with sense "sense".
** b is the "bottom" of adjacent pairs that might form a longer run.
**
** p2 parallels b in the list2 array, where runs are defined by
** a pointer chain.
**
** t represents the "top" of the adjacent pairs that might extend
** the run beginning at b. Usually, t addresses a pair
** that compares with opposite sense from (b,b+1).
** However, it may also address a singleton element at the end of list1,
** or it may be equal to "last", the first element beyond list1.
**
** r addresses the Nth pair following b. If this would be beyond t,
** we back it off to t. Only when r is less than t do we consider the
** run long enough to be worth checking.
**
** q addresses a pair such that the pairs at b through q already form a run.
** Often, q will equal b, indicating we are only sure of the pair itself.
** However, a search on the previous cycle may have revealed a longer run,
** so q may be greater than b.
**
** p is used to work back from a candidate r, trying to reach q,
** which would mean b through r would be a run. If we discover such a run,
** we start q at r and try to push it further towards t.
** If b through r is NOT a run, we detect the wrong order at (p-1,p).
** In any event, after the check (if any), we have two main cases.
**
** 1) Short run. b <= q < p <= r <= t.
**    b through q is a run (perhaps trivial)
**    q through p are uninteresting pairs
**    p through r is a run
**
** 2) Long run. b < r <= q < t.
**    b through q is a run (of length >= 2 * PTHRESH)
**
** Note that degenerate cases are not only possible, but likely.
** For example, if the pair following b compares with opposite sense,
** then b == q < p == r == t.
*/
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
    gptr *b, *p, *q, *t, *p2;

    last = PINDEX(b, nmemb);
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }

        /* Having laid out the playing field, look for long runs */
        p = r = b + (2 * PTHRESH);
        if (r >= t) p = r = t;                  /* too short to care about */
        while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
            /* b through r is a (long) run.
            ** Extend it as far as possible.
            */
            while (((p += 2) < t) &&
                   ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
            r = p = q + 2;                      /* no simple pairs, no after-run */

        if (q > b) {                            /* run of greater than 2 at b */

            /* pick up singleton, if possible */
                ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                savep = r = p = q = last;
            p2 = NEXT(p2) = p2 + (p - b); ++runs;

        while (q < p) {                         /* simple pairs */
            p2 = NEXT(p2) = p2 + 2; ++runs;

        if (((b = p) == t) && ((t+1) == last)) {
            NEXT(p2) = p2 + 1; ++runs;
/* The original merge sort, in use since 5.7, was as fast as, or faster than,
 * qsort on many platforms, but slower than qsort, conspicuously so,
 * on others. The most likely explanation was platform-specific
 * differences in cache sizes and relative speeds.
 *
 * The quicksort divide-and-conquer algorithm guarantees that, as the
 * problem is subdivided into smaller and smaller parts, the parts
 * fit into smaller (and faster) caches. So it doesn't matter how
 * many levels of cache exist: quicksort will "find" them, and,
 * as long as smaller is faster, take advantage of them.
 *
 * By contrast, consider how the original mergesort algorithm worked.
 * Suppose we have five runs (each typically of length 2 after dynprep).
 *
 * Adjacent pairs are merged in "grand sweeps" through the input.
 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 * runs 3 and 4 are merged and the records from run 5 have been copied.
 * The only cache that matters is one large enough to hold *all* the input.
 * On some platforms, such a cache may be many times slower than the smaller
 * caches.
 *
 * The following pseudo-code uses the same basic merge algorithm,
 * but in a divide-and-conquer way.
 *
 * # merge $runs runs at offset $offset of list $list1 into $list2.
 * # all unmerged runs ($runs == 1) originate in list $base.
 * sub mgsort2 {
 *     my ($offset, $runs, $base, $list1, $list2) = @_;
 *
 *     if ($runs == 1) {
 *         if ($list1 is $base) copy run to $list2
 *         return offset of end of list (or copy)
 *     } else {
 *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
 *         mgsort2($off2, $runs/2, $base, $list2, $list1)
 *         merge the adjacent runs at $offset of $list1 into $list2
 *         return the offset of the end of the merged runs
 *     }
 * }
 *
 * The top-level call is
 *
 *     mgsort2(0, $runs, $base, $aux, $base);
 *
 * For our 5 runs, mgsort2 first splits them 3 and 2, the 3 into 2 and 1,
 * and each group of 2 into 1 and 1, and the corresponding activity looks like
 *
 *     copy runs 1 and 2 from base to aux
 *     merge runs 1 and 2 from aux to base
 *     (run 3 is where it belongs, no copy needed)
 *     merge runs 12 and 3 from base to aux
 *     (runs 4 and 5 are where they belong, no copy needed)
 *     merge runs 4 and 5 from base to aux
 *     merge runs 123 and 45 from aux to base
 *
 * Note that we merge runs 1 and 2 immediately after copying them,
 * while they are still likely to be in fast cache. Similarly,
 * run 3 is merged with run 12 while it still may be lingering in cache.
 * This implementation should therefore enjoy much of the cache-friendly
 * behavior that quicksort does. In addition, it does less copying
 * than the original mergesort implementation (only runs 1 and 2 are copied)
 * and the "balancing" of merges is better (merged runs comprise more nearly
 * equal numbers of original runs).
 *
 * The actual cache-friendly implementation will use a pseudo-stack
 * to avoid recursion, and will unroll processing of runs of length 2,
 * but it is otherwise similar to the recursive implementation.
 */
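/* Illustrative only: a minimal, self-contained C sketch of the recursive
 * shape described above, operating on plain ints rather than gptrs and on
 * fixed-length-2 runs.  The names mgsort2_sketch() and merge_adjacent() are
 * hypothetical and exist nowhere else in the perl sources; the real code
 * below replaces the recursion with the off_runs pseudo-stack and merges
 * SVs via an SVCOMPARE_t callback.  The block is guarded by #if 0 so it is
 * never compiled into this file.
 */
#if 0
#include <stddef.h>
#include <string.h>

/* Stable merge of the adjacent ascending runs src[0..mid-1] and
 * src[mid..len-1] into dst[0..len-1]; ties are taken from the left run. */
static void
merge_adjacent(const int *src, size_t mid, size_t len, int *dst)
{
    size_t i = 0, j = mid, k = 0;

    while (i < mid && j < len)
        dst[k++] = (src[j] < src[i]) ? src[j++] : src[i++];
    while (i < mid)
        dst[k++] = src[i++];
    while (j < len)
        dst[k++] = src[j++];
}

/* Merge `runs` length-2 runs (the last possibly a singleton) starting at
 * element `offset` of list1 into list2; unmerged runs live in `base`.
 * Returns the offset just past the merged output, as in the pseudo-code. */
static size_t
mgsort2_sketch(size_t offset, size_t runs, int *base,
               int *list1, int *list2, size_t nmemb)
{
    size_t mid, end;

    if (runs == 1) {
        end = offset + 2;
        if (end > nmemb)
            end = nmemb;
        if (list1 == base)      /* copy the run only if it isn't in list2 */
            memcpy(list2 + offset, base + offset,
                   (end - offset) * sizeof(int));
        return end;
    }
    /* Recurse on the larger half, then the smaller, with the two arrays'
     * roles swapped, so both halves land in list1 ... */
    mid = mgsort2_sketch(offset, runs - runs / 2, base, list2, list1, nmemb);
    end = mgsort2_sketch(mid, runs / 2, base, list2, list1, nmemb);
    /* ... then merge them into list2 while they are still cache-warm. */
    merge_adjacent(list1 + offset, mid - offset, end - offset, list2 + offset);
    return end;
}

/* Usage, mirroring the top-level mgsort2(0, $runs, $base, $aux, $base) call:
 * with a scratch array `aux` of at least n ints,
 *     mgsort2_sketch(0, (n + 1) / 2, a, aux, a, n);
 * leaves a[0..n-1] sorted, assuming each pair a[2i] <= a[2i+1] already
 * holds (i.e. after a dynprep-style pass). */
#endif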
typedef struct {
    IV  offset;         /* offset of 1st of 2 runs at this level */
    IV  runs;           /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */
static I32
cmp_desc(pTHX_ gptr const a, gptr const b)
{
    return -PL_sort_RealCmp(aTHX_ a, b);
}
/*
=head1 SV Manipulation Functions

=for apidoc sortsv_flags

In-place sort an array of SV pointers with the given comparison routine,
with various SORTf_* flag options.

=cut
*/
void
Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    gptr *f1, *f2, *t, *b, *p;

    gptr small[SMALLSORT];

    off_runs stack[60], *stackp;
    SVCOMPARE_t savecmp = NULL;

    PERL_ARGS_ASSERT_SORTSV_FLAGS;
    if (nmemb <= 1) return;                     /* sorted trivially */

    if ((flags & SORTf_DESC) != 0) {
        savecmp = PL_sort_RealCmp;      /* Save current comparison routine, if any */
        PL_sort_RealCmp = cmp;  /* Put comparison routine where cmp_desc can find it */

    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { Newx(aux,nmemb,gptr); }              /* allocate auxiliary array */

    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    which[0] = which[2] = base;
    /* On levels where both runs have been constructed (stackp->runs == 0),
     * merge them, and note the offset of their end, in case the offset
     * is needed at the next level up. Hop up a level, and,
     * as long as stackp->runs is 0, keep merging.
     */
        IV runs = stackp->runs;

        list1 = which[iwhich];                  /* area where runs are now */
        list2 = which[++iwhich];                /* area for merged runs */

            offset = stackp->offset;
            f1 = p1 = list1 + offset;           /* start of first run */
            p = tp2 = list2 + offset;           /* where merged run will go */
            t = NEXT(p);                        /* where first run ends */
            f2 = l1 = POTHER(t, list2, list1);  /* ... on the other side */
            t = NEXT(t);                        /* where the second run ends */
            l2 = POTHER(t, list2, list1);       /* ... on the other side */
            offset = PNELEM(list2, t);
            while (f1 < l1 && f2 < l2) {
                /* If head 1 is larger than head 2, find ALL the elements
                ** in list 2 strictly less than head1, write them all,
                ** then head 1. Then compare the new heads, and repeat,
                ** until one or both lists are exhausted.
                **
                ** In all comparisons (after establishing
                ** which head to merge) the item to merge
                ** (at pointer q) is the first operand of
                ** the comparison. When we want to know
                ** if "q is strictly less than the other",
                ** we can't just do
                **      cmp(q, other) < 0
                ** because stability demands that we treat equality
                ** as high when q comes from l2, and as low when
                ** q was from l1. So we ask the question by doing
                **      cmp(q, other) <= sense
                ** and make sense == 0 when equality should look low,
                ** and -1 when equality should look high.
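                **
                ** For example, if *q compares equal to the element it is
                ** tested against: when q came from l2, sense == -1 makes the
                ** equal q look high, so the element from l1 is emitted first;
                ** when q came from l1, sense == 0 makes the equal q look low,
                ** so again the l1 element goes first. Equal elements thus
                ** keep their original l1-before-l2 order, which is what
                ** stability requires.
                */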
                if (cmp(aTHX_ *f1, *f2) <= 0) {
                    q = f2; b = f1; t = l1;
                    q = f1; b = f2; t = l2;

                /*
                ** Leave t at something strictly
                ** greater than q (or at the end of the list),
                ** and b at something strictly less than q.
                */
                for (i = 1, run = 0 ;;) {
                    if ((p = PINDEX(b, i)) >= t) {
                        if (((p = PINDEX(t, -1)) > b) &&
                            (cmp(aTHX_ *q, *p) <= sense))
                    } else if (cmp(aTHX_ *q, *p) <= sense) {
                        if (++run >= RTHRESH) i += i;

                /* q is known to follow b and must be inserted before t.
                ** Increment b, so the range of possibilities is [b,t).
                ** Round binary split down, to favor early appearance.
                ** Adjust b and t until q belongs just before t.
                */
                    p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                    if (cmp(aTHX_ *q, *p) <= sense) {

                /* Copy all the strictly low elements */
                    FROMTOUPTO(f2, tp2, t);
                    FROMTOUPTO(f1, tp2, t);

            /* Run out remaining list */
            if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
            } else FROMTOUPTO(f1, tp2, l1);
            p1 = NEXT(p1) = POTHER(tp2, list2, list1);

            if (--level == 0) goto done;

            t = list1; list1 = list2; list2 = t;        /* swap lists */
        } while ((runs = stackp->runs) == 0);
        stackp->runs = 0;       /* current run will finish level */
        /* While there are more than 2 runs remaining,
         * turn them into exactly 2 runs (at the "other" level),
         * each made up of approximately half the runs.
         * Stack the second half for later processing,
         * and set about producing the first half now.
         */
            stackp->offset = offset;
            runs -= stackp->runs = runs / 2;
        /* We must construct a single run from 1 or 2 runs.
         * All the original runs are in which[0] == base.
         * The run we construct must end up in which[level&1].
         */
            /* Constructing a single run from a single run.
             * If it's where it belongs already, there's nothing to do.
             * Otherwise, copy it to where it belongs.
             * A run of 1 is either a singleton at level 0,
             * or the second half of a split 3. In neither event
             * is it necessary to set offset. It will be set by the merge
             * that immediately follows.
             */
            if (iwhich) {       /* Belongs in aux, currently in base */
                f1 = b = PINDEX(base, offset);  /* where list starts */
                f2 = PINDEX(aux, offset);       /* where list goes */
                t = NEXT(f2);                   /* where list will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                t = PINDEX(base, offset);       /* where it currently ends */
                FROMTOUPTO(f1, f2, t);          /* copy */
                NEXT(b) = t;                    /* set up parallel pointer */
            } else if (level == 0) goto done;   /* single run at level 0 */
            /* Constructing a single run from two runs.
             * The merge code at the top will do that.
             * We need only make sure the two runs are in the "other" array,
             * so they'll end up in the correct array after the merge.
             */
                stackp->offset = offset;
                stackp->runs = 0;       /* take care of both runs, trigger merge */
                if (!iwhich) {  /* Merged runs belong in aux, copy 1st */
                    f1 = b = PINDEX(base, offset);      /* where first run starts */
                    f2 = PINDEX(aux, offset);           /* where it will be copied */
                    t = NEXT(f2);                       /* where first run will end */
                    offset = PNELEM(aux, t);            /* offset thereof */
                    p = PINDEX(base, offset);           /* end of first run */
                    t = NEXT(t);                        /* where second run will end */
                    t = PINDEX(base, PNELEM(aux, t));   /* where it now ends */
                    FROMTOUPTO(f1, f2, t);              /* copy both runs */
                    NEXT(b) = p;                        /* paralleled pointer for 1st */
                    NEXT(p) = t;                        /* ... and for second */
  done:
    if (aux != small) Safefree(aux);    /* free iff allocated */

    if (savecmp != NULL) {
        PL_sort_RealCmp = savecmp;      /* Restore current comparison routine, if any */
    }
}
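/* Illustrative only: a hedged sketch of how sortsv_flags() might be called
 * from core code, assuming a plain (non-magical) array whose elements are
 * to be compared with Perl_sv_cmp and kept in a stable order.  The helper
 * name sort_av_stably() is hypothetical and not part of this file; the
 * block is guarded by #if 0 so it is never compiled.
 */
#if 0
static void
sort_av_stably(pTHX_ AV *av)
{
    SSize_t n = AvFILL(av) + 1;         /* number of elements */

    if (n > 1)
        sortsv_flags(AvARRAY(av), (size_t)n, Perl_sv_cmp, SORTf_STABLE);
}
#endif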
/*
=head1 Array Manipulation Functions

=for apidoc sortsv

In-place sort an array of SV pointers with the given comparison routine.

Currently this always uses mergesort. See C<L</sortsv_flags>> for a more
flexible routine.

=cut
*/
void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    sortsv_flags(array, nmemb, cmp, 0);
}
#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
PP(pp_sort)
{
    dSP; dMARK; dORIGMARK;
    SV **p1 = ORIGMARK+1, **p2;

    OP* const nextop = PL_op->op_next;

    bool hasargs = FALSE;

    const U8 priv = PL_op->op_private;
    const U8 flags = PL_op->op_flags;

    if ((priv & OPpSORT_DESCEND) != 0)
        sort_flags |= SORTf_DESC;
    if ((priv & OPpSORT_STABLE) != 0)
        sort_flags |= SORTf_STABLE;
    if ((priv & OPpSORT_UNSTABLE) != 0)
        sort_flags |= SORTf_UNSTABLE;
    if (gimme != G_ARRAY) {

    SAVEVPTR(PL_sortcop);
    if (flags & OPf_STACKED) {
        if (flags & OPf_SPECIAL) {
            OP *nullop = OpSIBLING(cLISTOP->op_first);  /* pass pushmark */
            assert(nullop->op_type == OP_NULL);
            PL_sortcop = nullop->op_next;

            cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);

            if (cv && SvPOK(cv)) {
                const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
                if (proto && strEQ(proto, "$$")) {

            if (cv && CvISXSUB(cv) && CvXSUB(cv)) {

            else if (!(cv && CvROOT(cv))) {

                else if (!CvANON(cv) && (gv = CvGV(cv))) {
                    if (cv != GvCV(gv)) cv = GvCV(gv);

                    autogv = gv_autoload_pvn(
                        GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
                        GvNAMEUTF8(gv) ? SVf_UTF8 : 0

                        SV *tmpstr = sv_newmortal();
                        gv_efullname3(tmpstr, gv, NULL);
                        DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",

                        DIE(aTHX_ "Undefined subroutine in sort");

                PL_sortcop = (OP*)cv;

                PL_sortcop = CvSTART(cv);
    /* optimiser converts "@a = sort @a" to "sort \@a". In this case,
     * push (@a) onto stack, then assign result back to @a at the end of
     * this function */
    if (priv & OPpSORT_INPLACE) {
        assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
        (void)POPMARK;  /* remove mark associated with ex-OP_AASSIGN */
        av = MUTABLE_AV((*SP));

            Perl_croak_no_modify();
        max = AvFILL(av) + 1;

            for (i=0; i < max; i++) {
                SV **svp = av_fetch(av, i, FALSE);
                *SP++ = (svp) ? *svp : NULL;

            SV **svp = AvARRAY(av);
            assert(svp || max == 0);
            for (i = 0; i < max; i++)

        p1 = p2 = SP - (max-1);
    /* shuffle stack down, removing optional initial cv (p1!=p2), plus
     * any nulls; also stringify, or convert to integer or number, any
     * args as required */
    copytmps = cBOOL(PL_sortcop);
    for (i=max; i > 0 ; i--) {
        if ((*p1 = *p2++)) {                    /* Weed out nulls. */
            if (copytmps && SvPADTMP(*p1)) {
                *p1 = sv_mortalcopy(*p1);

            if (priv & OPpSORT_NUMERIC) {
                if (priv & OPpSORT_INTEGER) {
                    (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);

                    (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                    if (all_SIVs && !SvSIOK(*p1))

                    (void)sv_2pv_flags(*p1, 0,
                        SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
        const bool oldcatch = CATCH_GET;
        I32 old_savestack_ix = PL_savestack_ix;

        PUSHSTACKi(PERLSI_SORT);
        if (!hasargs && !is_xsub) {
            SAVEGENERICSV(PL_firstgv);
            SAVEGENERICSV(PL_secondgv);
            PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
                gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
            PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
                gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
            /* make sure the GP isn't removed out from under us for
            save_gp(PL_firstgv, 0);
            save_gp(PL_secondgv, 0);
            /* we don't want modifications localized */
            GvINTRO_off(PL_firstgv);
            GvINTRO_off(PL_secondgv);
            SAVEGENERICSV(GvSV(PL_firstgv));
            SvREFCNT_inc(GvSV(PL_firstgv));
            SAVEGENERICSV(GvSV(PL_secondgv));
            SvREFCNT_inc(GvSV(PL_secondgv));
        cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
        if (!(flags & OPf_SPECIAL)) {
            cx->cx_type = CXt_SUB|CXp_MULTICALL;
            cx_pushsub(cx, cv, NULL, hasargs);

            PADLIST * const padlist = CvPADLIST(cv);

            if (++CvDEPTH(cv) >= 2)
                pad_push(padlist, CvDEPTH(cv));
            PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));

                /* This is mostly copied from pp_entersub */
                AV * const av = MUTABLE_AV(PAD_SVl(0));

                cx->blk_sub.savearray = GvAV(PL_defgv);
                GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
        Perl_sortsv_flags(aTHX_ start, max,
                          (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
                          sort_flags);

        /* Reset cx, in case the context stack has been reallocated. */

        PL_stack_sp = PL_stack_base + cx->blk_oldsp;

        if (!(flags & OPf_SPECIAL)) {
            assert(CxTYPE(cx) == CXt_SUB);

            assert(CxTYPE(cx) == CXt_NULL);
            /* there isn't a POPNULL ! */
        MEXTEND(SP, 20);        /* Can't afford stack realloc on signal. */
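        /* Pick a built-in comparison routine: numeric (integer/IV or NV),
         * locale-aware string comparison under 'use locale', or plain
         * string comparison; the S_amagic_* variants handle operands with
         * overloaded comparison operators. */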
        if (priv & OPpSORT_NUMERIC) {
            if ((priv & OPpSORT_INTEGER) || all_SIVs) {
                    Perl_sortsv_flags(aTHX_ start, max, S_amagic_i_ncmp, sort_flags);
                    Perl_sortsv_flags(aTHX_ start, max, S_sv_i_ncmp, sort_flags);
            }
            else {
                    Perl_sortsv_flags(aTHX_ start, max, S_amagic_ncmp, sort_flags);
                    Perl_sortsv_flags(aTHX_ start, max, S_sv_ncmp, sort_flags);
            }
        }
#ifdef USE_LOCALE_COLLATE
        else if(IN_LC_RUNTIME(LC_COLLATE)) {
                Perl_sortsv_flags(aTHX_ start, max, S_amagic_cmp_locale, sort_flags);
                Perl_sortsv_flags(aTHX_ start, max, sv_cmp_locale_static, sort_flags);
        }
#endif
        else {
                Perl_sortsv_flags(aTHX_ start, max, S_amagic_cmp, sort_flags);
                Perl_sortsv_flags(aTHX_ start, max, sv_cmp_static, sort_flags);
        }
        if ((priv & OPpSORT_REVERSE) != 0) {
            SV **q = start+max-1;

                SV * const tmp = *start;

        /* copy back result to the array */
            SV** const base = MARK+1;
            SSize_t max_minus_one = max - 1;    /* attempt to work around mingw bug */

                for (i = 0; i <= max_minus_one; i++)
                    base[i] = newSVsv(base[i]);

                if (max_minus_one >= 0)
                    av_extend(av, max_minus_one);
                for (i=0; i <= max_minus_one; i++) {
                    SV * const sv = base[i];
                    SV ** const didstore = av_store(av, i, sv);
                /* the elements of av are likely to be the same as the
                 * (non-refcounted) elements on the stack, just in a different
                 * order. However, it's possible that someone's messed with av
                 * in the meantime. So bump and unbump the relevant refcounts
                 * first. */
                for (i = 0; i <= max_minus_one; i++) {

                    if (SvREFCNT(sv) > 1)
                        base[i] = newSVsv(sv);
                    else
                        SvREFCNT_inc_simple_void_NN(sv);
                }
                if (max_minus_one >= 0) {
                    av_extend(av, max_minus_one);
                    Copy(base, AvARRAY(av), max, SV*);
                }
                AvFILLp(av) = max_minus_one;

    PL_stack_sp = ORIGMARK + max;
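/* S_sortcv: the comparison wrapper used for a sort BLOCK (or a plain sub
 * without a ($$) prototype) that compares via the package variables $a and
 * $b: it installs a and b as $a and $b, runs the op tree at PL_sortcop,
 * and returns the integer result left on the stack. */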
S_sortcv(pTHX_ SV *const a, SV *const b)
    const I32 oldsaveix = PL_savestack_ix;

    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV;

    olda = GvSV(PL_firstgv);
    GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);

    oldb = GvSV(PL_secondgv);
    GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);

    PL_stack_sp = PL_stack_base;

    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
    const I32 oldsaveix = PL_savestack_ix;

    AV * const av = GvAV(PL_defgv);
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV_STACKED;

        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);

    PL_stack_sp = PL_stack_base;

    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
    const I32 oldsaveix = PL_savestack_ix;
    CV * const cv = MUTABLE_CV(PL_sortcop);

    PMOP * const pm = PL_curpm;

    PERL_ARGS_ASSERT_SORTCV_XSUB;

    (void)(*CvXSUB(cv))(aTHX_ cv);
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
S_sv_ncmp(pTHX_ SV *const a, SV *const b)
    I32 cmp = do_ncmp(a, b);

    PERL_ARGS_ASSERT_SV_NCMP;

        if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
static I32
S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    const IV iv1 = SvIV(a);
    const IV iv2 = SvIV(b);

    PERL_ARGS_ASSERT_SV_I_NCMP;

    return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
}
#define tryCALL_AMAGICbin(left,right,meth) \
    (SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL

#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))
S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_NCMP;

            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);

            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);

    return S_sv_ncmp(aTHX_ a, b);
S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_I_NCMP;

            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);

            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);

    return S_sv_i_ncmp(aTHX_ a, b);
S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP;

            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);

            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);

    return sv_cmp(str1, str2);
#ifdef USE_LOCALE_COLLATE

S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;

            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);

            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);

    return sv_cmp_locale(str1, str2);

#endif
/*
 * ex: set ts=8 sts=4 sw=4 et:
 */