3 * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
4 * 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
6 * You may distribute under the terms of either the GNU General Public
7 * License or the Artistic License, as specified in the README file.
12 * ...they shuffled back towards the rear of the line. 'No, not at the
13 * rear!' the slave-driver shouted. 'Three files up. And stay there...
15 * [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
18 /* This file contains pp ("push/pop") functions that
19 * execute the opcodes that make up a perl program. A typical pp function
20 * expects to find its arguments on the stack, and usually pushes its
21 * results onto the stack, hence the 'pp' terminology. Each OP structure
22 * contains a pointer to the relevant pp_foo() function.
24 * This particular file just contains pp_sort(), which is complex
25 * enough to merit its own file! See the other pp*.c files for the rest of
30 #define PERL_IN_PP_SORT_C
34 #define SMALLSORT (200)
37 /* Flags for sortsv_flags */
38 #define SORTf_STABLE 1
39 #define SORTf_UNSTABLE 2
42 * The mergesort implementation is by Peter M. Mcilroy <pmcilroy@lucent.com>.
44 * The original code was written in conjunction with BSD Computer Software
45 * Research Group at University of California, Berkeley.
47 * See also: "Optimistic Sorting and Information Theoretic Complexity"
49 * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
50 * pp 467-474, Austin, Texas, 25-27 January 1993.
52 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>.
54 * The code can be distributed under the same terms as Perl itself.
59 typedef char * aptr; /* pointer for arithmetic on sizes */
60 typedef SV * gptr; /* pointers in our lists */
62 /* Binary merge internal sort, with a few special mods
63 ** for the special perl environment it now finds itself in.
65 ** Things that were once options have been hotwired
66 ** to values suitable for this use. In particular, we'll always
67 ** initialize looking for natural runs, we'll always produce stable
68 ** output, and we'll always do Peter McIlroy's binary merge.
71 /* Pointer types for arithmetic and storage and convenience casts */
73 #define APTR(P) ((aptr)(P))
74 #define GPTP(P) ((gptr *)(P))
75 #define GPPP(P) ((gptr **)(P))
78 /* byte offset from pointer P to (larger) pointer Q */
79 #define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
81 #define PSIZE sizeof(gptr)
83 /* If PSIZE is power of 2, make PSHIFT that power, if that helps */
86 #define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT))
87 #define PNBYTE(N) ((N) << (PSHIFT))
88 #define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N)))
90 /* Leave optimization to compiler */
91 #define PNELEM(P, Q) (GPTP(Q) - GPTP(P))
92 #define PNBYTE(N) ((N) * (PSIZE))
93 #define PINDEX(P, N) (GPTP(P) + (N))
96 /* Pointer into other corresponding to pointer into this */
97 #define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))
99 #define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
102 /* Runs are identified by a pointer in the auxiliary list.
103 ** The pointer is at the start of the list,
104 ** and it points to the start of the next list.
105 ** NEXT is used as an lvalue, too.
108 #define NEXT(P) (*GPPP(P))
111 /* PTHRESH is the minimum number of pairs with the same sense to justify
112 ** checking for a run and extending it. Note that PTHRESH counts PAIRS,
113 ** not just elements, so PTHRESH == 8 means a run of 16.
118 /* RTHRESH is the number of elements in a run that must compare low
119 ** to the low element from the opposing run before we justify
120 ** doing a binary rampup instead of single stepping.
121 ** In random input, N in a row low should only happen with
122 ** probability 2^(1-N), so we can risk that we are dealing
123 ** with orderly input without paying much when we aren't.
130 ** Overview of algorithm and variables.
131 ** The array of elements at list1 will be organized into runs of length 2,
132 ** or runs of length >= 2 * PTHRESH. We only try to form long runs when
133 ** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
135 ** Unless otherwise specified, pair pointers address the first of two elements.
137 ** b and b+1 are a pair that compare with sense "sense".
138 ** b is the "bottom" of adjacent pairs that might form a longer run.
140 ** p2 parallels b in the list2 array, where runs are defined by
143 ** t represents the "top" of the adjacent pairs that might extend
144 ** the run beginning at b. Usually, t addresses a pair
145 ** that compares with opposite sense from (b,b+1).
146 ** However, it may also address a singleton element at the end of list1,
147 ** or it may be equal to "last", the first element beyond list1.
149 ** r addresses the Nth pair following b. If this would be beyond t,
150 ** we back it off to t. Only when r is less than t do we consider the
151 ** run long enough to consider checking.
153 ** q addresses a pair such that the pairs at b through q already form a run.
154 ** Often, q will equal b, indicating we only are sure of the pair itself.
155 ** However, a search on the previous cycle may have revealed a longer run,
156 ** so q may be greater than b.
158 ** p is used to work back from a candidate r, trying to reach q,
159 ** which would mean b through r would be a run. If we discover such a run,
160 ** we start q at r and try to push it further towards t.
161 ** If b through r is NOT a run, we detect the wrong order at (p-1,p).
162 ** In any event, after the check (if any), we have two main cases.
164 ** 1) Short run. b <= q < p <= r <= t.
165 ** b through q is a run (perhaps trivial)
166 ** q through p are uninteresting pairs
167 ** p through r is a run
169 ** 2) Long run. b < r <= q < t.
170 ** b through q is a run (of length >= 2 * PTHRESH)
172 ** Note that degenerate cases are not only possible, but likely.
173 ** For example, if the pair following b compares with opposite sense,
174 ** then b == q < p == r == t.
178 PERL_STATIC_FORCE_INLINE IV __attribute__always_inline__
179 dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
182 gptr *b, *p, *q, *t, *p2;
187 last = PINDEX(b, nmemb);
188 sense = (cmp(aTHX_ *b, *(b+1)) > 0);
189 for (p2 = list2; b < last; ) {
190 /* We just started, or just reversed sense.
191 ** Set t at end of pairs with the prevailing sense.
193 for (p = b+2, t = p; ++p < last; t = ++p) {
194 if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
197 /* Having laid out the playing field, look for long runs */
199 p = r = b + (2 * PTHRESH);
200 if (r >= t) p = r = t; /* too short to care about */
202 while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
205 /* b through r is a (long) run.
206 ** Extend it as far as possible.
209 while (((p += 2) < t) &&
210 ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
211 r = p = q + 2; /* no simple pairs, no after-run */
214 if (q > b) { /* run of greater than 2 at b */
218 /* pick up singleton, if possible */
221 ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
222 savep = r = p = q = last;
223 p2 = NEXT(p2) = p2 + (p - b); ++runs;
232 while (q < p) { /* simple pairs */
233 p2 = NEXT(p2) = p2 + 2; ++runs;
240 if (((b = p) == t) && ((t+1) == last)) {
241 NEXT(p2) = p2 + 1; ++runs;
252 /* The original merge sort, in use since 5.7, was as fast as, or faster than,
253 * qsort on many platforms, but slower than qsort, conspicuously so,
254 * on others. The most likely explanation was platform-specific
255 * differences in cache sizes and relative speeds.
257 * The quicksort divide-and-conquer algorithm guarantees that, as the
258 * problem is subdivided into smaller and smaller parts, the parts
259 * fit into smaller (and faster) caches. So it doesn't matter how
260 * many levels of cache exist, quicksort will "find" them, and,
261 * as long as smaller is faster, take advantage of them.
263 * By contrast, consider how the original mergesort algorithm worked.
264 * Suppose we have five runs (each typically of length 2 after dynprep).
273 * Adjacent pairs are merged in "grand sweeps" through the input.
274 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
275 * runs 3 and 4 are merged and the runs from run 5 have been copied.
276 * The only cache that matters is one large enough to hold *all* the input.
277 * On some platforms, this may be many times slower than smaller caches.
279 * The following pseudo-code uses the same basic merge algorithm,
280 * but in a divide-and-conquer way.
282 * # merge $runs runs at offset $offset of list $list1 into $list2.
283 * # all unmerged runs ($runs == 1) originate in list $base.
285 * my ($offset, $runs, $base, $list1, $list2) = @_;
288 * if ($list1 is $base) copy run to $list2
289 * return offset of end of list (or copy)
291 * $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
292 * mgsort2($off2, $runs/2, $base, $list2, $list1)
293 * merge the adjacent runs at $offset of $list1 into $list2
294 * return the offset of the end of the merged runs
297 * mgsort2(0, $runs, $base, $aux, $base);
299 * For our 5 runs, the tree of calls looks like
308 * and the corresponding activity looks like
310 * copy runs 1 and 2 from base to aux
311 * merge runs 1 and 2 from aux to base
312 * (run 3 is where it belongs, no copy needed)
313 * merge runs 12 and 3 from base to aux
314 * (runs 4 and 5 are where they belong, no copy needed)
315 * merge runs 4 and 5 from base to aux
316 * merge runs 123 and 45 from aux to base
318 * Note that we merge runs 1 and 2 immediately after copying them,
319 * while they are still likely to be in fast cache. Similarly,
320 * run 3 is merged with run 12 while it still may be lingering in cache.
321 * This implementation should therefore enjoy much of the cache-friendly
322 * behavior that quicksort does. In addition, it does less copying
323 * than the original mergesort implementation (only runs 1 and 2 are copied)
324 * and the "balancing" of merges is better (merged runs comprise more nearly
325 * equal numbers of original runs).
327 * The actual cache-friendly implementation will use a pseudo-stack
328 * to avoid recursion, and will unroll processing of runs of length 2,
329 * but it is otherwise similar to the recursive implementation.
333 IV offset; /* offset of 1st of 2 runs at this level */
334 IV runs; /* how many runs must be combined into 1 */
335 } off_runs; /* pseudo-stack element */
337 PERL_STATIC_FORCE_INLINE void
338 S_sortsv_flags_impl(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
342 gptr *f1, *f2, *t, *b, *p;
346 gptr small[SMALLSORT];
348 off_runs stack[60], *stackp;
350 PERL_UNUSED_ARG(flags);
351 PERL_ARGS_ASSERT_SORTSV_FLAGS_IMPL;
352 if (nmemb <= 1) return; /* sorted trivially */
354 if (nmemb <= SMALLSORT) aux = small; /* use stack for aux array */
355 else { Newx(aux,nmemb,gptr); } /* allocate auxiliary array */
358 stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
359 stackp->offset = offset = 0;
360 which[0] = which[2] = base;
363 /* On levels where both runs have be constructed (stackp->runs == 0),
364 * merge them, and note the offset of their end, in case the offset
365 * is needed at the next level up. Hop up a level, and,
366 * as long as stackp->runs is 0, keep merging.
368 IV runs = stackp->runs;
372 list1 = which[iwhich]; /* area where runs are now */
373 list2 = which[++iwhich]; /* area for merged runs */
376 offset = stackp->offset;
377 f1 = p1 = list1 + offset; /* start of first run */
378 p = tp2 = list2 + offset; /* where merged run will go */
379 t = NEXT(p); /* where first run ends */
380 f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
381 t = NEXT(t); /* where second runs ends */
382 l2 = POTHER(t, list2, list1); /* ... on the other side */
383 offset = PNELEM(list2, t);
384 while (f1 < l1 && f2 < l2) {
385 /* If head 1 is larger than head 2, find ALL the elements
386 ** in list 2 strictly less than head1, write them all,
387 ** then head 1. Then compare the new heads, and repeat,
388 ** until one or both lists are exhausted.
390 ** In all comparisons (after establishing
391 ** which head to merge) the item to merge
392 ** (at pointer q) is the first operand of
393 ** the comparison. When we want to know
394 ** if "q is strictly less than the other",
397 ** because stability demands that we treat equality
398 ** as high when q comes from l2, and as low when
399 ** q was from l1. So we ask the question by doing
400 ** cmp(q, other) <= sense
401 ** and make sense == 0 when equality should look low,
402 ** and -1 when equality should look high.
406 if (cmp(aTHX_ *f1, *f2) <= 0) {
407 q = f2; b = f1; t = l1;
410 q = f1; b = f2; t = l2;
417 ** Leave t at something strictly
418 ** greater than q (or at the end of the list),
419 ** and b at something strictly less than q.
421 for (i = 1, run = 0 ;;) {
422 if ((p = PINDEX(b, i)) >= t) {
424 if (((p = PINDEX(t, -1)) > b) &&
425 (cmp(aTHX_ *q, *p) <= sense))
429 } else if (cmp(aTHX_ *q, *p) <= sense) {
433 if (++run >= RTHRESH) i += i;
437 /* q is known to follow b and must be inserted before t.
438 ** Increment b, so the range of possibilities is [b,t).
439 ** Round binary split down, to favor early appearance.
440 ** Adjust b and t until q belongs just before t.
445 p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
446 if (cmp(aTHX_ *q, *p) <= sense) {
452 /* Copy all the strictly low elements */
455 FROMTOUPTO(f2, tp2, t);
458 FROMTOUPTO(f1, tp2, t);
464 /* Run out remaining list */
466 if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
467 } else FROMTOUPTO(f1, tp2, l1);
468 p1 = NEXT(p1) = POTHER(tp2, list2, list1);
470 if (--level == 0) goto done;
472 t = list1; list1 = list2; list2 = t; /* swap lists */
473 } while ((runs = stackp->runs) == 0);
477 stackp->runs = 0; /* current run will finish level */
478 /* While there are more than 2 runs remaining,
479 * turn them into exactly 2 runs (at the "other" level),
480 * each made up of approximately half the runs.
481 * Stack the second half for later processing,
482 * and set about producing the first half now.
487 stackp->offset = offset;
488 runs -= stackp->runs = runs / 2;
490 /* We must construct a single run from 1 or 2 runs.
491 * All the original runs are in which[0] == base.
492 * The run we construct must end up in which[level&1].
496 /* Constructing a single run from a single run.
497 * If it's where it belongs already, there's nothing to do.
498 * Otherwise, copy it to where it belongs.
499 * A run of 1 is either a singleton at level 0,
500 * or the second half of a split 3. In neither event
501 * is it necessary to set offset. It will be set by the merge
502 * that immediately follows.
504 if (iwhich) { /* Belongs in aux, currently in base */
505 f1 = b = PINDEX(base, offset); /* where list starts */
506 f2 = PINDEX(aux, offset); /* where list goes */
507 t = NEXT(f2); /* where list will end */
508 offset = PNELEM(aux, t); /* offset thereof */
509 t = PINDEX(base, offset); /* where it currently ends */
510 FROMTOUPTO(f1, f2, t); /* copy */
511 NEXT(b) = t; /* set up parallel pointer */
512 } else if (level == 0) goto done; /* single run at level 0 */
514 /* Constructing a single run from two runs.
515 * The merge code at the top will do that.
516 * We need only make sure the two runs are in the "other" array,
517 * so they'll end up in the correct array after the merge.
521 stackp->offset = offset;
522 stackp->runs = 0; /* take care of both runs, trigger merge */
523 if (!iwhich) { /* Merged runs belong in aux, copy 1st */
524 f1 = b = PINDEX(base, offset); /* where first run starts */
525 f2 = PINDEX(aux, offset); /* where it will be copied */
526 t = NEXT(f2); /* where first run will end */
527 offset = PNELEM(aux, t); /* offset thereof */
528 p = PINDEX(base, offset); /* end of first run */
529 t = NEXT(t); /* where second run will end */
530 t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
531 FROMTOUPTO(f1, f2, t); /* copy both runs */
532 NEXT(b) = p; /* paralleled pointer for 1st */
533 NEXT(p) = t; /* ... and for second */
538 if (aux != small) Safefree(aux); /* free iff allocated */
544 =for apidoc sortsv_flags
546 In-place sort an array of SV pointers with the given comparison routine,
547 with various SORTf_* flag options.
552 Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
554 PERL_ARGS_ASSERT_SORTSV_FLAGS;
556 sortsv_flags_impl(base, nmemb, cmp, flags);
560 * Each of sortsv_* functions contains an inlined copy of
561 * sortsv_flags_impl() with an inlined comparator. Basically, we are
562 * emulating C++ templates by using __attribute__((always_inline)).
564 * The purpose of that is to avoid the function call overhead inside
565 * the sorting routine, which calls the comparison function multiple
566 * times per sorted item.
570 sortsv_amagic_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
572 sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp, flags);
576 sortsv_amagic_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
578 sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp_desc, flags);
582 sortsv_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
584 sortsv_flags_impl(base, nmemb, S_sv_i_ncmp, flags);
588 sortsv_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
590 sortsv_flags_impl(base, nmemb, S_sv_i_ncmp_desc, flags);
594 sortsv_amagic_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
596 sortsv_flags_impl(base, nmemb, S_amagic_ncmp, flags);
600 sortsv_amagic_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
602 sortsv_flags_impl(base, nmemb, S_amagic_ncmp_desc, flags);
606 sortsv_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
608 sortsv_flags_impl(base, nmemb, S_sv_ncmp, flags);
612 sortsv_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
614 sortsv_flags_impl(base, nmemb, S_sv_ncmp_desc, flags);
618 sortsv_amagic_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
620 sortsv_flags_impl(base, nmemb, S_amagic_cmp, flags);
624 sortsv_amagic_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
626 sortsv_flags_impl(base, nmemb, S_amagic_cmp_desc, flags);
630 sortsv_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
632 sortsv_flags_impl(base, nmemb, Perl_sv_cmp, flags);
636 sortsv_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
638 sortsv_flags_impl(base, nmemb, S_cmp_desc, flags);
641 #ifdef USE_LOCALE_COLLATE
644 sortsv_amagic_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
646 sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale, flags);
650 sortsv_amagic_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
652 sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale_desc, flags);
656 sortsv_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
658 sortsv_flags_impl(base, nmemb, Perl_sv_cmp_locale, flags);
662 sortsv_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
664 sortsv_flags_impl(base, nmemb, S_cmp_locale_desc, flags);
673 In-place sort an array of SV pointers with the given comparison routine.
675 Currently this always uses mergesort. See C<L</sortsv_flags>> for a more
682 Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
684 PERL_ARGS_ASSERT_SORTSV;
686 sortsv_flags(array, nmemb, cmp, 0);
689 #define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
690 #define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
691 #define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
695 dSP; dMARK; dORIGMARK;
696 SV **p1 = ORIGMARK+1, **p2;
702 OP* const nextop = PL_op->op_next;
704 bool hasargs = FALSE;
707 const U8 priv = PL_op->op_private;
708 const U8 flags = PL_op->op_flags;
710 I32 all_SIVs = 1, descending = 0;
712 if ((priv & OPpSORT_DESCEND) != 0)
714 if ((priv & OPpSORT_STABLE) != 0)
715 sort_flags |= SORTf_STABLE;
716 if ((priv & OPpSORT_UNSTABLE) != 0)
717 sort_flags |= SORTf_UNSTABLE;
719 if (gimme != G_ARRAY) {
726 SAVEVPTR(PL_sortcop);
727 if (flags & OPf_STACKED) {
728 if (flags & OPf_SPECIAL) {
729 OP *nullop = OpSIBLING(cLISTOP->op_first); /* pass pushmark */
730 assert(nullop->op_type == OP_NULL);
731 PL_sortcop = nullop->op_next;
736 cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
738 if (cv && SvPOK(cv)) {
739 const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
740 if (proto && strEQ(proto, "$$")) {
744 if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
747 else if (!(cv && CvROOT(cv))) {
751 else if (!CvANON(cv) && (gv = CvGV(cv))) {
752 if (cv != GvCV(gv)) cv = GvCV(gv);
755 autogv = gv_autoload_pvn(
756 GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
757 GvNAMEUTF8(gv) ? SVf_UTF8 : 0
764 SV *tmpstr = sv_newmortal();
765 gv_efullname3(tmpstr, gv, NULL);
766 DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
771 DIE(aTHX_ "Undefined subroutine in sort");
776 PL_sortcop = (OP*)cv;
778 PL_sortcop = CvSTART(cv);
785 /* optimiser converts "@a = sort @a" to "sort \@a". In this case,
786 * push (@a) onto stack, then assign result back to @a at the end of
788 if (priv & OPpSORT_INPLACE) {
789 assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
790 (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
791 av = MUTABLE_AV((*SP));
793 Perl_croak_no_modify();
794 max = AvFILL(av) + 1;
797 for (i=0; i < max; i++) {
798 SV **svp = av_fetch(av, i, FALSE);
799 *SP++ = (svp) ? *svp : NULL;
803 SV **svp = AvARRAY(av);
804 assert(svp || max == 0);
805 for (i = 0; i < max; i++)
809 p1 = p2 = SP - (max-1);
816 /* shuffle stack down, removing optional initial cv (p1!=p2), plus
817 * any nulls; also stringify or converting to integer or number as
818 * required any args */
819 copytmps = cBOOL(PL_sortcop);
820 for (i=max; i > 0 ; i--) {
821 if ((*p1 = *p2++)) { /* Weed out nulls. */
822 if (copytmps && SvPADTMP(*p1)) {
823 *p1 = sv_mortalcopy(*p1);
827 if (priv & OPpSORT_NUMERIC) {
828 if (priv & OPpSORT_INTEGER) {
830 (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
834 (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
835 if (all_SIVs && !SvSIOK(*p1))
841 (void)sv_2pv_flags(*p1, 0,
842 SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
856 const bool oldcatch = CATCH_GET;
857 I32 old_savestack_ix = PL_savestack_ix;
862 PUSHSTACKi(PERLSI_SORT);
863 if (!hasargs && !is_xsub) {
864 SAVEGENERICSV(PL_firstgv);
865 SAVEGENERICSV(PL_secondgv);
866 PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
867 gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
869 PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
870 gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
872 /* make sure the GP isn't removed out from under us for
874 save_gp(PL_firstgv, 0);
875 save_gp(PL_secondgv, 0);
876 /* we don't want modifications localized */
877 GvINTRO_off(PL_firstgv);
878 GvINTRO_off(PL_secondgv);
879 SAVEGENERICSV(GvSV(PL_firstgv));
880 SvREFCNT_inc(GvSV(PL_firstgv));
881 SAVEGENERICSV(GvSV(PL_secondgv));
882 SvREFCNT_inc(GvSV(PL_secondgv));
886 cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
887 if (!(flags & OPf_SPECIAL)) {
888 cx->cx_type = CXt_SUB|CXp_MULTICALL;
889 cx_pushsub(cx, cv, NULL, hasargs);
891 PADLIST * const padlist = CvPADLIST(cv);
893 if (++CvDEPTH(cv) >= 2)
894 pad_push(padlist, CvDEPTH(cv));
895 PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));
898 /* This is mostly copied from pp_entersub */
899 AV * const av = MUTABLE_AV(PAD_SVl(0));
901 cx->blk_sub.savearray = GvAV(PL_defgv);
902 GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
909 Perl_sortsv_flags(aTHX_ start, max,
910 (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
913 /* Reset cx, in case the context stack has been reallocated. */
916 PL_stack_sp = PL_stack_base + cx->blk_oldsp;
919 if (!(flags & OPf_SPECIAL)) {
920 assert(CxTYPE(cx) == CXt_SUB);
924 assert(CxTYPE(cx) == CXt_NULL);
925 /* there isn't a POPNULL ! */
933 MEXTEND(SP, 20); /* Can't afford stack realloc on signal. */
935 if (priv & OPpSORT_NUMERIC) {
936 if ((priv & OPpSORT_INTEGER) || all_SIVs) {
939 sortsv_amagic_i_ncmp_desc(aTHX_ start, max, sort_flags);
941 sortsv_amagic_i_ncmp(aTHX_ start, max, sort_flags);
944 sortsv_i_ncmp_desc(aTHX_ start, max, sort_flags);
946 sortsv_i_ncmp(aTHX_ start, max, sort_flags);
951 sortsv_amagic_ncmp_desc(aTHX_ start, max, sort_flags);
953 sortsv_amagic_ncmp(aTHX_ start, max, sort_flags);
956 sortsv_ncmp_desc(aTHX_ start, max, sort_flags);
958 sortsv_ncmp(aTHX_ start, max, sort_flags);
961 #ifdef USE_LOCALE_COLLATE
962 else if(IN_LC_RUNTIME(LC_COLLATE)) {
965 sortsv_amagic_cmp_locale_desc(aTHX_ start, max, sort_flags);
967 sortsv_amagic_cmp_locale(aTHX_ start, max, sort_flags);
970 sortsv_cmp_locale_desc(aTHX_ start, max, sort_flags);
972 sortsv_cmp_locale(aTHX_ start, max, sort_flags);
978 sortsv_amagic_cmp_desc(aTHX_ start, max, sort_flags);
980 sortsv_amagic_cmp(aTHX_ start, max, sort_flags);
983 sortsv_cmp_desc(aTHX_ start, max, sort_flags);
985 sortsv_cmp(aTHX_ start, max, sort_flags);
988 if ((priv & OPpSORT_REVERSE) != 0) {
989 SV **q = start+max-1;
991 SV * const tmp = *start;
999 /* copy back result to the array */
1000 SV** const base = MARK+1;
1001 SSize_t max_minus_one = max - 1; /* attempt to work around mingw bug */
1002 if (SvMAGICAL(av)) {
1003 for (i = 0; i <= max_minus_one; i++)
1004 base[i] = newSVsv(base[i]);
1006 if (max_minus_one >= 0)
1007 av_extend(av, max_minus_one);
1008 for (i=0; i <= max_minus_one; i++) {
1009 SV * const sv = base[i];
1010 SV ** const didstore = av_store(av, i, sv);
1018 /* the elements of av are likely to be the same as the
1019 * (non-refcounted) elements on the stack, just in a different
1020 * order. However, its possible that someone's messed with av
1021 * in the meantime. So bump and unbump the relevant refcounts
1024 for (i = 0; i <= max_minus_one; i++) {
1027 if (SvREFCNT(sv) > 1)
1028 base[i] = newSVsv(sv);
1030 SvREFCNT_inc_simple_void_NN(sv);
1033 if (max_minus_one >= 0) {
1034 av_extend(av, max_minus_one);
1035 Copy(base, AvARRAY(av), max, SV*);
1037 AvFILLp(av) = max_minus_one;
1043 PL_stack_sp = ORIGMARK + max;
1048 S_sortcv(pTHX_ SV *const a, SV *const b)
1050 const I32 oldsaveix = PL_savestack_ix;
1052 PMOP * const pm = PL_curpm;
1053 COP * const cop = PL_curcop;
1056 PERL_ARGS_ASSERT_SORTCV;
1058 olda = GvSV(PL_firstgv);
1059 GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);
1061 oldb = GvSV(PL_secondgv);
1062 GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);
1064 PL_stack_sp = PL_stack_base;
1068 /* entry zero of a stack is always PL_sv_undef, which
1069 * simplifies converting a '()' return into undef in scalar context */
1070 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
1071 result = SvIV(*PL_stack_sp);
1073 LEAVE_SCOPE(oldsaveix);
1079 S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
1081 const I32 oldsaveix = PL_savestack_ix;
1083 AV * const av = GvAV(PL_defgv);
1084 PMOP * const pm = PL_curpm;
1085 COP * const cop = PL_curcop;
1087 PERL_ARGS_ASSERT_SORTCV_STACKED;
1094 if (AvMAX(av) < 1) {
1095 SV **ary = AvALLOC(av);
1096 if (AvARRAY(av) != ary) {
1097 AvMAX(av) += AvARRAY(av) - AvALLOC(av);
1100 if (AvMAX(av) < 1) {
1111 PL_stack_sp = PL_stack_base;
1115 /* entry zero of a stack is always PL_sv_undef, which
1116 * simplifies converting a '()' return into undef in scalar context */
1117 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
1118 result = SvIV(*PL_stack_sp);
1120 LEAVE_SCOPE(oldsaveix);
1126 S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
1129 const I32 oldsaveix = PL_savestack_ix;
1130 CV * const cv=MUTABLE_CV(PL_sortcop);
1132 PMOP * const pm = PL_curpm;
1134 PERL_ARGS_ASSERT_SORTCV_XSUB;
1142 (void)(*CvXSUB(cv))(aTHX_ cv);
1143 /* entry zero of a stack is always PL_sv_undef, which
1144 * simplifies converting a '()' return into undef in scalar context */
1145 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
1146 result = SvIV(*PL_stack_sp);
1148 LEAVE_SCOPE(oldsaveix);
1154 PERL_STATIC_FORCE_INLINE I32
1155 S_sv_ncmp(pTHX_ SV *const a, SV *const b)
1157 I32 cmp = do_ncmp(a, b);
1159 PERL_ARGS_ASSERT_SV_NCMP;
1162 if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
1169 PERL_STATIC_FORCE_INLINE I32
1170 S_sv_ncmp_desc(pTHX_ SV *const a, SV *const b)
1172 PERL_ARGS_ASSERT_SV_NCMP_DESC;
1174 return -S_sv_ncmp(aTHX_ a, b);
1177 PERL_STATIC_FORCE_INLINE I32
1178 S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
1180 const IV iv1 = SvIV(a);
1181 const IV iv2 = SvIV(b);
1183 PERL_ARGS_ASSERT_SV_I_NCMP;
1185 return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
1188 PERL_STATIC_FORCE_INLINE I32
1189 S_sv_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
1191 PERL_ARGS_ASSERT_SV_I_NCMP_DESC;
1193 return -S_sv_i_ncmp(aTHX_ a, b);
1196 #define tryCALL_AMAGICbin(left,right,meth) \
1197 (SvAMAGIC(left)||SvAMAGIC(right)) \
1198 ? amagic_call(left, right, meth, 0) \
1201 #define SORT_NORMAL_RETURN_VALUE(val) (((val) > 0) ? 1 : ((val) ? -1 : 0))
1203 PERL_STATIC_FORCE_INLINE I32
1204 S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
1206 SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);
1208 PERL_ARGS_ASSERT_AMAGIC_NCMP;
1212 const I32 i = SvIVX(tmpsv);
1213 return SORT_NORMAL_RETURN_VALUE(i);
1216 const NV d = SvNV(tmpsv);
1217 return SORT_NORMAL_RETURN_VALUE(d);
1220 return S_sv_ncmp(aTHX_ a, b);
1223 PERL_STATIC_FORCE_INLINE I32
1224 S_amagic_ncmp_desc(pTHX_ SV *const a, SV *const b)
1226 PERL_ARGS_ASSERT_AMAGIC_NCMP_DESC;
1228 return -S_amagic_ncmp(aTHX_ a, b);
1231 PERL_STATIC_FORCE_INLINE I32
1232 S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
1234 SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);
1236 PERL_ARGS_ASSERT_AMAGIC_I_NCMP;
1240 const I32 i = SvIVX(tmpsv);
1241 return SORT_NORMAL_RETURN_VALUE(i);
1244 const NV d = SvNV(tmpsv);
1245 return SORT_NORMAL_RETURN_VALUE(d);
1248 return S_sv_i_ncmp(aTHX_ a, b);
1251 PERL_STATIC_FORCE_INLINE I32
1252 S_amagic_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
1254 PERL_ARGS_ASSERT_AMAGIC_I_NCMP_DESC;
1256 return -S_amagic_i_ncmp(aTHX_ a, b);
1259 PERL_STATIC_FORCE_INLINE I32
1260 S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
1262 SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);
1264 PERL_ARGS_ASSERT_AMAGIC_CMP;
1268 const I32 i = SvIVX(tmpsv);
1269 return SORT_NORMAL_RETURN_VALUE(i);
1272 const NV d = SvNV(tmpsv);
1273 return SORT_NORMAL_RETURN_VALUE(d);
1276 return sv_cmp(str1, str2);
1279 PERL_STATIC_FORCE_INLINE I32
1280 S_amagic_cmp_desc(pTHX_ SV *const str1, SV *const str2)
1282 PERL_ARGS_ASSERT_AMAGIC_CMP_DESC;
1284 return -S_amagic_cmp(aTHX_ str1, str2);
1287 PERL_STATIC_FORCE_INLINE I32
1288 S_cmp_desc(pTHX_ SV *const str1, SV *const str2)
1290 PERL_ARGS_ASSERT_CMP_DESC;
1292 return -sv_cmp(str1, str2);
1295 #ifdef USE_LOCALE_COLLATE
1297 PERL_STATIC_FORCE_INLINE I32
1298 S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
1300 SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);
1302 PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;
1306 const I32 i = SvIVX(tmpsv);
1307 return SORT_NORMAL_RETURN_VALUE(i);
1310 const NV d = SvNV(tmpsv);
1311 return SORT_NORMAL_RETURN_VALUE(d);
1314 return sv_cmp_locale(str1, str2);
1317 PERL_STATIC_FORCE_INLINE I32
1318 S_amagic_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
1320 PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE_DESC;
1322 return -S_amagic_cmp_locale(aTHX_ str1, str2);
1325 PERL_STATIC_FORCE_INLINE I32
1326 S_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
1328 PERL_ARGS_ASSERT_CMP_LOCALE_DESC;
1330 return -sv_cmp_locale(str1, str2);
1336 * ex: set ts=8 sts=4 sw=4 et: