 * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
 * 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
 *
 * You may distribute under the terms of either the GNU General Public
 * License or the Artistic License, as specified in the README file.
 *
 * ...they shuffled back towards the rear of the line. 'No, not at the
 * rear!' the slave-driver shouted. 'Three files up. And stay there...
 *
 *     [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
 */
/* This file contains pp ("push/pop") functions that
 * execute the opcodes that make up a perl program. A typical pp function
 * expects to find its arguments on the stack, and usually pushes its
 * results onto the stack, hence the 'pp' terminology. Each OP structure
 * contains a pointer to the relevant pp_foo() function.
 *
 * This particular file just contains pp_sort(), which is complex
 * enough to merit its own file! See the other pp*.c files for the rest of
 * the pp_ functions.
 */
#define PERL_IN_PP_SORT_C
#include "perl.h"

#define sv_cmp_static Perl_sv_cmp
#define sv_cmp_locale_static Perl_sv_cmp_locale

#define SMALLSORT (200)
/* Flags for qsortsv and mergesortsv */
#define SORTf_DESC     1
#define SORTf_STABLE   2
#define SORTf_UNSTABLE 8
/*
 * The mergesort implementation is by Peter M. McIlroy <pmcilroy@lucent.com>.
 *
 * The original code was written in conjunction with the BSD Computer
 * Software Research Group at the University of California, Berkeley.
 *
 * See also: "Optimistic Sorting and Information Theoretic Complexity",
 * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
 * pp 467-474, Austin, Texas, 25-27 January 1993.
 *
 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>.
 *
 * The code can be distributed under the same terms as Perl itself.
 */
typedef char * aptr;    /* pointer for arithmetic on sizes */
typedef SV * gptr;      /* pointers in our lists */
/* Binary merge internal sort, with a few special mods
** for the special perl environment it now finds itself in.
**
** Things that were once options have been hotwired
** to values suitable for this use. In particular, we'll always
** initialize looking for natural runs, we'll always produce stable
** output, and we'll always do Peter McIlroy's binary merge.
*/
/* Pointer types for arithmetic and storage and convenience casts */

#define APTR(P) ((aptr)(P))
#define GPTP(P) ((gptr *)(P))
#define GPPP(P) ((gptr **)(P))

/* byte offset from pointer P to (larger) pointer Q */
#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
#define PSIZE sizeof(gptr)

/* If PSIZE is power of 2, make PSHIFT that power, if that helps */

#ifdef PSHIFT
#define PNELEM(P, Q)  (BYTEOFF(P,Q) >> (PSHIFT))
#define PNBYTE(N)     ((N) << (PSHIFT))
#define PINDEX(P, N)  (GPTP(APTR(P) + PNBYTE(N)))
#else
/* Leave optimization to compiler */
#define PNELEM(P, Q)  (GPTP(Q) - GPTP(P))
#define PNBYTE(N)     ((N) * (PSIZE))
#define PINDEX(P, N)  (GPTP(P) + (N))
#endif
/* Pointer into other corresponding to pointer into this */
#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))

#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
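/* Note: FROMTOUPTO is a do-while, so it always copies at least one element
 * and requires src < lim on entry; it also leaves src and dst advanced past
 * the copied range, which the call sites below rely on. */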
/* Runs are identified by a pointer in the auxiliary list.
** The pointer is at the start of the list,
** and it points to the start of the next list.
** NEXT is used as an lvalue, too.
*/

#define NEXT(P) (*GPPP(P))
/* PTHRESH is the minimum number of pairs with the same sense to justify
** checking for a run and extending it. Note that PTHRESH counts PAIRS,
** not just elements, so PTHRESH == 8 means a run of 16.
*/

#define PTHRESH (8)
/* RTHRESH is the number of elements in a run that must compare low
** to the low element from the opposing run before we justify
** doing a binary rampup instead of single stepping.
** In random input, N in a row low should only happen with
** probability 2^(1-N), so we can risk that we are dealing
** with orderly input without paying much when we aren't.
*/

#define RTHRESH (6)
/*
** Overview of algorithm and variables.
** The array of elements at list1 will be organized into runs of length 2,
** or runs of length >= 2 * PTHRESH. We only try to form long runs when
** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
**
** Unless otherwise specified, pair pointers address the first of two elements.
**
** b and b+1 are a pair that compare with sense "sense".
** b is the "bottom" of adjacent pairs that might form a longer run.
**
** p2 parallels b in the list2 array, where runs are defined by
** a pointer chain.
**
** t represents the "top" of the adjacent pairs that might extend
** the run beginning at b. Usually, t addresses a pair
** that compares with opposite sense from (b,b+1).
** However, it may also address a singleton element at the end of list1,
** or it may be equal to "last", the first element beyond list1.
**
** r addresses the Nth pair following b. If this would be beyond t,
** we back it off to t. Only when r is less than t do we consider the
** run long enough to consider checking.
**
** q addresses a pair such that the pairs at b through q already form a run.
** Often, q will equal b, indicating we are only sure of the pair itself.
** However, a search on the previous cycle may have revealed a longer run,
** so q may be greater than b.
**
** p is used to work back from a candidate r, trying to reach q,
** which would mean b through r would be a run. If we discover such a run,
** we start q at r and try to push it further towards t.
** If b through r is NOT a run, we detect the wrong order at (p-1,p).
** In any event, after the check (if any), we have two main cases.
**
** 1) Short run. b <= q < p <= r <= t.
**    b through q is a run (perhaps trivial)
**    q through p are uninteresting pairs
**    p through r is a run
**
** 2) Long run. b < r <= q < t.
**    b through q is a run (of length >= 2 * PTHRESH)
**
** Note that degenerate cases are not only possible, but likely.
** For example, if the pair following b compares with opposite sense,
** then b == q < p == r == t.
*/
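/* Worked example (illustrative, not from the original source): with
 * PTHRESH == 8, a fully ascending list of 20 elements gives r = b + 16 < t;
 * the backward check from p reaches q, q is then pushed out toward t, and
 * all 20 elements become a single long run (case 2). With input (2 1 3 4),
 * the second pair reverses sense immediately, so b == q < p == r == t:
 * the degenerate case 1, and runs of length 2 are emitted. */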
static IV
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
{
    I32 sense;
    gptr *b, *p, *q, *t, *p2;
    gptr *last, *r;
    IV runs = 0;

    b = list1;
    last = PINDEX(b, nmemb);
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }

        /* Having laid out the playing field, look for long runs */
        do {
            p = r = b + (2 * PTHRESH);
            if (r >= t) p = r = t;      /* too short to care about */
            else {
                q = b;
                while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                       ((p -= 2) > q)) {}
                if (p <= q) {
                    /* b through r is a (long) run.
                    ** Extend it as far as possible.
                    */
                    p = q = r;
                    while (((p += 2) < t) &&
                           ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
                    r = p = q + 2;      /* no simple pairs, no after-run */
                }
            }
            if (q > b) {                /* run of greater than 2 at b */
                gptr *savep = p;

                p = q += 2;
                /* pick up singleton, if possible */
                if ((p == t) &&
                    ((t + 1) == last) &&
                    ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                    savep = r = p = q = last;
                p2 = NEXT(p2) = p2 + (p - b); ++runs;
                if (sense)
                    while (b < --p) {
                        const gptr c = *b;
                        *b++ = *p;
                        *p = c;
                    }
                p = savep;
            }
            while (q < p) {             /* simple pairs */
                p2 = NEXT(p2) = p2 + 2; ++runs;
                if (sense) {
                    const gptr c = *q++;
                    *(q-1) = *q;
                    *q++ = c;
                } else q += 2;
            }
            if (((b = p) == t) && ((t+1) == last)) {
                NEXT(p2) = p2 + 1; ++runs;
                b++;
            }
            q = r;
        } while (b < t);
        sense = !sense;
    }
    return runs;
}
/* The original merge sort, in use since 5.7, was as fast as, or faster than,
 * qsort on many platforms, but slower than qsort, conspicuously so,
 * on others. The most likely explanation was platform-specific
 * differences in cache sizes and relative speeds.
 *
 * The quicksort divide-and-conquer algorithm guarantees that, as the
 * problem is subdivided into smaller and smaller parts, the parts
 * fit into smaller (and faster) caches. So it doesn't matter how
 * many levels of cache exist, quicksort will "find" them, and,
 * as long as smaller is faster, take advantage of them.
 *
 * By contrast, consider how the original mergesort algorithm worked.
 * Suppose we have five runs (each typically of length 2 after dynprep).
 *
 * Adjacent pairs are merged in "grand sweeps" through the input.
 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 * runs 3 and 4 are merged and the records from run 5 have been copied.
 *
 * The only cache that matters is one large enough to hold *all* the input.
 * On some platforms, this may be many times slower than smaller caches.
 *
 * The following pseudo-code uses the same basic merge algorithm,
 * but in a divide-and-conquer way.
 *
 * # merge $runs runs at offset $offset of list $list1 into $list2.
 * # all unmerged runs ($runs == 1) originate in list $base.
 * sub mgsort2 {
 *     my ($offset, $runs, $base, $list1, $list2) = @_;
 *
 *     if ($runs == 1) {
 *         if ($list1 is $base) copy run to $list2
 *         return offset of end of list (or copy)
 *     } else {
 *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
 *         mgsort2($off2, $runs/2, $base, $list2, $list1)
 *         merge the adjacent runs at $offset of $list1 into $list2
 *         return the offset of the end of the merged runs
 *     }
 * }
 * mgsort2(0, $runs, $base, $aux, $base);
 *
 * For our 5 runs, the tree of calls looks like
 *
 *            5
 *        3       2
 *      2   1   1   1
 *     1 1
 *
 * and the corresponding activity looks like
 *
 *     copy runs 1 and 2 from base to aux
 *     merge runs 1 and 2 from aux to base
 *     (run 3 is where it belongs, no copy needed)
 *     merge runs 12 and 3 from base to aux
 *     (runs 4 and 5 are where they belong, no copy needed)
 *     merge runs 4 and 5 from base to aux
 *     merge runs 123 and 45 from aux to base
 *
 * Note that we merge runs 1 and 2 immediately after copying them,
 * while they are still likely to be in fast cache. Similarly,
 * run 3 is merged with run 12 while it still may be lingering in cache.
 * This implementation should therefore enjoy much of the cache-friendly
 * behavior that quicksort does. In addition, it does less copying
 * than the original mergesort implementation (only runs 1 and 2 are copied)
 * and the "balancing" of merges is better (merged runs comprise more nearly
 * equal numbers of original runs).
 *
 * The actual cache-friendly implementation will use a pseudo-stack
 * to avoid recursion, and will unroll processing of runs of length 2,
 * but it is otherwise similar to the recursive implementation.
 */
typedef struct {
    IV offset;          /* offset of 1st of 2 runs at this level */
    IV runs;            /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */
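/* Illustrative sketch: the recursive pseudo-code above, rendered as C.
 * This block is compiled out (#if 0); copy_run(), run_end() and
 * merge_adjacent() are hypothetical helpers named here for exposition
 * only and are not part of this file. */
#if 0
/* merge 'runs' runs at offset 'offset' of list1 into list2; all unmerged
 * runs (runs == 1) originate in 'base'.  Returns the offset just past
 * the merged output. */
static size_t
mgsort2(size_t offset, size_t runs, gptr *base, gptr *list1, gptr *list2)
{
    if (runs == 1) {
        if (list1 == base)
            copy_run(base, list2, offset);  /* copy run to list2 */
        return run_end(offset);             /* offset of end of list (or copy) */
    }
    else {
        /* build each half with the list roles swapped, then merge the
         * two adjacent runs at 'offset' of list1 into list2 */
        size_t off2 = mgsort2(offset, runs - runs / 2, base, list2, list1);
        size_t end  = mgsort2(off2, runs / 2, base, list2, list1);
        merge_adjacent(list1, list2, offset, off2, end);
        return end;
    }
}
/* top-level call, as in the pseudo-code: mgsort2(0, runs, base, aux, base) */
#endif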
static I32
cmp_desc(pTHX_ gptr const a, gptr const b)
{
    return -PL_sort_RealCmp(aTHX_ a, b);
}
/*
=head1 SV Manipulation Functions

=for apidoc sortsv_flags

In-place sort an array of SV pointers with the given comparison routine,
with various SORTf_* flag options.

=cut
*/
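/* Usage sketch (illustrative only): a stable descending sort of a plain
 * (non-magical) AV's backing array, assuming a comparison routine
 * 'my_cmp' (hypothetical) with the SVCOMPARE_t signature:
 *
 *     sortsv_flags(AvARRAY(av), AvFILLp(av) + 1, my_cmp,
 *                  SORTf_DESC | SORTf_STABLE);
 */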
void
Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    IV i, run, offset;
    I32 sense, level;
    gptr *f1, *f2, *t, *b, *p;
    int iwhich;
    gptr *aux;
    gptr *p1;
    gptr small[SMALLSORT];
    gptr *which[3];
    off_runs stack[60], *stackp;
    SVCOMPARE_t savecmp = NULL;

    PERL_ARGS_ASSERT_SORTSV_FLAGS;
    if (nmemb <= 1) return;                     /* sorted trivially */

    if ((flags & SORTf_DESC) != 0) {
        savecmp = PL_sort_RealCmp;      /* Save current comparison routine, if any */
        PL_sort_RealCmp = cmp;  /* Put comparison routine where cmp_desc can find it */
        cmp = cmp_desc;
    }

    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { Newx(aux,nmemb,gptr); }              /* allocate auxiliary array */
    level = 0;
    stackp = stack;
    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    which[0] = which[2] = base;
    which[1] = aux;
    for (;;) {
        /* On levels where both runs have been constructed (stackp->runs == 0),
         * merge them, and note the offset of their end, in case the offset
         * is needed at the next level up. Hop up a level, and,
         * as long as stackp->runs is 0, keep merging.
         */
        IV runs = stackp->runs;
        if (runs == 0) {
            gptr *list1, *list2;
            iwhich = level & 1;
            list1 = which[iwhich];              /* area where runs are now */
            list2 = which[++iwhich];            /* area for merged runs */
            do {
                gptr *l1, *l2, *tp2;
                offset = stackp->offset;
                f1 = p1 = list1 + offset;       /* start of first run */
                p = tp2 = list2 + offset;       /* where merged run will go */
                t = NEXT(p);                    /* where first run ends */
                f2 = l1 = POTHER(t, list2, list1);      /* ... on the other side */
                t = NEXT(t);                    /* where second run ends */
                l2 = POTHER(t, list2, list1);   /* ... on the other side */
                offset = PNELEM(list2, t);
                while (f1 < l1 && f2 < l2) {
                    /* If head 1 is larger than head 2, find ALL the elements
                    ** in list 2 strictly less than head1, write them all,
                    ** then head 1. Then compare the new heads, and repeat,
                    ** until one or both lists are exhausted.
                    **
                    ** In all comparisons (after establishing
                    ** which head to merge) the item to merge
                    ** (at pointer q) is the first operand of
                    ** the comparison. When we want to know
                    ** if "q is strictly less than the other",
                    ** we can't just do
                    **    cmp(q, other) < 0
                    ** because stability demands that we treat equality
                    ** as high when q comes from l2, and as low when
                    ** q was from l1. So we ask the question by doing
                    **    cmp(q, other) <= sense
                    ** and make sense == 0 when equality should look low,
                    ** and -1 when equality should look high.
                    */
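                    /* Worked example (illustrative): if *f1 == *f2, the
                     * test below picks q = f2 with sense == -1, and
                     * cmp(q, other) <= -1 then fails on ties, so the
                     * element from run 1 is written first - exactly what
                     * stability requires. */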
                    if (cmp(aTHX_ *f1, *f2) <= 0) {
                        q = f2; b = f1; t = l1;
                        sense = -1;
                    } else {
                        q = f1; b = f2; t = l2;
                        sense = 0;
                    }
                    /*
                    ** Leave t at something strictly
                    ** greater than q (or at the end of the list),
                    ** and b at something strictly less than q.
                    */
                    for (i = 1, run = 0 ;;) {
                        if ((p = PINDEX(b, i)) >= t) {
                            /* off the end */
                            if (((p = PINDEX(t, -1)) > b) &&
                                (cmp(aTHX_ *q, *p) <= sense))
                                 t = p;
                            else b = p;
                            break;
                        } else if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                            break;
                        } else b = p;
                        if (++run >= RTHRESH) i += i;
                    }
                    /* q is known to follow b and must be inserted before t.
                    ** Increment b, so the range of possibilities is [b,t).
                    ** Round binary split down, to favor early appearance.
                    ** Adjust b and t until q belongs just before t.
                    */
                    b++;
                    while (b < t) {
                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                        if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                        }
                        else b = p + 1;
                    }
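                    /* Arithmetic note (illustrative): the
                     * (PNELEM(b, t) - 1) / 2 probe rounds down, e.g. with
                     * 4 candidates it probes b + 1 rather than b + 2,
                     * biasing the search toward earlier positions per the
                     * "favor early appearance" remark above. */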
                    /* Copy all the strictly low elements */
                    if (q == f1) {
                        FROMTOUPTO(f2, tp2, t);
                        *tp2++ = *f1++;
                    } else {
                        FROMTOUPTO(f1, tp2, t);
                        *tp2++ = *f2++;
                    }
                }

                /* Run out remaining list */
                if (f1 == l1) {
                    if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                } else FROMTOUPTO(f1, tp2, l1);
                p1 = NEXT(p1) = POTHER(tp2, list2, list1);

                if (--level == 0) goto done;
                t = list1; list1 = list2; list2 = t;    /* swap lists */
            } while ((runs = stackp->runs) == 0);
        }
        else {
            stackp->runs = 0;           /* current run will finish level */
            /* While there are more than 2 runs remaining,
             * turn them into exactly 2 runs (at the "other" level),
             * each made up of approximately half the runs.
             * Stack the second half for later processing,
             * and set about producing the first half now.
             */
            while (runs > 2) {
                ++level;
                ++stackp;
                stackp->offset = offset;
                runs -= stackp->runs = runs / 2;
            }
            /* We must construct a single run from 1 or 2 runs.
             * All the original runs are in which[0] == base.
             * The run we construct must end up in which[level&1].
             */
            iwhich = level & 1;
            if (runs == 1) {
                /* Constructing a single run from a single run.
                 * If it's where it belongs already, there's nothing to do.
                 * Otherwise, copy it to where it belongs.
                 * A run of 1 is either a singleton at level 0,
                 * or the second half of a split 3. In neither event
                 * is it necessary to set offset. It will be set by the merge
                 * that immediately follows.
                 */
                if (iwhich) {   /* Belongs in aux, currently in base */
                    f1 = b = PINDEX(base, offset);      /* where list starts */
                    f2 = PINDEX(aux, offset);   /* where list goes */
                    t = NEXT(f2);               /* where list will end */
                    offset = PNELEM(aux, t);    /* offset thereof */
                    t = PINDEX(base, offset);   /* where it currently ends */
                    FROMTOUPTO(f1, f2, t);      /* copy */
                    NEXT(b) = t;                /* set up parallel pointer */
                } else if (level == 0) goto done;       /* single run at level 0 */
            }
            else {
                /* Constructing a single run from two runs.
                 * The merge code at the top will do that.
                 * We need only make sure the two runs are in the "other" array,
                 * so they'll end up in the correct array after the merge.
                 */
                ++level;
                ++stackp;
                stackp->offset = offset;
                stackp->runs = 0;       /* take care of both runs, trigger merge */
                if (!iwhich) {          /* Merged runs belong in aux, copy 1st */
                    f1 = b = PINDEX(base, offset);      /* where first run starts */
                    f2 = PINDEX(aux, offset);   /* where it will be copied */
                    t = NEXT(f2);               /* where first run will end */
                    offset = PNELEM(aux, t);    /* offset thereof */
                    p = PINDEX(base, offset);   /* end of first run */
                    t = NEXT(t);                /* where second run will end */
                    t = PINDEX(base, PNELEM(aux, t));   /* where it now ends */
                    FROMTOUPTO(f1, f2, t);      /* copy both runs */
                    NEXT(b) = p;                /* parallel pointer for 1st */
                    NEXT(p) = t;                /* ... and for second */
                }
            }
        }
    }

  done:
    if (aux != small) Safefree(aux);    /* free iff allocated */

    if (savecmp != NULL) {
        PL_sort_RealCmp = savecmp;      /* Restore current comparison routine, if any */
    }

    return;
}
/*
 * The quicksort implementation was derived from source code contributed
 * by Tom Horsley.
 *
 * NOTE: this code was derived from Tom Horsley's qsort replacement
 * and should not be confused with the original code.
 */

/* Copyright (C) Tom Horsley, 1997. All rights reserved.

   Permission granted to distribute under the same terms as perl which are
   (briefly):

    This program is free software; you can redistribute it and/or modify
    it under the terms of either:

        a) the GNU General Public License as published by the Free
        Software Foundation; either version 1, or (at your option) any
        later version, or

        b) the "Artistic License" which comes with this Kit.

   Details on the perl license can be found in the perl source code which
   may be located via the www.perl.com web page.
   This is the most wonderfulest possible qsort I can come up with (and
   still be mostly portable). My (limited) tests indicate it consistently
   does about 20% fewer calls to compare than does the qsort in the Visual
   C++ library; other vendors may vary.

   Some of the ideas in here can be found in "Algorithms" by Sedgewick,
   others I invented myself (or more likely re-invented since they seemed
   pretty obvious once I watched the algorithm operate for a while).

   Most of this code was written while watching the Marlins sweep the Giants
   in the 1997 National League Playoffs - no Braves fans allowed to use this
   code (just kidding :-).

   I realize that if I wanted to be true to the perl tradition, the only
   comment in this file would be something like:

   ...they shuffled back towards the rear of the line. 'No, not at the
   rear!' the slave-driver shouted. 'Three files up. And stay there...

   However, I really needed to violate that tradition just so I could keep
   track of what happens myself, not to mention some poor fool trying to
   understand this years from now :-).
*/
/* ********************************************************** Configuration */

#ifndef QSORT_ORDER_GUESS
#define QSORT_ORDER_GUESS 2     /* Select doubling version of the netBSD trick */
#endif

/* QSORT_MAX_STACK is the largest number of partitions that can be stacked up for
   future processing - a good max upper bound is log base 2 of memory size
   (32 on 32 bit machines, 64 on 64 bit machines, etc). In reality it can
   safely be smaller than that since the program is taking up some space and
   most operating systems only let you grab some subset of contiguous
   memory (not to mention that you are normally sorting data larger than
   1 byte element size :-).
*/
#ifndef QSORT_MAX_STACK
#define QSORT_MAX_STACK 32
#endif
/* QSORT_BREAK_EVEN is the size of the largest partition we should insertion
   sort. Anything bigger and we use qsort. If you make this too small, the
   qsort will probably break (or become less efficient), because it doesn't
   expect the middle element of a partition to be the same as the right or
   left - you have been warned.
*/
#ifndef QSORT_BREAK_EVEN
#define QSORT_BREAK_EVEN 6
#endif
/* QSORT_PLAY_SAFE is the size of the largest partition we're willing
   to go quadratic on. We inoculate larger partitions against
   quadratic behavior by shuffling them before sorting. This is not
   an absolute guarantee of non-quadratic behavior, but it would take
   staggeringly bad luck to pick extreme elements as the pivot
   from randomized data.
*/
#ifndef QSORT_PLAY_SAFE
#define QSORT_PLAY_SAFE 255
#endif
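/* Note: after a shuffle every pivot pick is effectively a uniform draw,
 * so k consecutive near-extreme picks occur with probability that decays
 * geometrically in k - the "staggeringly bad luck" referred to above. */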
/* ************************************************************* Data Types */

/* hold left and right index values of a partition waiting to be sorted (the
   partition includes both left and right - right is NOT one past the end or
   anything like that).
*/
struct partition_stack_entry {
   int left;
   int right;
#ifdef QSORT_ORDER_GUESS
   int qsort_break_even;
#endif
};
/* ******************************************************* Shorthand Macros */

/* Note that these macros will be used from inside the qsort function where
   we happen to know that the variable 'elt_size' contains the size of an
   array element, the variable 'temp' points to enough space to hold a
   temp element, the variable 'array' points to the array being sorted,
   and 'compare' is the pointer to the compare routine.

   Also note that there are very many highly architecture specific ways
   these might be sped up, but this is simply the most generally portable
   code I could think of.
*/

/* Return < 0, == 0, or > 0 as the value of elt1 is < elt2, == elt2, or > elt2
*/
#define qsort_cmp(elt1, elt2) \
   ((*compare)(aTHX_ array[elt1], array[elt2]))
#ifdef QSORT_ORDER_GUESS
#define QSORT_NOTICE_SWAP swapped++;
#else
#define QSORT_NOTICE_SWAP
#endif
/* swaps contents of array elements elt1, elt2.
*/
#define qsort_swap(elt1, elt2) \
   STMT_START { \
      QSORT_NOTICE_SWAP \
      temp = array[elt1]; \
      array[elt1] = array[elt2]; \
      array[elt2] = temp; \
   } STMT_END
/* rotate contents of elt1, elt2, elt3 such that elt1 gets elt2, elt2 gets
   elt3 and elt3 gets elt1.
*/
#define qsort_rotate(elt1, elt2, elt3) \
   STMT_START { \
      QSORT_NOTICE_SWAP \
      temp = array[elt1]; \
      array[elt1] = array[elt2]; \
      array[elt2] = array[elt3]; \
      array[elt3] = temp; \
   } STMT_END
/* ************************************************************ Debug stuff */

#ifdef QSORT_DEBUG

static void
break_here(void)
{
   return; /* good place to set a breakpoint */
}

#define qsort_assert(t) (void)( (t) || (break_here(), 0) )
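/* Note: on failure the || short-circuits into (break_here(), 0): the comma
 * expression calls break_here() and yields 0, so a debugger breakpoint on
 * break_here() stops at the first violated assertion without aborting. */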
static void
doqsort_all_asserts(
   SV ** array,
   int num_elts,
   int elt_size,
   int (*compare)(const void * elt1, const void * elt2),
   int pc_left, int pc_right, int u_left, int u_right)
{
   int i;

   qsort_assert(pc_left <= pc_right);
   qsort_assert(u_right < pc_left);
   qsort_assert(pc_right < u_left);
   for (i = u_right + 1; i < pc_left; ++i) {
      qsort_assert(qsort_cmp(i, pc_left) < 0);
   }
   for (i = pc_left; i < pc_right; ++i) {
      qsort_assert(qsort_cmp(i, pc_right) == 0);
   }
   for (i = pc_right + 1; i < u_left; ++i) {
      qsort_assert(qsort_cmp(pc_right, i) < 0);
   }
}

#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) \
   doqsort_all_asserts(array, num_elts, elt_size, compare, \
                       PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT)
#else

#define qsort_assert(t) ((void)0)
#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) ((void)0)

#endif
/*
=head1 Array Manipulation Functions

=for apidoc sortsv

In-place sort an array of SV pointers with the given comparison routine.

Currently this always uses mergesort. See C<L</sortsv_flags>> for a more
flexible routine.

=cut
*/

void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    sortsv_flags(array, nmemb, cmp, 0);
}
/* SvNSIOK: the SV has a valid NV, or an IV that is not flagged unsigned */
#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
/* SvSIOK: the SV has a valid signed IV (IOK set, IVisUV clear) */
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
/* SvNSIV: the numeric value - the NV if set, else the signed IV, else compute it */
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
PP(pp_sort)
{
    dSP; dMARK; dORIGMARK;
    SV **p1 = ORIGMARK+1, **p2;
    SSize_t max, i;
    AV* av = NULL;
    GV *gv;
    CV *cv = NULL;
    U8 gimme = GIMME_V;
    OP* const nextop = PL_op->op_next;
    I32 overloading = 0;
    bool hasargs = FALSE;
    bool copytmps;
    I32 is_xsub = 0;
    const U8 priv = PL_op->op_private;
    const U8 flags = PL_op->op_flags;
    U32 sort_flags = 0;
    void (*sortsvp)(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
        = Perl_sortsv_flags;
    I32 all_SIVs = 1;

    if ((priv & OPpSORT_DESCEND) != 0)
        sort_flags |= SORTf_DESC;
    if ((priv & OPpSORT_STABLE) != 0)
        sort_flags |= SORTf_STABLE;
    if ((priv & OPpSORT_UNSTABLE) != 0)
        sort_flags |= SORTf_UNSTABLE;
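    /* Illustration: these bits come from the optimiser; e.g.
     * "sort { $b <=> $a } @x" is compiled down to a plain sort with
     * OPpSORT_NUMERIC and OPpSORT_DESCEND set, rather than calling the
     * block for every comparison. */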
    if (gimme != G_ARRAY) {
        SP = MARK;
        EXTEND(SP,1);
        RETPUSHUNDEF;
    }

    ENTER;
    SAVEVPTR(PL_sortcop);
    if (flags & OPf_STACKED) {
        if (flags & OPf_SPECIAL) {
            OP *nullop = OpSIBLING(cLISTOP->op_first);  /* pass pushmark */
            assert(nullop->op_type == OP_NULL);
            PL_sortcop = nullop->op_next;
        }
        else {
            GV *autogv = NULL;
            HV *stash;
            cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
          check_cv:
            if (cv && SvPOK(cv)) {
                const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
                if (proto && strEQ(proto, "$$")) {
                    hasargs = TRUE;
                }
            }
            if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
                is_xsub = 1;
            }
            else if (!(cv && CvROOT(cv))) {
                if (gv) {
                    goto autoload;
                }
                else if (!CvANON(cv) && (gv = CvGV(cv))) {
                    if (cv != GvCV(gv)) cv = GvCV(gv);
                  autoload:
                    if (!autogv && (
                        autogv = gv_autoload_pvn(
                            GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
                            GvNAMEUTF8(gv) ? SVf_UTF8 : 0
                        )
                    )) {
                        cv = GvCVu(autogv);
                        goto check_cv;
                    }
                    else {
                        SV *tmpstr = sv_newmortal();
                        gv_efullname3(tmpstr, gv, NULL);
                        DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
                            SVfARG(tmpstr));
                    }
                }
                else {
                    DIE(aTHX_ "Undefined subroutine in sort");
                }
            }

            if (is_xsub)
                PL_sortcop = (OP*)cv;
            else
                PL_sortcop = CvSTART(cv);
        }
    }
    else {
        PL_sortcop = NULL;
    }
    /* optimiser converts "@a = sort @a" to "sort \@a". In this case,
     * push (@a) onto stack, then assign result back to @a at the end of
     * this function */
    if (priv & OPpSORT_INPLACE) {
        assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
        (void)POPMARK;  /* remove mark associated with ex-OP_AASSIGN */
        av = MUTABLE_AV((*SP));
        if (SvREADONLY(av))
            Perl_croak_no_modify();
        max = AvFILL(av) + 1;
        MEXTEND(SP, max);
        if (SvMAGICAL(av)) {
            for (i=0; i < max; i++) {
                SV **svp = av_fetch(av, i, FALSE);
                *SP++ = (svp) ? *svp : NULL;
            }
        }
        else {
            SV **svp = AvARRAY(av);
            assert(svp || max == 0);
            for (i = 0; i < max; i++)
                *SP++ = *svp++;
        }
        SP--;
        p1 = p2 = SP - (max-1);
    }
    else {
        p2 = MARK+1;
        max = SP - MARK;
    }

    /* shuffle stack down, removing optional initial cv (p1!=p2), plus
     * any nulls; also stringify any args, or convert them to integer or
     * number, as required */
    copytmps = cBOOL(PL_sortcop);
    for (i=max; i > 0 ; i--) {
        if ((*p1 = *p2++)) {                    /* Weed out nulls. */
            if (copytmps && SvPADTMP(*p1)) {
                *p1 = sv_mortalcopy(*p1);
            }
            SvTEMP_off(*p1);
            if (!PL_sortcop) {
                if (priv & OPpSORT_NUMERIC) {
                    if (priv & OPpSORT_INTEGER) {
                        if (!SvIOK(*p1))
                            (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                    }
                    else {
                        if (!SvNSIOK(*p1))
                            (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                        if (all_SIVs && !SvSIOK(*p1))
                            all_SIVs = FALSE;
                    }
                }
                else {
                    if (!SvPOK(*p1))
                        (void)sv_2pv_flags(*p1, 0,
                                           SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
                }
                if (SvAMAGIC(*p1))
                    overloading = 1;
            }
            p1++;
        }
        else
            max--;
    }
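    /* Note: converting each element once up front lets the tight comparison
     * routines below read plain IV/NV/PV values, so get-magic is not re-run
     * for every one of the O(n log n) comparisons; values with overloading
     * are instead routed to the S_amagic_* comparators. */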
    if (max > 1) {
        SV **start;

        if (PL_sortcop) {
            const bool oldcatch = CATCH_GET;
            I32 old_savestack_ix = PL_savestack_ix;

            PUSHSTACKi(PERLSI_SORT);
            if (!hasargs && !is_xsub) {
                SAVEGENERICSV(PL_firstgv);
                SAVEGENERICSV(PL_secondgv);
                PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                /* make sure the GP isn't removed out from under us */
                save_gp(PL_firstgv, 0);
                save_gp(PL_secondgv, 0);
                /* we don't want modifications localized */
                GvINTRO_off(PL_firstgv);
                GvINTRO_off(PL_secondgv);
                SAVEGENERICSV(GvSV(PL_firstgv));
                SvREFCNT_inc(GvSV(PL_firstgv));
                SAVEGENERICSV(GvSV(PL_secondgv));
                SvREFCNT_inc(GvSV(PL_secondgv));
            }

            cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
            if (!(flags & OPf_SPECIAL)) {
                cx->cx_type = CXt_SUB|CXp_MULTICALL;
                cx_pushsub(cx, cv, NULL, hasargs);
                if (!is_xsub) {
                    PADLIST * const padlist = CvPADLIST(cv);

                    if (++CvDEPTH(cv) >= 2)
                        pad_push(padlist, CvDEPTH(cv));
                    PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));

                    if (hasargs) {
                        /* This is mostly copied from pp_entersub */
                        AV * const av = MUTABLE_AV(PAD_SVl(0));

                        cx->blk_sub.savearray = GvAV(PL_defgv);
                        GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
                    }
                }
            }

            start = p1 - max;
            sortsvp(aTHX_ start, max,
                    (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
                    sort_flags);

            /* Reset cx, in case the context stack has been reallocated. */
            cx = CX_CUR();

            PL_stack_sp = PL_stack_base + cx->blk_oldsp;

            if (!(flags & OPf_SPECIAL)) {
                assert(CxTYPE(cx) == CXt_SUB);
                cx_popsub(cx);
            }
            else {
                assert(CxTYPE(cx) == CXt_NULL);
                /* there isn't a POPNULL ! */
            }

            cx_popblock(cx);
            CX_POP(cx);
            POPSTACK;
            CATCH_SET(oldcatch);
        }
        else {
            MEXTEND(SP, 20);    /* Can't afford stack realloc on signal. */
            start = ORIGMARK+1;
            sortsvp(aTHX_ start, max,
                    (priv & OPpSORT_NUMERIC)
                        ? ( ( ( priv & OPpSORT_INTEGER) || all_SIVs)
                            ? ( overloading ? S_amagic_i_ncmp : S_sv_i_ncmp)
                            : ( overloading ? S_amagic_ncmp : S_sv_ncmp ) )
                        : (
#ifdef USE_LOCALE_COLLATE
                           IN_LC_RUNTIME(LC_COLLATE)
                            ? ( overloading
                                ? (SVCOMPARE_t)S_amagic_cmp_locale
                                : (SVCOMPARE_t)sv_cmp_locale_static)
                            :
#endif
                              ( overloading ? (SVCOMPARE_t)S_amagic_cmp : (SVCOMPARE_t)sv_cmp_static)),
                    sort_flags);
        }

        if ((priv & OPpSORT_REVERSE) != 0) {
            SV **q = start+max-1;
            while (start < q) {
                SV * const tmp = *start;
                *start++ = *q;
                *q-- = tmp;
            }
        }
    }
    if (av) {
        /* copy back result to the array */
        SV** const base = MARK+1;
        if (SvMAGICAL(av)) {
            for (i = 0; i < max; i++)
                base[i] = newSVsv(base[i]);
            av_clear(av);
            for (i=0; i < max; i++) {
                SV * const sv = base[i];
                SV ** const didstore = av_store(av, i, sv);
                if (!didstore)
                    sv_2mortal(sv);
            }
        }
        else {
            /* the elements of av are likely to be the same as the
             * (non-refcounted) elements on the stack, just in a different
             * order. However, it's possible that someone's messed with av
             * in the meantime. So bump and unbump the relevant refcounts
             * first.
             */
            for (i = 0; i < max; i++) {
                SV *sv = base[i];
                if (SvREFCNT(sv) > 1)
                    base[i] = newSVsv(sv);
                else
                    SvREFCNT_inc_simple_void_NN(sv);
            }
            av_clear(av);
            Copy(base, AvARRAY(av), max, SV*);
            AvFILLp(av) = max - 1;
        }
    }

    LEAVE;
    PL_stack_sp = ORIGMARK + max;
    return nextop;
}
static I32
S_sortcv(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;
    SV *olda, *oldb;

    PERL_ARGS_ASSERT_SORTCV;

    olda = GvSV(PL_firstgv);
    GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);
    SvREFCNT_dec(olda);
    oldb = GvSV(PL_secondgv);
    GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);
    SvREFCNT_dec(oldb);
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}
static I32
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    AV * const av = GvAV(PL_defgv);
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV_STACKED;

    if (AvREAL(av)) {
        av_clear(av);
        AvREAL_off(av);
        AvREIFY_on(av);
    }
    /* make sure @_ has (non-refcounted) room for the two elements */
    if (AvMAX(av) < 1) {
        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);
            AvARRAY(av) = ary;
        }
        if (AvMAX(av) < 1) {
            AvMAX(av) = 1;
            Renew(ary,2,SV*);
            AvARRAY(av) = ary;
            AvALLOC(av) = ary;
        }
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}
static I32
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
{
    dSP;
    const I32 oldsaveix = PL_savestack_ix;
    CV * const cv=MUTABLE_CV(PL_sortcop);
    I32 result;
    PMOP * const pm = PL_curpm;

    PERL_ARGS_ASSERT_SORTCV_XSUB;

    SP = PL_stack_base;
    PUSHMARK(SP);
    EXTEND(SP, 2);
    *++SP = a;
    *++SP = b;
    PUTBACK;
    (void)(*CvXSUB(cv))(aTHX_ cv);
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}
static I32
S_sv_ncmp(pTHX_ SV *const a, SV *const b)
{
    I32 cmp = do_ncmp(a, b);

    PERL_ARGS_ASSERT_SV_NCMP;

    if (cmp == 2) {     /* do_ncmp() signals an unordered (NaN) compare */
        if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
        cmp = 0;
    }

    return cmp;
}
static I32
S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    const IV iv1 = SvIV(a);
    const IV iv2 = SvIV(b);

    PERL_ARGS_ASSERT_SV_I_NCMP;

    return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
}
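/* Note: the nested conditional above is the portable three-way integer
 * compare; simply returning iv1 - iv2 could overflow the IV range when
 * the operands have opposite signs. */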
/* call the overloaded 'meth' method (e.g. ncmp_amg) if either operand has
   overloading; yields NULL when neither does */
#define tryCALL_AMAGICbin(left,right,meth) \
    (SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL;

/* clamp any comparison result to -1, 0 or 1 */
#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))
static I32
S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_ncmp(aTHX_ a, b);
}
static I32
S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_I_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_i_ncmp(aTHX_ a, b);
}
static I32
S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp(str1, str2);
}
#ifdef USE_LOCALE_COLLATE

static I32
S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp_locale(str1, str2);
}

#endif

/*
 * ex: set ts=8 sts=4 sw=4 et:
 */