1/* pp_sort.c
2 *
3 * Copyright (c) 1991-2001, Larry Wall
4 *
5 * You may distribute under the terms of either the GNU General Public
6 * License or the Artistic License, as specified in the README file.
7 *
8 */
9
10/*
11 * ...they shuffled back towards the rear of the line. 'No, not at the
12 * rear!' the slave-driver shouted. 'Three files up. And stay there...
13 */
14
15#include "EXTERN.h"
16#define PERL_IN_PP_SORT_C
17#include "perl.h"
18
19static I32 sortcv(pTHX_ SV *a, SV *b);
20static I32 sortcv_stacked(pTHX_ SV *a, SV *b);
21static I32 sortcv_xsub(pTHX_ SV *a, SV *b);
22static I32 sv_ncmp(pTHX_ SV *a, SV *b);
23static I32 sv_i_ncmp(pTHX_ SV *a, SV *b);
24static I32 amagic_ncmp(pTHX_ SV *a, SV *b);
25static I32 amagic_i_ncmp(pTHX_ SV *a, SV *b);
26static I32 amagic_cmp(pTHX_ SV *a, SV *b);
27static I32 amagic_cmp_locale(pTHX_ SV *a, SV *b);
28
29#define sv_cmp_static Perl_sv_cmp
30#define sv_cmp_locale_static Perl_sv_cmp_locale
31
32#define SORTHINTS(hintsvp) \
33 ((PL_hintgv && \
34 (hintsvp = hv_fetch(GvHV(PL_hintgv), "SORT", 4, FALSE))) ? \
35 (I32)SvIV(*hintsvp) : 0)
36
37#ifndef SMALLSORT
38#define SMALLSORT (200)
39#endif
40
41/*
42 * The mergesort implementation is by Peter M. McIlroy <pmcilroy@lucent.com>.
43 *
44 * The original code was written in conjunction with BSD Computer Software
45 * Research Group at University of California, Berkeley.
46 *
47 * See also: "Optimistic Merge Sort" (SODA '92)
48 *
49 * The integration to Perl is by John P. Linderman <jpl@research.att.com>.
50 *
51 * The code can be distributed under the same terms as Perl itself.
52 *
53 */
54
55#ifdef TESTHARNESS
56#include <sys/types.h>
57typedef void SV;
58#define pTHX_
59#define STATIC
60#define New(ID,VAR,N,TYPE) VAR=(TYPE *)malloc((N)*sizeof(TYPE))
61#define Safefree(VAR) free(VAR)
62typedef int (*SVCOMPARE_t) (pTHX_ SV*, SV*);
63#endif /* TESTHARNESS */
64
65typedef char * aptr; /* pointer for arithmetic on sizes */
66typedef SV * gptr; /* pointers in our lists */
67
68/* Binary merge internal sort, with a few special mods
69** for the special perl environment it now finds itself in.
70**
71** Things that were once options have been hotwired
72** to values suitable for this use. In particular, we'll always
73** initialize looking for natural runs, we'll always produce stable
74** output, and we'll always do Peter McIlroy's binary merge.
75*/
76
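/* For orientation, here is an illustrative sketch (guarded by #if 0, so it
** is never compiled) of the overall shape of a bottom-up binary merge sort
** on plain ints: adjacent runs are merged pass after pass, with the main
** and auxiliary arrays swapping roles. It deliberately omits the natural-run
** detection, the descriptor chain, and the stability machinery of the real
** code below; the function name and int element type are invented for the
** example.
*/
#if 0
#include <stdlib.h>
#include <string.h>

static void
bottom_up_mergesort(int *a, size_t n)
{
    size_t width;
    int *src = a;
    int *dst = malloc(n * sizeof *dst);

    if (!dst || n < 2) { free(dst); return; }
    for (width = 1; width < n; width *= 2) {
        size_t lo;
        for (lo = 0; lo < n; lo += 2 * width) {
            size_t mid = (lo + width < n) ? lo + width : n;
            size_t hi  = (lo + 2 * width < n) ? lo + 2 * width : n;
            size_t i = lo, j = mid, k = lo;
            while (i < mid && j < hi)
                /* "<=" keeps equal elements in left-run-first order */
                dst[k++] = (src[i] <= src[j]) ? src[i++] : src[j++];
            while (i < mid) dst[k++] = src[i++];
            while (j < hi)  dst[k++] = src[j++];
        }
        { int *t = src; src = dst; dst = t; }   /* swap main/aux roles */
    }
    if (src != a) memcpy(a, src, n * sizeof *a);
    free(src == a ? dst : src);                 /* free the heap buffer */
}
#endif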
77/* Pointer types for arithmetic and storage and convenience casts */
78
79#define APTR(P) ((aptr)(P))
80#define GPTP(P) ((gptr *)(P))
81#define GPPP(P) ((gptr **)(P))
82
83
84/* byte offset from pointer P to (larger) pointer Q */
85#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
86
87#define PSIZE sizeof(gptr)
88
89/* If PSIZE is a power of 2, make PSHIFT that power, if that helps */
90
91#ifdef PSHIFT
92#define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT))
93#define PNBYTE(N) ((N) << (PSHIFT))
94#define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N)))
95#else
96/* Leave optimization to compiler */
97#define PNELEM(P, Q) (GPTP(Q) - GPTP(P))
98#define PNBYTE(N) ((N) * (PSIZE))
99#define PINDEX(P, N) (GPTP(P) + (N))
100#endif
101
102/* Pointer into other corresponding to pointer into this */
103#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))
104
105#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
106
107
108/* Runs are identified by a pointer in the auxiliary list.
109** The pointer is at the start of the list,
110** and it points to the start of the next list.
111** NEXT is used as an lvalue, too.
112*/
113
114#define NEXT(P) (*GPPP(P))
115
116
117/* PTHRESH is the minimum number of pairs with the same sense to justify
118** checking for a run and extending it. Note that PTHRESH counts PAIRS,
119** not just elements, so PTHRESH == 8 means a run of 16.
120*/
121
122#define PTHRESH (8)
123
124/* RTHRESH is the number of elements in a run that must compare low
125** to the low element from the opposing run before we justify
126** doing a binary rampup instead of single stepping.
127** In random input, N in a row low should only happen with
128** probability 2^(1-N), so we can risk that we are dealing
129** with orderly input without paying much when we aren't.
130*/
131
132#define RTHRESH (6)
133
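/* Illustrative sketch (under #if 0, never compiled) of the "binary rampup"
** idea that RTHRESH gates: after enough single steps in a row, the probe
** distance doubles, and a binary search then pins down the exact insertion
** point. It is shown on a plain sorted int array with an invented name;
** the real version operates on gptr lists inside the merge loop below.
*/
#if 0
static size_t
gallop_lower_bound(const int *a, size_t n, int key)
{
    size_t lo = 0, hi = 1;

    if (n == 0)
        return 0;
    /* double the probe distance until a[hi] >= key or we pass the end */
    while (hi < n && a[hi] < key)
        hi = (hi * 2 < n) ? hi * 2 : n;
    if (hi > 1)
        lo = hi / 2;
    /* ordinary binary search over [lo, hi) */
    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (a[mid] < key)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;          /* index of the first element >= key */
}
#endif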
134
135/*
136** Overview of algorithm and variables.
137** The array of elements at list1 will be organized into runs of length 2,
138** or runs of length >= 2 * PTHRESH. We only try to form long runs when
139** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
140**
141** Unless otherwise specified, pair pointers address the first of two elements.
142**
143** b and b+1 are a pair that compare with sense ``sense''.
144** b is the ``bottom'' of adjacent pairs that might form a longer run.
145**
146** p2 parallels b in the list2 array, where runs are defined by
147** a pointer chain.
148**
149** t represents the ``top'' of the adjacent pairs that might extend
150** the run beginning at b. Usually, t addresses a pair
151** that compares with opposite sense from (b,b+1).
152** However, it may also address a singleton element at the end of list1,
153** or it may be equal to ``last'', the first element beyond list1.
154**
155** r addresses the Nth pair following b. If this would be beyond t,
156** we back it off to t. Only when r is less than t do we consider the
157** run long enough to be worth checking.
158**
159** q addresses a pair such that the pairs at b through q already form a run.
160** Often, q will equal b, indicating we are only sure of the pair itself.
161** However, a search on the previous cycle may have revealed a longer run,
162** so q may be greater than b.
163**
164** p is used to work back from a candidate r, trying to reach q,
165** which would mean b through r would be a run. If we discover such a run,
166** we start q at r and try to push it further towards t.
167** If b through r is NOT a run, we detect the wrong order at (p-1,p).
168** In any event, after the check (if any), we have two main cases.
169**
170** 1) Short run. b <= q < p <= r <= t.
171** b through q is a run (perhaps trivial)
172** q through p are uninteresting pairs
173** p through r is a run
174**
175** 2) Long run. b < r <= q < t.
176** b through q is a run (of length >= 2 * PTHRESH)
177**
178** Note that degenerate cases are not only possible, but likely.
179** For example, if the pair following b compares with opposite sense,
180** then b == q < p == r == t.
181*/
182
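/* Illustrative sketch (under #if 0, never compiled) of the run descriptors
** that dynprep() below records in the auxiliary array, using plain size_t
** indices instead of the NEXT() pointer chain: next[start] holds the index
** one past the end of the run that begins at start. The real routine
** additionally reverses descending runs and guarantees every run has length
** at least 2; the name and types here are invented for the example.
*/
#if 0
static void
find_ascending_runs(const int *a, size_t n, size_t *next)
{
    size_t start = 0;

    while (start < n) {
        size_t end = start + 1;
        /* extend the run while adjacent elements stay in order */
        while (end < n && a[end - 1] <= a[end])
            end++;
        next[start] = end;      /* descriptor: the run is [start, end) */
        start = end;
    }
}
#endif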
183
184static void
185dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, SVCOMPARE_t cmp)
186{
187 int sense;
188 register gptr *b, *p, *q, *t, *p2;
189 register gptr c, *last, *r;
190 gptr *savep;
191
192 b = list1;
193 last = PINDEX(b, nmemb);
194 sense = (cmp(aTHX_ *b, *(b+1)) > 0);
195 for (p2 = list2; b < last; ) {
196 /* We just started, or just reversed sense.
197 ** Set t at end of pairs with the prevailing sense.
198 */
199 for (p = b+2, t = p; ++p < last; t = ++p) {
200 if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
201 }
202 q = b;
203 /* Having laid out the playing field, look for long runs */
204 do {
205 p = r = b + (2 * PTHRESH);
206 if (r >= t) p = r = t; /* too short to care about */
207 else {
208 while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
209 ((p -= 2) > q));
210 if (p <= q) {
211 /* b through r is a (long) run.
212 ** Extend it as far as possible.
213 */
214 p = q = r;
215 while (((p += 2) < t) &&
216 ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
217 r = p = q + 2; /* no simple pairs, no after-run */
218 }
219 }
220 if (q > b) { /* run of greater than 2 at b */
221 savep = p;
222 p = q += 2;
223 /* pick up singleton, if possible */
224 if ((p == t) &&
225 ((t + 1) == last) &&
226 ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
227 savep = r = p = q = last;
228 p2 = NEXT(p2) = p2 + (p - b);
229 if (sense) while (b < --p) {
230 c = *b;
231 *b++ = *p;
232 *p = c;
233 }
234 p = savep;
235 }
236 while (q < p) { /* simple pairs */
237 p2 = NEXT(p2) = p2 + 2;
238 if (sense) {
239 c = *q++;
240 *(q-1) = *q;
241 *q++ = c;
242 } else q += 2;
243 }
244 if (((b = p) == t) && ((t+1) == last)) {
245 NEXT(p2) = p2 + 1;
246 b++;
247 }
248 q = r;
249 } while (b < t);
250 sense = !sense;
251 }
252 return;
253}
254
255
256/* Overview of bmerge variables:
257**
258** list1 and list2 address the main and auxiliary arrays.
259** They swap identities after each merge pass.
260** Base points to the original list1, so we can tell if
261** the pointers ended up where they belonged (or must be copied).
262**
263** When we are merging two lists, f1 and f2 are the next elements
264** on the respective lists. l1 and l2 mark the end of the lists.
265** tp2 is the current location in the merged list.
266**
267** p1 records where f1 started.
268** After the merge, a new descriptor is built there.
269**
270** p2 is a ``parallel'' pointer in (what starts as) descriptor space.
271** It is used to identify and delimit the runs.
272**
273** In the heat of determining where q, the greater of the f1/f2 elements,
274** belongs in the other list, b, t, and p represent bottom, top, and probe
275** locations, respectively, in the other list.
276** They make convenient temporary pointers in other places.
277*/
278
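/* Illustrative sketch (under #if 0, never compiled) of one stable merge step
** on plain ints. It is the effect the loop below achieves with its
** "cmp(q, other) <= sense" rule: when the two heads compare equal, the
** element from the earlier (left) run is emitted first, which is what keeps
** the whole sort stable. The names here are invented for the example.
*/
#if 0
static void
stable_merge_runs(const int *left, size_t nl,
                  const int *right, size_t nr, int *out)
{
    size_t i = 0, j = 0, k = 0;

    while (i < nl && j < nr) {
        /* "<=" resolves ties in favour of the left (earlier) run */
        if (left[i] <= right[j])
            out[k++] = left[i++];
        else
            out[k++] = right[j++];
    }
    while (i < nl) out[k++] = left[i++];
    while (j < nr) out[k++] = right[j++];
}
#endif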
279STATIC void
280S_mergesortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp)
281{
282 int i, run;
283 int sense;
284 register gptr *f1, *f2, *t, *b, *p, *tp2, *l1, *l2, *q;
285 gptr *aux, *list2, *p2, *last;
286 gptr *base = list1;
287 gptr *p1;
288 gptr small[SMALLSORT];
289
290 if (nmemb <= 1) return; /* sorted trivially */
291 if (nmemb <= SMALLSORT) list2 = small; /* use stack for aux array */
292 else { New(799,list2,nmemb,gptr); } /* allocate auxiliary array */
293 aux = list2;
294 dynprep(aTHX_ list1, list2, nmemb, cmp);
295 last = PINDEX(list2, nmemb);
296 while (NEXT(list2) != last) {
297 /* More than one run remains. Do some merging to reduce runs. */
298 l2 = p1 = list1;
299 for (tp2 = p2 = list2; p2 != last;) {
300 /* The new first run begins where the old second list ended.
301 ** Use the p2 ``parallel'' pointer to identify the end of the run.
302 */
303 f1 = l2;
304 t = NEXT(p2);
305 f2 = l1 = POTHER(t, list2, list1);
306 if (t != last) t = NEXT(t);
307 l2 = POTHER(t, list2, list1);
308 p2 = t;
309 while (f1 < l1 && f2 < l2) {
310 /* If head 1 is larger than head 2, find ALL the elements
311 ** in list 2 strictly less than head1, write them all,
312 ** then head 1. Then compare the new heads, and repeat,
313 ** until one or both lists are exhausted.
314 **
315 ** In all comparisons (after establishing
316 ** which head to merge) the item to merge
317 ** (at pointer q) is the first operand of
318 ** the comparison. When we want to know
319 ** if ``q is strictly less than the other'',
320 ** we can't just do
321 ** cmp(q, other) < 0
322 ** because stability demands that we treat equality
323 ** as high when q comes from l2, and as low when
324 ** q was from l1. So we ask the question by doing
325 ** cmp(q, other) <= sense
326 ** and make sense == 0 when equality should look low,
327 ** and -1 when equality should look high.
328 */
329
330
331 if (cmp(aTHX_ *f1, *f2) <= 0) {
332 q = f2; b = f1; t = l1;
333 sense = -1;
334 } else {
335 q = f1; b = f2; t = l2;
336 sense = 0;
337 }
338
339
340 /* ramp up
341 **
342 ** Leave t at something strictly
343 ** greater than q (or at the end of the list),
344 ** and b at something strictly less than q.
345 */
346 for (i = 1, run = 0 ;;) {
347 if ((p = PINDEX(b, i)) >= t) {
348 /* off the end */
349 if (((p = PINDEX(t, -1)) > b) &&
350 (cmp(aTHX_ *q, *p) <= sense))
351 t = p;
352 else b = p;
353 break;
354 } else if (cmp(aTHX_ *q, *p) <= sense) {
355 t = p;
356 break;
357 } else b = p;
358 if (++run >= RTHRESH) i += i;
359 }
360
361
362 /* q is known to follow b and must be inserted before t.
363 ** Increment b, so the range of possibilities is [b,t).
364 ** Round binary split down, to favor early appearance.
365 ** Adjust b and t until q belongs just before t.
366 */
367
368 b++;
369 while (b < t) {
370 p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
371 if (cmp(aTHX_ *q, *p) <= sense) {
372 t = p;
373 } else b = p + 1;
374 }
375
376
377 /* Copy all the strictly low elements */
378
379 if (q == f1) {
380 FROMTOUPTO(f2, tp2, t);
381 *tp2++ = *f1++;
382 } else {
383 FROMTOUPTO(f1, tp2, t);
384 *tp2++ = *f2++;
385 }
386 }
387
388
389 /* Run out remaining list */
390 if (f1 == l1) {
391 if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
392 } else FROMTOUPTO(f1, tp2, l1);
393 p1 = NEXT(p1) = POTHER(tp2, list2, list1);
394 }
395 t = list1;
396 list1 = list2;
397 list2 = t;
398 last = PINDEX(list2, nmemb);
399 }
400 if (base == list2) {
401 last = PINDEX(list1, nmemb);
402 FROMTOUPTO(list1, list2, last);
403 }
404 if (aux != small) Safefree(aux); /* free iff allocated */
405 return;
406}
407
408/*
409 * The quicksort implementation was derived from source code contributed
410 * by Tom Horsley.
411 *
412 * NOTE: this code was derived from Tom Horsley's qsort replacement
413 * and should not be confused with the original code.
414 */
415
416/* Copyright (C) Tom Horsley, 1997. All rights reserved.
417
418 Permission granted to distribute under the same terms as perl which are
419 (briefly):
420
421 This program is free software; you can redistribute it and/or modify
422 it under the terms of either:
423
424 a) the GNU General Public License as published by the Free
425 Software Foundation; either version 1, or (at your option) any
426 later version, or
427
428 b) the "Artistic License" which comes with this Kit.
429
430 Details on the perl license can be found in the perl source code which
431 may be located via the www.perl.com web page.
432
433 This is the most wonderfulest possible qsort I can come up with (and
434 still be mostly portable). My (limited) tests indicate it consistently
435 does about 20% fewer calls to compare than does the qsort in the Visual
436 C++ library, other vendors may vary.
437
438 Some of the ideas in here can be found in "Algorithms" by Sedgewick,
439 others I invented myself (or more likely re-invented since they seemed
440 pretty obvious once I watched the algorithm operate for a while).
441
442 Most of this code was written while watching the Marlins sweep the Giants
443 in the 1997 National League Playoffs - no Braves fans allowed to use this
444 code (just kidding :-).
445
446 I realize that if I wanted to be true to the perl tradition, the only
447 comment in this file would be something like:
448
449 ...they shuffled back towards the rear of the line. 'No, not at the
450 rear!' the slave-driver shouted. 'Three files up. And stay there...
451
452 However, I really needed to violate that tradition just so I could keep
453 track of what happens myself, not to mention some poor fool trying to
454 understand this years from now :-).
455*/
456
457/* ********************************************************** Configuration */
458
459#ifndef QSORT_ORDER_GUESS
460#define QSORT_ORDER_GUESS 2 /* Select doubling version of the netBSD trick */
461#endif
462
463/* QSORT_MAX_STACK is the largest number of partitions that can be stacked up for
464 future processing - a good max upper bound is log base 2 of memory size
465 (32 on 32-bit machines, 64 on 64-bit machines, etc.). In reality it can
466 safely be smaller than that since the program is taking up some space and
467 most operating systems only let you grab some subset of contiguous
468 memory (not to mention that you are normally sorting data larger than
469 1 byte element size :-).
470*/
471#ifndef QSORT_MAX_STACK
472#define QSORT_MAX_STACK 32
473#endif
474
475/* QSORT_BREAK_EVEN is the size of the largest partition we should insertion sort.
476 Anything bigger and we use qsort. If you make this too small, the qsort
477 will probably break (or become less efficient), because it doesn't expect
478 the middle element of a partition to be the same as the right or left -
479 you have been warned).
480*/
481#ifndef QSORT_BREAK_EVEN
482#define QSORT_BREAK_EVEN 6
483#endif
484
485/* QSORT_PLAY_SAFE is the size of the largest partition we're willing
486 to go quadratic on. We inoculate larger partitions against
487 quadratic behavior by shuffling them before sorting. This is not
488 an absolute guarantee of non-quadratic behavior, but it would take
489 staggeringly bad luck to pick extreme elements as the pivot
490 from randomized data.
491*/
492#ifndef QSORT_PLAY_SAFE
493#define QSORT_PLAY_SAFE 255
494#endif
495
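/* Illustrative sketch (under #if 0, never compiled) of the Fisher-Yates
** shuffle used to inoculate large partitions, on a plain int array with
** rand() standing in for Perl's Drand01(). Modulo bias is ignored for
** brevity; the function name is invented, and the real shuffle appears at
** the top of S_qsortsvu() below.
*/
#if 0
#include <stdlib.h>     /* rand() */

static void
shuffle_ints(int *a, size_t n)
{
    size_t i = n;

    while (i > 1) {
        size_t j = (size_t)rand() % i;  /* 0 <= j < i */
        int tmp;

        --i;
        tmp = a[j];
        a[j] = a[i];
        a[i] = tmp;
    }
}
#endif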
496/* ************************************************************* Data Types */
497
498/* hold left and right index values of a partition waiting to be sorted (the
499 partition includes both left and right - right is NOT one past the end or
500 anything like that).
501*/
502struct partition_stack_entry {
503 int left;
504 int right;
505#ifdef QSORT_ORDER_GUESS
506 int qsort_break_even;
507#endif
508};
509
510/* ******************************************************* Shorthand Macros */
511
512/* Note that these macros will be used from inside the qsort function where
513 we happen to know that the variable 'elt_size' contains the size of an
514 array element and the variable 'temp' points to enough space to hold a
515 temp element and the variable 'array' points to the array being sorted
516 and 'compare' is the pointer to the compare routine.
517
518 Also note that there are very many highly architecture specific ways
519 these might be sped up, but this is simply the most generally portable
520 code I could think of.
521*/
522
523/* Return < 0, == 0, or > 0 as the value of elt1 is < elt2, == elt2, > elt2
524*/
525#define qsort_cmp(elt1, elt2) \
526 ((*compare)(aTHX_ array[elt1], array[elt2]))
527
528#ifdef QSORT_ORDER_GUESS
529#define QSORT_NOTICE_SWAP swapped++;
530#else
531#define QSORT_NOTICE_SWAP
532#endif
533
534/* swaps contents of array elements elt1, elt2.
535*/
536#define qsort_swap(elt1, elt2) \
537 STMT_START { \
538 QSORT_NOTICE_SWAP \
539 temp = array[elt1]; \
540 array[elt1] = array[elt2]; \
541 array[elt2] = temp; \
542 } STMT_END
543
544/* rotate contents of elt1, elt2, elt3 such that elt1 gets elt2, elt2 gets
545 elt3 and elt3 gets elt1.
546*/
547#define qsort_rotate(elt1, elt2, elt3) \
548 STMT_START { \
549 QSORT_NOTICE_SWAP \
550 temp = array[elt1]; \
551 array[elt1] = array[elt2]; \
552 array[elt2] = array[elt3]; \
553 array[elt3] = temp; \
554 } STMT_END
555
556/* ************************************************************ Debug stuff */
557
558#ifdef QSORT_DEBUG
559
560static void
561break_here()
562{
563 return; /* good place to set a breakpoint */
564}
565
566#define qsort_assert(t) (void)( (t) || (break_here(), 0) )
567
568static void
569doqsort_all_asserts(
570 void * array,
571 size_t num_elts,
572 size_t elt_size,
573 int (*compare)(const void * elt1, const void * elt2),
574 int pc_left, int pc_right, int u_left, int u_right)
575{
576 int i;
577
578 qsort_assert(pc_left <= pc_right);
579 qsort_assert(u_right < pc_left);
580 qsort_assert(pc_right < u_left);
581 for (i = u_right + 1; i < pc_left; ++i) {
582 qsort_assert(qsort_cmp(i, pc_left) < 0);
583 }
584 for (i = pc_left; i < pc_right; ++i) {
585 qsort_assert(qsort_cmp(i, pc_right) == 0);
586 }
587 for (i = pc_right + 1; i < u_left; ++i) {
588 qsort_assert(qsort_cmp(pc_right, i) < 0);
589 }
590}
591
592#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) \
593 doqsort_all_asserts(array, num_elts, elt_size, compare, \
594 PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT)
595
596#else
597
598#define qsort_assert(t) ((void)0)
599
600#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) ((void)0)
601
602#endif
603
604/* ****************************************************************** qsort */
605
606STATIC void /* the standard unstable (u) quicksort (qsort) */
607S_qsortsvu(pTHX_ SV ** array, size_t num_elts, SVCOMPARE_t compare)
608{
609 register SV * temp;
610
611 struct partition_stack_entry partition_stack[QSORT_MAX_STACK];
612 int next_stack_entry = 0;
613
614 int part_left;
615 int part_right;
616#ifdef QSORT_ORDER_GUESS
617 int qsort_break_even;
618 int swapped;
619#endif
620
621 /* Make sure we actually have work to do.
622 */
623 if (num_elts <= 1) {
624 return;
625 }
626
627 /* Inoculate large partitions against quadratic behavior */
628 if (num_elts > QSORT_PLAY_SAFE) {
629 register size_t n, j;
630 register SV **q;
631 for (n = num_elts, q = array; n > 1; ) {
632 j = n-- * Drand01();
633 temp = q[j];
634 q[j] = q[n];
635 q[n] = temp;
636 }
637 }
638
639 /* Setup the initial partition definition and fall into the sorting loop
640 */
641 part_left = 0;
642 part_right = (int)(num_elts - 1);
643#ifdef QSORT_ORDER_GUESS
644 qsort_break_even = QSORT_BREAK_EVEN;
645#else
646#define qsort_break_even QSORT_BREAK_EVEN
647#endif
648 for ( ; ; ) {
649 if ((part_right - part_left) >= qsort_break_even) {
650 /* OK, this is gonna get hairy, so let's try to document all the
651 concepts and abbreviations and variables and what they keep
652 track of:
653
654 pc: pivot chunk - the set of array elements we accumulate in the
655 middle of the partition, all equal in value to the original
656 pivot element selected. The pc is defined by:
657
658 pc_left - the leftmost array index of the pc
659 pc_right - the rightmost array index of the pc
660
661 we start with pc_left == pc_right and only one element
662 in the pivot chunk (but it can grow during the scan).
663
664 u: uncompared elements - the set of elements in the partition
665 we have not yet compared to the pivot value. There are two
666 uncompared sets during the scan - one to the left of the pc
667 and one to the right.
668
669 u_right - the rightmost index of the left side's uncompared set
670 u_left - the leftmost index of the right side's uncompared set
671
672 The leftmost index of the left side's uncompared set
673 doesn't need its own variable because it is always defined
674 by the leftmost edge of the whole partition (part_left). The
675 same goes for the rightmost edge of the right partition
676 (part_right).
677
678 We know there are no uncompared elements on the left once we
679 get u_right < part_left and no uncompared elements on the
680 right once u_left > part_right. When both these conditions
681 are met, we have completed the scan of the partition.
682
683 Any elements which are between the pivot chunk and the
684 uncompared elements should be less than the pivot value on
685 the left side and greater than the pivot value on the right
686 side (in fact, the goal of the whole algorithm is to arrange
687 for that to be true and make the groups of less-than and
688 greater-than elements into new partitions to sort again).
689
690 As you marvel at the complexity of the code and wonder why it
691 has to be so confusing, consider some of the things this level
692 of confusion brings:
693
694 Once I do a compare, I squeeze every ounce of juice out of it. I
695 never do compare calls I don't have to do, and I certainly never
696 do redundant calls.
697
698 I also never swap any elements unless I can prove there is a
699 good reason. Many sort algorithms will swap a known value with
700 an uncompared value just to get things in the right place (or
701 avoid complexity :-), but that uncompared value, once it gets
702 compared, may then have to be swapped again. A lot of the
703 complexity of this code is due to the fact that it never swaps
704 anything except compared values, and it only swaps them when the
705 compare shows they are out of position.
706 */
707 int pc_left, pc_right;
708 int u_right, u_left;
709
710 int s;
711
712 pc_left = ((part_left + part_right) / 2);
713 pc_right = pc_left;
714 u_right = pc_left - 1;
715 u_left = pc_right + 1;
716
717 /* Qsort works best when the pivot value is also the median value
718 in the partition (unfortunately you can't find the median value
719 without first sorting :-), so to give the algorithm a helping
720 hand, we pick 3 elements and sort them and use the median value
721 of that tiny set as the pivot value.
722
723 Some versions of qsort like to use the left middle and right as
724 the 3 elements to sort so they can ensure the ends of the
725 partition will contain values which will stop the scan in the
726 compare loop, but when you have to call an arbitrarily complex
727 routine to do a compare, it's really better to just keep track of
728 array index values to know when you hit the edge of the
729 partition and avoid the extra compare. An even better reason to
730 avoid using a compare call is the fact that you can drop off the
731 edge of the array if someone foolishly provides you with an
732 unstable compare function that doesn't always provide consistent
733 results.
734
735 So, since it is simpler for us to compare the three adjacent
736 elements in the middle of the partition, those are the ones we
737 pick here (conveniently pointed at by u_right, pc_left, and
738 u_left). The values of the left, center, and right elements
739 are referred to as l, c, and r in the following comments.
740 */
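 /* Illustrative sketch (under #if 0, never compiled) of what the nested
    if/else chain below accomplishes when the pivot-chunk bookkeeping is
    ignored: order three values so that l <= c <= r using at most three
    comparisons, leaving the median in the middle. The helper name sort3()
    is invented for the example.
 */
#if 0
static void
sort3(int *l, int *c, int *r)
{
    int tmp;

    if (*l > *c) { tmp = *l; *l = *c; *c = tmp; }       /* now l <= c */
    if (*c > *r) {
        tmp = *c; *c = *r; *r = tmp;                    /* now c <= r */
        if (*l > *c) { tmp = *l; *l = *c; *c = tmp; }   /* re-establish l <= c */
    }
}
#endif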
741
742#ifdef QSORT_ORDER_GUESS
743 swapped = 0;
744#endif
745 s = qsort_cmp(u_right, pc_left);
746 if (s < 0) {
747 /* l < c */
748 s = qsort_cmp(pc_left, u_left);
749 /* if l < c, c < r - already in order - nothing to do */
750 if (s == 0) {
751 /* l < c, c == r - already in order, pc grows */
752 ++pc_right;
753 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
754 } else if (s > 0) {
755 /* l < c, c > r - need to know more */
756 s = qsort_cmp(u_right, u_left);
757 if (s < 0) {
758 /* l < c, c > r, l < r - swap c & r to get ordered */
759 qsort_swap(pc_left, u_left);
760 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
761 } else if (s == 0) {
762 /* l < c, c > r, l == r - swap c&r, grow pc */
763 qsort_swap(pc_left, u_left);
764 --pc_left;
765 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
766 } else {
767 /* l < c, c > r, l > r - make lcr into rlc to get ordered */
768 qsort_rotate(pc_left, u_right, u_left);
769 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
770 }
771 }
772 } else if (s == 0) {
773 /* l == c */
774 s = qsort_cmp(pc_left, u_left);
775 if (s < 0) {
776 /* l == c, c < r - already in order, grow pc */
777 --pc_left;
778 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
779 } else if (s == 0) {
780 /* l == c, c == r - already in order, grow pc both ways */
781 --pc_left;
782 ++pc_right;
783 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
784 } else {
785 /* l == c, c > r - swap l & r, grow pc */
786 qsort_swap(u_right, u_left);
787 ++pc_right;
788 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
789 }
790 } else {
791 /* l > c */
792 s = qsort_cmp(pc_left, u_left);
793 if (s < 0) {
794 /* l > c, c < r - need to know more */
795 s = qsort_cmp(u_right, u_left);
796 if (s < 0) {
797 /* l > c, c < r, l < r - swap l & c to get ordered */
798 qsort_swap(u_right, pc_left);
799 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
800 } else if (s == 0) {
801 /* l > c, c < r, l == r - swap l & c, grow pc */
802 qsort_swap(u_right, pc_left);
803 ++pc_right;
804 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
805 } else {
806 /* l > c, c < r, l > r - rotate lcr into crl to order */
807 qsort_rotate(u_right, pc_left, u_left);
808 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
809 }
810 } else if (s == 0) {
811 /* l > c, c == r - swap ends, grow pc */
812 qsort_swap(u_right, u_left);
813 --pc_left;
814 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
815 } else {
816 /* l > c, c > r - swap ends to get in order */
817 qsort_swap(u_right, u_left);
818 qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
819 }
820 }
821 /* We now know the 3 middle elements have been compared and
822 arranged in the desired order, so we can shrink the uncompared
823 sets on both sides
824 */
825 --u_right;
826 ++u_left;
827 qsort_all_asserts(pc_left, pc_right, u_left, u_right);
828
829 /* The above massive nested if was the simple part :-). We now have
830 the middle 3 elements ordered and we need to scan through the
831 uncompared sets on either side, swapping elements that are on
832 the wrong side or simply shuffling equal elements around to get
833 all equal elements into the pivot chunk.
834 */
835
836 for ( ; ; ) {
837 int still_work_on_left;
838 int still_work_on_right;
839
840 /* Scan the uncompared values on the left. If I find a value
841 equal to the pivot value, move it over so it is adjacent to
842 the pivot chunk and expand the pivot chunk. If I find a value
843 less than the pivot value, then just leave it - it's already
844 on the correct side of the partition. If I find a greater
845 value, then stop the scan.
846 */
847 while ((still_work_on_left = (u_right >= part_left))) {
848 s = qsort_cmp(u_right, pc_left);
849 if (s < 0) {
850 --u_right;
851 } else if (s == 0) {
852 --pc_left;
853 if (pc_left != u_right) {
854 qsort_swap(u_right, pc_left);
855 }
856 --u_right;
857 } else {
858 break;
859 }
860 qsort_assert(u_right < pc_left);
861 qsort_assert(pc_left <= pc_right);
862 qsort_assert(qsort_cmp(u_right + 1, pc_left) <= 0);
863 qsort_assert(qsort_cmp(pc_left, pc_right) == 0);
864 }
865
866 /* Do a mirror image scan of uncompared values on the right
867 */
868 while ((still_work_on_right = (u_left <= part_right))) {
869 s = qsort_cmp(pc_right, u_left);
870 if (s < 0) {
871 ++u_left;
872 } else if (s == 0) {
873 ++pc_right;
874 if (pc_right != u_left) {
875 qsort_swap(pc_right, u_left);
876 }
877 ++u_left;
878 } else {
879 break;
880 }
881 qsort_assert(u_left > pc_right);
882 qsort_assert(pc_left <= pc_right);
883 qsort_assert(qsort_cmp(pc_right, u_left - 1) <= 0);
884 qsort_assert(qsort_cmp(pc_left, pc_right) == 0);
885 }
886
887 if (still_work_on_left) {
888 /* I know I have a value on the left side which needs to be
889 on the right side, but I need to know more to decide
890 exactly the best thing to do with it.
891 */
892 if (still_work_on_right) {
893 /* I know I have values on both sides which are out of
894 position. This is a big win because I kill two birds
895 with one swap (so to speak). I can advance the
896 uncompared pointers on both sides after swapping both
897 of them into the right place.
898 */
899 qsort_swap(u_right, u_left);
900 --u_right;
901 ++u_left;
902 qsort_all_asserts(pc_left, pc_right, u_left, u_right);
903 } else {
904 /* I have an out of position value on the left, but the
905 right is fully scanned, so I "slide" the pivot chunk
906 and any less-than values left one to make room for the
907 greater value over on the right. If the out of position
908 value is immediately adjacent to the pivot chunk (there
909 are no less-than values), I can do that with a swap,
910 otherwise, I have to rotate one of the less than values
911 into the former position of the out of position value
912 and the right end of the pivot chunk into the left end
913 (got all that?).
914 */
915 --pc_left;
916 if (pc_left == u_right) {
917 qsort_swap(u_right, pc_right);
918 qsort_all_asserts(pc_left, pc_right-1, u_left, u_right-1);
919 } else {
920 qsort_rotate(u_right, pc_left, pc_right);
921 qsort_all_asserts(pc_left, pc_right-1, u_left, u_right-1);
922 }
923 --pc_right;
924 --u_right;
925 }
926 } else if (still_work_on_right) {
927 /* Mirror image of complex case above: I have an out of
928 position value on the right, but the left is fully
929 scanned, so I need to shuffle things around to make room
930 for the right value on the left.
931 */
932 ++pc_right;
933 if (pc_right == u_left) {
934 qsort_swap(u_left, pc_left);
935 qsort_all_asserts(pc_left+1, pc_right, u_left+1, u_right);
936 } else {
937 qsort_rotate(pc_right, pc_left, u_left);
938 qsort_all_asserts(pc_left+1, pc_right, u_left+1, u_right);
939 }
940 ++pc_left;
941 ++u_left;
942 } else {
943 /* No more scanning required on either side of partition,
944 break out of loop and figure out next set of partitions
945 */
946 break;
947 }
948 }
949
950 /* The elements in the pivot chunk are now in the right place. They
951 will never move or be compared again. All I have to do is decide
952 what to do with the stuff to the left and right of the pivot
953 chunk.
954
955 Notes on the QSORT_ORDER_GUESS ifdef code:
956
957 1. If I just built these partitions without swapping any (or
958 very many) elements, there is a chance that the elements are
959 already ordered properly (being properly ordered will
960 certainly result in no swapping, but the converse can't be
961 proved :-).
962
963 2. A (properly written) insertion sort will run faster on
964 already ordered data than qsort will.
965
966 3. Perhaps there is some way to make a good guess about
967 switching to an insertion sort earlier than partition size 6
968 (for instance - we could save the partition size on the stack
969 and increase the size each time we find we didn't swap, thus
970 switching to insertion sort earlier for partitions with a
971 history of not swapping).
972
973 4. Naturally, if I just switch right away, it will make
974 artificial benchmarks with pure ascending (or descending)
975 data look really good, but is that a good reason in general?
976 Hard to say...
977 */
978
979#ifdef QSORT_ORDER_GUESS
980 if (swapped < 3) {
981#if QSORT_ORDER_GUESS == 1
982 qsort_break_even = (part_right - part_left) + 1;
983#endif
984#if QSORT_ORDER_GUESS == 2
985 qsort_break_even *= 2;
986#endif
987#if QSORT_ORDER_GUESS == 3
988 int prev_break = qsort_break_even;
989 qsort_break_even *= qsort_break_even;
990 if (qsort_break_even < prev_break) {
991 qsort_break_even = (part_right - part_left) + 1;
992 }
993#endif
994 } else {
995 qsort_break_even = QSORT_BREAK_EVEN;
996 }
997#endif
998
999 if (part_left < pc_left) {
1000 /* There are elements on the left which need more processing.
1001 Check the right as well before deciding what to do.
1002 */
1003 if (pc_right < part_right) {
1004 /* We have two partitions to be sorted. Stack the biggest one
1005 and process the smallest one on the next iteration. This
1006 minimizes the stack height by ensuring that any additional
1007 stack entries must come from the smallest partition which
1008 (because it is smallest) will have the fewest
1009 opportunities to generate additional stack entries.
1010 */
1011 if ((part_right - pc_right) > (pc_left - part_left)) {
1012 /* stack the right partition, process the left */
1013 partition_stack[next_stack_entry].left = pc_right + 1;
1014 partition_stack[next_stack_entry].right = part_right;
1015#ifdef QSORT_ORDER_GUESS
1016 partition_stack[next_stack_entry].qsort_break_even = qsort_break_even;
1017#endif
1018 part_right = pc_left - 1;
1019 } else {
1020 /* stack the left partition, process the right */
1021 partition_stack[next_stack_entry].left = part_left;
1022 partition_stack[next_stack_entry].right = pc_left - 1;
1023#ifdef QSORT_ORDER_GUESS
1024 partition_stack[next_stack_entry].qsort_break_even = qsort_break_even;
1025#endif
1026 part_left = pc_right + 1;
1027 }
1028 qsort_assert(next_stack_entry < QSORT_MAX_STACK);
1029 ++next_stack_entry;
1030 } else {
1031 /* The elements on the left are the only remaining elements
1032 that need sorting, arrange for them to be processed as the
1033 next partition.
1034 */
1035 part_right = pc_left - 1;
1036 }
1037 } else if (pc_right < part_right) {
1038 /* There is only one chunk on the right to be sorted, make it
1039 the new partition and loop back around.
1040 */
1041 part_left = pc_right + 1;
1042 } else {
1043 /* This whole partition wound up in the pivot chunk, so
1044 we need to get a new partition off the stack.
1045 */
1046 if (next_stack_entry == 0) {
1047 /* the stack is empty - we are done */
1048 break;
1049 }
1050 --next_stack_entry;
1051 part_left = partition_stack[next_stack_entry].left;
1052 part_right = partition_stack[next_stack_entry].right;
1053#ifdef QSORT_ORDER_GUESS
1054 qsort_break_even = partition_stack[next_stack_entry].qsort_break_even;
1055#endif
1056 }
1057 } else {
1058 /* This partition is too small to fool with qsort complexity, just
1059 do an ordinary insertion sort to minimize overhead.
1060 */
1061 int i;
1062 /* Assume 1st element is in right place already, and start checking
1063 at 2nd element to see where it should be inserted.
1064 */
1065 for (i = part_left + 1; i <= part_right; ++i) {
1066 int j;
1067 /* Scan (backwards - just in case 'i' is already in right place)
1068 through the elements already sorted to see if the ith element
1069 belongs ahead of one of them.
1070 */
1071 for (j = i - 1; j >= part_left; --j) {
1072 if (qsort_cmp(i, j) >= 0) {
1073 /* i belongs right after j
1074 */
1075 break;
1076 }
1077 }
1078 ++j;
1079 if (j != i) {
1080 /* Looks like we really need to move some things
1081 */
1082 int k;
1083 temp = array[i];
1084 for (k = i - 1; k >= j; --k)
1085 array[k + 1] = array[k];
1086 array[j] = temp;
1087 }
1088 }
1089
1090 /* That partition is now sorted, grab the next one, or get out
1091 of the loop if there aren't any more.
1092 */
1093
1094 if (next_stack_entry == 0) {
1095 /* the stack is empty - we are done */
1096 break;
1097 }
1098 --next_stack_entry;
1099 part_left = partition_stack[next_stack_entry].left;
1100 part_right = partition_stack[next_stack_entry].right;
1101#ifdef QSORT_ORDER_GUESS
1102 qsort_break_even = partition_stack[next_stack_entry].qsort_break_even;
1103#endif
1104 }
1105 }
1106
1107 /* Believe it or not, the array is sorted at this point! */
1108}
1109
1110/* Stabilize what is, presumably, an otherwise unstable sort method.
1111 * We do that by allocating (or having on hand) an array of pointers
1112 * that is the same size as the original array of elements to be sorted.
1113 * We initialize this parallel array with the addresses of the original
1114 * array elements. This indirection can make you crazy.
1115 * Some pictures can help. After initializing, we have
1116 *
1117 * indir list1
1118 * +----+ +----+
1119 * | | --------------> | | ------> first element to be sorted
1120 * +----+ +----+
1121 * | | --------------> | | ------> second element to be sorted
1122 * +----+ +----+
1123 * | | --------------> | | ------> third element to be sorted
1124 * +----+ +----+
1125 * ...
1126 * +----+ +----+
1127 * | | --------------> | | ------> n-1st element to be sorted
1128 * +----+ +----+
1129 * | | --------------> | | ------> n-th element to be sorted
1130 * +----+ +----+
1131 *
1132 * During the sort phase, we leave the elements of list1 where they are,
1133 * and sort the pointers in the indirect array in the same order determined
1134 * by the original comparison routine on the elements pointed to.
1135 * Because we don't move the elements of list1 around through
1136 * this phase, we can break ties on elements that compare equal
1137 * using their address in the list1 array, ensuring stability.
1138 * This leaves us with something looking like
1139 *
1140 * indir list1
1141 * +----+ +----+
1142 * | | --+ +---> | | ------> first element to be sorted
1143 * +----+ | | +----+
1144 * | | --|-------|---> | | ------> second element to be sorted
1145 * +----+ | | +----+
1146 * | | --|-------+ +-> | | ------> third element to be sorted
1147 * +----+ | | +----+
1148 * ...
1149 * +----+ | | | | +----+
1150 * | | ---|-+ | +--> | | ------> n-1st element to be sorted
1151 * +----+ | | +----+
1152 * | | ---+ +----> | | ------> n-th element to be sorted
1153 * +----+ +----+
1154 *
1155 * where the i-th element of the indirect array points to the element
1156 * that should be i-th in the sorted array. After the sort phase,
1157 * we have to put the elements of list1 into the places
1158 * dictated by the indirect array.
1159 */
1160
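/* Illustrative sketch (under #if 0, never compiled) of the same indirection
** trick in plain C: stabilize the C library's (unstable) qsort() over an int
** array by sorting pointers and breaking ties on the original addresses,
** then gathering the results. The real code below does the final step in
** place instead of gathering into a temporary buffer. All names here are
** invented for the example.
*/
#if 0
#include <stdlib.h>
#include <string.h>

static int
cmp_indirect(const void *pa, const void *pb)
{
    const int *a = *(const int *const *)pa;
    const int *b = *(const int *const *)pb;

    if (*a != *b)
        return (*a < *b) ? -1 : 1;
    return (a < b) ? -1 : (a > b);      /* tie-break on original position */
}

static void
stable_sort_ints(int *v, size_t n)
{
    size_t i;
    int **indir = malloc(n * sizeof *indir);
    int *tmp = malloc(n * sizeof *tmp);

    if (!indir || !tmp) { free(indir); free(tmp); return; }
    for (i = 0; i < n; i++)
        indir[i] = v + i;                       /* point at the originals */
    qsort(indir, n, sizeof *indir, cmp_indirect);
    for (i = 0; i < n; i++)
        tmp[i] = *indir[i];                     /* gather in sorted order */
    memcpy(v, tmp, n * sizeof *v);
    free(indir);
    free(tmp);
}
#endif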
1161static SVCOMPARE_t RealCmp;
1162
1163static I32
1164cmpindir(pTHX_ gptr a, gptr b)
1165{
1166 I32 sense;
1167 gptr *ap = (gptr *)a;
1168 gptr *bp = (gptr *)b;
1169
1170 if ((sense = RealCmp(aTHX_ *ap, *bp)) == 0)
1171 sense = (ap > bp) ? 1 : ((ap < bp) ? -1 : 0);
1172 return sense;
1173}
1174
1175STATIC void
1176S_qsortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp)
1177{
1178 SV **hintsvp;
1179
1180 if (SORTHINTS(hintsvp) & HINT_SORT_STABLE) {
1181 register gptr **pp, *q;
1182 register size_t n, j, i;
1183 gptr *small[SMALLSORT], **indir, tmp;
1184 SVCOMPARE_t savecmp;
1185 if (nmemb <= 1) return; /* sorted trivially */
1186
1187 /* Small arrays can use the stack, big ones must be allocated */
1188 if (nmemb <= SMALLSORT) indir = small;
1189 else { New(1799, indir, nmemb, gptr *); }
1190
1191 /* Copy pointers to original array elements into indirect array */
1192 for (n = nmemb, pp = indir, q = list1; n--; ) *pp++ = q++;
1193
1194 savecmp = RealCmp; /* Save current comparison routine, if any */
1195 RealCmp = cmp; /* Put comparison routine where cmpindir can find it */
1196
1197 /* sort, with indirection */
1198 S_qsortsvu(aTHX_ (gptr *)indir, nmemb, cmpindir);
1199
1200 pp = indir;
1201 q = list1;
1202 for (n = nmemb; n--; ) {
1203 /* Assert A: all elements of q with index > n are already
1204 * in place. This is vacuously true at the start, and we
1205 * put element n where it belongs below (if it wasn't
1206 * already where it belonged). Assert B: we only move
1207 * elements that aren't where they belong,
1208 * so, by A, we never tamper with elements above n.
1209 */
1210 j = pp[n] - q; /* This sets j so that q[j] is
1211 * at pp[n]. *pp[j] belongs in
1212 * q[j], by construction.
1213 */
1214 if (n != j) { /* all's well if n == j */
1215 tmp = q[j]; /* save what's in q[j] */
1216 do {
1217 q[j] = *pp[j]; /* put *pp[j] where it belongs */
1218 i = pp[j] - q; /* the index in q of the element
1219 * just moved */
1220 pp[j] = q + j; /* this is ok now */
1221 } while ((j = i) != n);
1222 /* There are only finitely many (nmemb) addresses
1223 * in the pp array.
1224 * So we must eventually revisit an index we saw before.
1225 * Suppose the first revisited index is k != n.
1226 * An index is visited because something else belongs there.
1227 * If we visit k twice, then two different elements must
1228 * belong in the same place, which cannot be.
1229 * So j must get back to n, the loop terminates,
1230 * and we put the saved element where it belongs.
1231 */
1232 q[n] = tmp; /* put what belongs into
1233 * the n-th element */
1234 }
1235 }
1236
1237 /* free iff allocated */
1238 if (indir != small) { Safefree(indir); }
1239 /* restore prevailing comparison routine */
1240 RealCmp = savecmp;
1241 } else {
1242 S_qsortsvu(aTHX_ list1, nmemb, cmp);
1243 }
1244}
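/* Illustrative sketch (under #if 0, never compiled) of the cycle-following
** permutation idea used above, rendered with plain int values and explicit
** indices: perm[i] names the slot whose old value should end up in slot i,
** each cycle is rotated once, and finished slots are marked by setting
** perm[i] = i. Names are invented for the example; the real loop works with
** the pp[] pointer array instead.
*/
#if 0
static void
apply_permutation(int *v, size_t *perm, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        if (perm[i] != i) {
            int tmp = v[i];             /* save the value being displaced */
            size_t j = i;

            while (perm[j] != i) {      /* walk the cycle back around to i */
                size_t next = perm[j];

                v[j] = v[next];
                perm[j] = j;            /* slot j is now correct */
                j = next;
            }
            v[j] = tmp;                 /* close the cycle */
            perm[j] = j;
        }
    }
}
#endif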
1245
1246/*
1247=for apidoc sortsv
1248
1249Sort an array. Here is an example:
1250
1251 sortsv(AvARRAY(av), av_len(av)+1, Perl_sv_cmp_locale);
1252
1253=cut
1254*/
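/* A slightly fuller usage sketch (under #if 0, never compiled): sortsv()
** with a custom comparison routine that orders SVs by numeric value.
** my_num_cmp() and sort_av_numerically() are invented names for the
** example; only sortsv(), SvNV(), AvARRAY() and av_len() come from the
** real API.
*/
#if 0
static I32
my_num_cmp(pTHX_ SV *a, SV *b)
{
    NV na = SvNV(a);
    NV nb = SvNV(b);

    return na < nb ? -1 : na > nb ? 1 : 0;
}

static void
sort_av_numerically(pTHX_ AV *av)
{
    /* same calling pattern as the documented example above */
    sortsv(AvARRAY(av), av_len(av) + 1, my_num_cmp);
}
#endif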
1255
1256void
1257Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
1258{
1259 void (*sortsvp)(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp) =
1260 S_mergesortsv;
1261 SV **hintsvp;
1262 I32 hints;
1263
1264 if ((hints = SORTHINTS(hintsvp))) {
1265 if (hints & HINT_SORT_QUICKSORT)
1266 sortsvp = S_qsortsv;
1267 else {
1268 if (hints & HINT_SORT_MERGESORT)
1269 sortsvp = S_mergesortsv;
1270 else
1271 sortsvp = S_mergesortsv;
1272 }
1273 }
1274
1275 sortsvp(aTHX_ array, nmemb, cmp);
1276}
1277
1278PP(pp_sort)
1279{
1280 dSP; dMARK; dORIGMARK;
1281 register SV **up;
1282 SV **myorigmark = ORIGMARK;
1283 register I32 max;
1284 HV *stash;
1285 GV *gv;
1286 CV *cv = 0;
1287 I32 gimme = GIMME;
1288 OP* nextop = PL_op->op_next;
1289 I32 overloading = 0;
1290 bool hasargs = FALSE;
1291 I32 is_xsub = 0;
1292
1293 if (gimme != G_ARRAY) {
1294 SP = MARK;
1295 RETPUSHUNDEF;
1296 }
1297
1298 ENTER;
1299 SAVEVPTR(PL_sortcop);
1300 if (PL_op->op_flags & OPf_STACKED) {
1301 if (PL_op->op_flags & OPf_SPECIAL) {
1302 OP *kid = cLISTOP->op_first->op_sibling; /* pass pushmark */
1303 kid = kUNOP->op_first; /* pass rv2gv */
1304 kid = kUNOP->op_first; /* pass leave */
1305 PL_sortcop = kid->op_next;
1306 stash = CopSTASH(PL_curcop);
1307 }
1308 else {
1309 cv = sv_2cv(*++MARK, &stash, &gv, 0);
1310 if (cv && SvPOK(cv)) {
1311 STRLEN n_a;
1312 char *proto = SvPV((SV*)cv, n_a);
1313 if (proto && strEQ(proto, "$$")) {
1314 hasargs = TRUE;
1315 }
1316 }
1317 if (!(cv && CvROOT(cv))) {
1318 if (cv && CvXSUB(cv)) {
1319 is_xsub = 1;
1320 }
1321 else if (gv) {
1322 SV *tmpstr = sv_newmortal();
1323 gv_efullname3(tmpstr, gv, Nullch);
1324 DIE(aTHX_ "Undefined sort subroutine \"%s\" called",
1325 SvPVX(tmpstr));
1326 }
1327 else {
1328 DIE(aTHX_ "Undefined subroutine in sort");
1329 }
1330 }
1331
1332 if (is_xsub)
1333 PL_sortcop = (OP*)cv;
1334 else {
1335 PL_sortcop = CvSTART(cv);
1336 SAVEVPTR(CvROOT(cv)->op_ppaddr);
1337 CvROOT(cv)->op_ppaddr = PL_ppaddr[OP_NULL];
1338
1339 SAVEVPTR(PL_curpad);
1340 PL_curpad = AvARRAY((AV*)AvARRAY(CvPADLIST(cv))[1]);
1341 }
1342 }
1343 }
1344 else {
1345 PL_sortcop = Nullop;
1346 stash = CopSTASH(PL_curcop);
1347 }
1348
1349 up = myorigmark + 1;
1350 while (MARK < SP) { /* This may or may not shift down one here. */
1351 /*SUPPRESS 560*/
1352 if ((*up = *++MARK)) { /* Weed out nulls. */
1353 SvTEMP_off(*up);
1354 if (!PL_sortcop && !SvPOK(*up)) {
1355 STRLEN n_a;
1356 if (SvAMAGIC(*up))
1357 overloading = 1;
1358 else
1359 (void)sv_2pv(*up, &n_a);
1360 }
1361 up++;
1362 }
1363 }
1364 max = --up - myorigmark;
1365 if (PL_sortcop) {
1366 if (max > 1) {
1367 PERL_CONTEXT *cx;
1368 SV** newsp;
1369 bool oldcatch = CATCH_GET;
1370
1371 SAVETMPS;
1372 SAVEOP();
1373
1374 CATCH_SET(TRUE);
1375 PUSHSTACKi(PERLSI_SORT);
1376 if (!hasargs && !is_xsub) {
1377 if (PL_sortstash != stash || !PL_firstgv || !PL_secondgv) {
1378 SAVESPTR(PL_firstgv);
1379 SAVESPTR(PL_secondgv);
1380 PL_firstgv = gv_fetchpv("a", TRUE, SVt_PV);
1381 PL_secondgv = gv_fetchpv("b", TRUE, SVt_PV);
1382 PL_sortstash = stash;
1383 }
1384#ifdef USE_5005THREADS
1385 sv_lock((SV *)PL_firstgv);
1386 sv_lock((SV *)PL_secondgv);
1387#endif
1388 SAVESPTR(GvSV(PL_firstgv));
1389 SAVESPTR(GvSV(PL_secondgv));
1390 }
1391
1392 PUSHBLOCK(cx, CXt_NULL, PL_stack_base);
1393 if (!(PL_op->op_flags & OPf_SPECIAL)) {
1394 cx->cx_type = CXt_SUB;
1395 cx->blk_gimme = G_SCALAR;
1396 PUSHSUB(cx);
1397 if (!CvDEPTH(cv))
1398 (void)SvREFCNT_inc(cv); /* in preparation for POPSUB */
1399 }
1400 PL_sortcxix = cxstack_ix;
1401
1402 if (hasargs && !is_xsub) {
1403 /* This is mostly copied from pp_entersub */
1404 AV *av = (AV*)PL_curpad[0];
1405
1406#ifndef USE_5005THREADS
1407 cx->blk_sub.savearray = GvAV(PL_defgv);
1408 GvAV(PL_defgv) = (AV*)SvREFCNT_inc(av);
1409#endif /* USE_5005THREADS */
1410 cx->blk_sub.oldcurpad = PL_curpad;
1411 cx->blk_sub.argarray = av;
1412 }
1413 sortsv((myorigmark+1), max,
1414 is_xsub ? sortcv_xsub : hasargs ? sortcv_stacked : sortcv);
1415
1416 POPBLOCK(cx,PL_curpm);
1417 PL_stack_sp = newsp;
1418 POPSTACK;
1419 CATCH_SET(oldcatch);
1420 }
1421 }
1422 else {
1423 if (max > 1) {
1424 MEXTEND(SP, 20); /* Can't afford stack realloc on signal. */
1425 sortsv(ORIGMARK+1, max,
1426 (PL_op->op_private & OPpSORT_NUMERIC)
1427 ? ( (PL_op->op_private & OPpSORT_INTEGER)
1428 ? ( overloading ? amagic_i_ncmp : sv_i_ncmp)
1429 : ( overloading ? amagic_ncmp : sv_ncmp))
1430 : ( IN_LOCALE_RUNTIME
1431 ? ( overloading
1432 ? amagic_cmp_locale
1433 : sv_cmp_locale_static)
1434 : ( overloading ? amagic_cmp : sv_cmp_static)));
1435 if (PL_op->op_private & OPpSORT_REVERSE) {
1436 SV **p = ORIGMARK+1;
1437 SV **q = ORIGMARK+max;
1438 while (p < q) {
1439 SV *tmp = *p;
1440 *p++ = *q;
1441 *q-- = tmp;
1442 }
1443 }
1444 }
1445 }
1446 LEAVE;
1447 PL_stack_sp = ORIGMARK + max;
1448 return nextop;
1449}
1450
1451static I32
1452sortcv(pTHX_ SV *a, SV *b)
1453{
1454 I32 oldsaveix = PL_savestack_ix;
1455 I32 oldscopeix = PL_scopestack_ix;
1456 I32 result;
1457 GvSV(PL_firstgv) = a;
1458 GvSV(PL_secondgv) = b;
1459 PL_stack_sp = PL_stack_base;
1460 PL_op = PL_sortcop;
1461 CALLRUNOPS(aTHX);
1462 if (PL_stack_sp != PL_stack_base + 1)
1463 Perl_croak(aTHX_ "Sort subroutine didn't return single value");
1464 if (!SvNIOKp(*PL_stack_sp))
1465 Perl_croak(aTHX_ "Sort subroutine didn't return a numeric value");
1466 result = SvIV(*PL_stack_sp);
1467 while (PL_scopestack_ix > oldscopeix) {
1468 LEAVE;
1469 }
1470 leave_scope(oldsaveix);
1471 return result;
1472}
1473
1474static I32
1475sortcv_stacked(pTHX_ SV *a, SV *b)
1476{
1477 I32 oldsaveix = PL_savestack_ix;
1478 I32 oldscopeix = PL_scopestack_ix;
1479 I32 result;
1480 AV *av;
1481
1482#ifdef USE_5005THREADS
1483 av = (AV*)PL_curpad[0];
1484#else
1485 av = GvAV(PL_defgv);
1486#endif
1487
1488 if (AvMAX(av) < 1) {
1489 SV** ary = AvALLOC(av);
1490 if (AvARRAY(av) != ary) {
1491 AvMAX(av) += AvARRAY(av) - AvALLOC(av);
1492 SvPVX(av) = (char*)ary;
1493 }
1494 if (AvMAX(av) < 1) {
1495 AvMAX(av) = 1;
1496 Renew(ary,2,SV*);
1497 SvPVX(av) = (char*)ary;
1498 }
1499 }
1500 AvFILLp(av) = 1;
1501
1502 AvARRAY(av)[0] = a;
1503 AvARRAY(av)[1] = b;
1504 PL_stack_sp = PL_stack_base;
1505 PL_op = PL_sortcop;
1506 CALLRUNOPS(aTHX);
1507 if (PL_stack_sp != PL_stack_base + 1)
1508 Perl_croak(aTHX_ "Sort subroutine didn't return single value");
1509 if (!SvNIOKp(*PL_stack_sp))
1510 Perl_croak(aTHX_ "Sort subroutine didn't return a numeric value");
1511 result = SvIV(*PL_stack_sp);
1512 while (PL_scopestack_ix > oldscopeix) {
1513 LEAVE;
1514 }
1515 leave_scope(oldsaveix);
1516 return result;
1517}
1518
1519static I32
1520sortcv_xsub(pTHX_ SV *a, SV *b)
1521{
1522 dSP;
1523 I32 oldsaveix = PL_savestack_ix;
1524 I32 oldscopeix = PL_scopestack_ix;
1525 I32 result;
1526 CV *cv=(CV*)PL_sortcop;
1527
1528 SP = PL_stack_base;
1529 PUSHMARK(SP);
1530 EXTEND(SP, 2);
1531 *++SP = a;
1532 *++SP = b;
1533 PUTBACK;
1534 (void)(*CvXSUB(cv))(aTHX_ cv);
1535 if (PL_stack_sp != PL_stack_base + 1)
1536 Perl_croak(aTHX_ "Sort subroutine didn't return single value");
1537 if (!SvNIOKp(*PL_stack_sp))
1538 Perl_croak(aTHX_ "Sort subroutine didn't return a numeric value");
1539 result = SvIV(*PL_stack_sp);
1540 while (PL_scopestack_ix > oldscopeix) {
1541 LEAVE;
1542 }
1543 leave_scope(oldsaveix);
1544 return result;
1545}
1546
1547
1548static I32
1549sv_ncmp(pTHX_ SV *a, SV *b)
1550{
1551 NV nv1 = SvNV(a);
1552 NV nv2 = SvNV(b);
1553 return nv1 < nv2 ? -1 : nv1 > nv2 ? 1 : 0;
1554}
1555
1556static I32
1557sv_i_ncmp(pTHX_ SV *a, SV *b)
1558{
1559 IV iv1 = SvIV(a);
1560 IV iv2 = SvIV(b);
1561 return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
1562}
1563#define tryCALL_AMAGICbin(left,right,meth,svp) STMT_START { \
1564 *svp = Nullsv; \
1565 if (PL_amagic_generation) { \
1566 if (SvAMAGIC(left)||SvAMAGIC(right))\
1567 *svp = amagic_call(left, \
1568 right, \
1569 CAT2(meth,_amg), \
1570 0); \
1571 } \
1572 } STMT_END
1573
1574static I32
1575amagic_ncmp(pTHX_ register SV *a, register SV *b)
1576{
1577 SV *tmpsv;
1578 tryCALL_AMAGICbin(a,b,ncmp,&tmpsv);
1579 if (tmpsv) {
1580 NV d;
1581
1582 if (SvIOK(tmpsv)) {
1583 I32 i = SvIVX(tmpsv);
1584 if (i > 0)
1585 return 1;
1586 return i? -1 : 0;
1587 }
1588 d = SvNV(tmpsv);
1589 if (d > 0)
1590 return 1;
1591 return d? -1 : 0;
1592 }
1593 return sv_ncmp(aTHX_ a, b);
1594}
1595
1596static I32
1597amagic_i_ncmp(pTHX_ register SV *a, register SV *b)
1598{
1599 SV *tmpsv;
1600 tryCALL_AMAGICbin(a,b,ncmp,&tmpsv);
1601 if (tmpsv) {
1602 NV d;
1603
1604 if (SvIOK(tmpsv)) {
1605 I32 i = SvIVX(tmpsv);
1606 if (i > 0)
1607 return 1;
1608 return i? -1 : 0;
1609 }
1610 d = SvNV(tmpsv);
1611 if (d > 0)
1612 return 1;
1613 return d? -1 : 0;
1614 }
1615 return sv_i_ncmp(aTHX_ a, b);
1616}
1617
1618static I32
1619amagic_cmp(pTHX_ register SV *str1, register SV *str2)
1620{
1621 SV *tmpsv;
1622 tryCALL_AMAGICbin(str1,str2,scmp,&tmpsv);
1623 if (tmpsv) {
1624 NV d;
1625
1626 if (SvIOK(tmpsv)) {
1627 I32 i = SvIVX(tmpsv);
1628 if (i > 0)
1629 return 1;
1630 return i? -1 : 0;
1631 }
1632 d = SvNV(tmpsv);
1633 if (d > 0)
1634 return 1;
1635 return d? -1 : 0;
1636 }
1637 return sv_cmp(str1, str2);
1638}
1639
1640static I32
1641amagic_cmp_locale(pTHX_ register SV *str1, register SV *str2)
1642{
1643 SV *tmpsv;
1644 tryCALL_AMAGICbin(str1,str2,scmp,&tmpsv);
1645 if (tmpsv) {
1646 NV d;
1647
1648 if (SvIOK(tmpsv)) {
1649 I32 i = SvIVX(tmpsv);
1650 if (i > 0)
1651 return 1;
1652 return i? -1 : 0;
1653 }
1654 d = SvNV(tmpsv);
1655 if (d > 0)
1656 return 1;
1657 return d? -1 : 0;
1658 }
1659 return sv_cmp_locale(str1, str2);
1660}
1661
1662