This is a live mirror of the Perl 5 development currently hosted at https://github.com/perl/perl5
locale.c: Rmv duplicate strlen()
[perl5.git] / pp_sort.c
CommitLineData
84d4ea48
JH
1/* pp_sort.c
2 *
1129b882
NC
3 * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
4 * 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
84d4ea48
JH
5 *
6 * You may distribute under the terms of either the GNU General Public
7 * License or the Artistic License, as specified in the README file.
8 *
9 */
10
11/*
4ac71550
TC
12 * ...they shuffled back towards the rear of the line. 'No, not at the
13 * rear!' the slave-driver shouted. 'Three files up. And stay there...
14 *
15 * [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
84d4ea48
JH
16 */
17
166f8a29
DM
18/* This file contains pp ("push/pop") functions that
19 * execute the opcodes that make up a perl program. A typical pp function
20 * expects to find its arguments on the stack, and usually pushes its
21 * results onto the stack, hence the 'pp' terminology. Each OP structure
22 * contains a pointer to the relevant pp_foo() function.
23 *
24 * This particular file just contains pp_sort(), which is complex
25 * enough to merit its own file! See the other pp*.c files for the rest of
26 * the pp_ functions.
27 */
28
84d4ea48
JH
29#include "EXTERN.h"
30#define PERL_IN_PP_SORT_C
31#include "perl.h"
32
c53fc8a6 33#ifndef SMALLSORT
7ea738a9 34#define SMALLSORT (200)
c53fc8a6
JH
35#endif
36
84d4ea48
JH
37/*
38 * The mergesort implementation is by Peter M. Mcilroy <pmcilroy@lucent.com>.
39 *
40 * The original code was written in conjunction with BSD Computer Software
41 * Research Group at University of California, Berkeley.
42 *
393db44d
JL
43 * See also: "Optimistic Sorting and Information Theoretic Complexity"
44 * Peter McIlroy
45 * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
46 * pp 467-474, Austin, Texas, 25-27 January 1993.
84d4ea48 47 *
393db44d 48 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>.
84d4ea48
JH
49 *
50 * The code can be distributed under the same terms as Perl itself.
51 *
52 */
53
84d4ea48 54
7ea738a9
TK
55typedef char * aptr; /* pointer for arithmetic on sizes */
56typedef SV * gptr; /* pointers in our lists */
84d4ea48
JH
57
58/* Binary merge internal sort, with a few special mods
59** for the special perl environment it now finds itself in.
60**
61** Things that were once options have been hotwired
62** to values suitable for this use. In particular, we'll always
63** initialize looking for natural runs, we'll always produce stable
64** output, and we'll always do Peter McIlroy's binary merge.
65*/
66
67/* Pointer types for arithmetic and storage and convenience casts */
68
7ea738a9
TK
69#define APTR(P) ((aptr)(P))
70#define GPTP(P) ((gptr *)(P))
84d4ea48
JH
71#define GPPP(P) ((gptr **)(P))
72
73
74/* byte offset from pointer P to (larger) pointer Q */
7ea738a9 75#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
84d4ea48
JH
76
77#define PSIZE sizeof(gptr)
78
79/* If PSIZE is power of 2, make PSHIFT that power, if that helps */
80
7ea738a9
TK
81#ifdef PSHIFT
82#define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT))
83#define PNBYTE(N) ((N) << (PSHIFT))
84#define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N)))
84d4ea48
JH
85#else
86/* Leave optimization to compiler */
7ea738a9
TK
87#define PNELEM(P, Q) (GPTP(Q) - GPTP(P))
88#define PNBYTE(N) ((N) * (PSIZE))
89#define PINDEX(P, N) (GPTP(P) + (N))
84d4ea48
JH
90#endif
91
92/* Pointer into other corresponding to pointer into this */
7ea738a9 93#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))
84d4ea48
JH
94
95#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
96
97
486ec47a 98/* Runs are identified by a pointer in the auxiliary list.
84d4ea48
JH
99** The pointer is at the start of the list,
100** and it points to the start of the next list.
101** NEXT is used as an lvalue, too.
102*/
103
7ea738a9 104#define NEXT(P) (*GPPP(P))
84d4ea48
JH
105
106
107/* PTHRESH is the minimum number of pairs with the same sense to justify
108** checking for a run and extending it. Note that PTHRESH counts PAIRS,
109** not just elements, so PTHRESH == 8 means a run of 16.
110*/
111
7ea738a9 112#define PTHRESH (8)
84d4ea48
JH
113
114/* RTHRESH is the number of elements in a run that must compare low
115** to the low element from the opposing run before we justify
116** doing a binary rampup instead of single stepping.
117** In random input, N in a row low should only happen with
118** probability 2^(1-N), so we can risk that we are dealing
119** with orderly input without paying much when we aren't.
120*/
121
122#define RTHRESH (6)
123
124
125/*
126** Overview of algorithm and variables.
127** The array of elements at list1 will be organized into runs of length 2,
128** or runs of length >= 2 * PTHRESH. We only try to form long runs when
129** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
130**
131** Unless otherwise specified, pair pointers address the first of two elements.
132**
a0288114
AL
133** b and b+1 are a pair that compare with sense "sense".
134** b is the "bottom" of adjacent pairs that might form a longer run.
84d4ea48
JH
135**
136** p2 parallels b in the list2 array, where runs are defined by
137** a pointer chain.
138**
a0288114 139** t represents the "top" of the adjacent pairs that might extend
84d4ea48
JH
140** the run beginning at b. Usually, t addresses a pair
141** that compares with opposite sense from (b,b+1).
142** However, it may also address a singleton element at the end of list1,
a0288114 143** or it may be equal to "last", the first element beyond list1.
84d4ea48
JH
144**
145** r addresses the Nth pair following b. If this would be beyond t,
146** we back it off to t. Only when r is less than t do we consider the
147** run long enough to consider checking.
148**
149** q addresses a pair such that the pairs at b through q already form a run.
150** Often, q will equal b, indicating we only are sure of the pair itself.
151** However, a search on the previous cycle may have revealed a longer run,
152** so q may be greater than b.
153**
154** p is used to work back from a candidate r, trying to reach q,
155** which would mean b through r would be a run. If we discover such a run,
156** we start q at r and try to push it further towards t.
157** If b through r is NOT a run, we detect the wrong order at (p-1,p).
158** In any event, after the check (if any), we have two main cases.
159**
160** 1) Short run. b <= q < p <= r <= t.
7ea738a9
TK
161** b through q is a run (perhaps trivial)
162** q through p are uninteresting pairs
163** p through r is a run
84d4ea48
JH
164**
165** 2) Long run. b < r <= q < t.
7ea738a9 166** b through q is a run (of length >= 2 * PTHRESH)
84d4ea48
JH
167**
168** Note that degenerate cases are not only possible, but likely.
169** For example, if the pair following b compares with opposite sense,
170** then b == q < p == r == t.
171*/
172
173
/* dynprep() - preparatory pass over the nmemb elements at list1.
 *
 * Identifies natural runs (maximal stretches where adjacent pairs compare
 * with the same sense), reverses descending runs in place so that every
 * run ends up ascending, and records the run boundaries as a pointer
 * chain in the auxiliary array list2 (via NEXT()).  Runs shorter than
 * 2 * PTHRESH are emitted as trivial runs of length 2 (plus possibly a
 * final singleton).
 *
 * Returns the number of runs created; the caller (S_sortsv_flags_impl)
 * then merges them pairwise.  cmp is the element comparison callback.
 */
PERL_STATIC_FORCE_INLINE IV __attribute__always_inline__
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
{
    I32 sense;
    gptr *b, *p, *q, *t, *p2;
    gptr *last, *r;
    IV runs = 0;        /* how many runs we thread through list2 */

    b = list1;
    last = PINDEX(b, nmemb);
    /* sense of the first pair: 1 if descending, 0 if ascending */
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }
        q = b;
        /* Having laid out the playing field, look for long runs */
        do {
            p = r = b + (2 * PTHRESH);
            if (r >= t) p = r = t;      /* too short to care about */
            else {
                /* work back from r towards q; if we reach q, b..r is a run */
                while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                       ((p -= 2) > q)) {}
                if (p <= q) {
                    /* b through r is a (long) run.
                    ** Extend it as far as possible.
                    */
                    p = q = r;
                    while (((p += 2) < t) &&
                           ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
                    r = p = q + 2;      /* no simple pairs, no after-run */
                }
            }
            if (q > b) {                /* run of greater than 2 at b */
                gptr *savep = p;

                p = q += 2;
                /* pick up singleton, if possible */
                if ((p == t) &&
                    ((t + 1) == last) &&
                    ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                    savep = r = p = q = last;
                /* record the run boundary in the parallel aux array */
                p2 = NEXT(p2) = p2 + (p - b); ++runs;
                /* a descending run is reversed in place to make it ascending */
                if (sense)
                    while (b < --p) {
                        const gptr c = *b;
                        *b++ = *p;
                        *p = c;
                    }
                p = savep;
            }
            while (q < p) {             /* simple pairs */
                p2 = NEXT(p2) = p2 + 2; ++runs;
                if (sense) {
                    /* swap the out-of-order pair */
                    const gptr c = *q++;
                    *(q-1) = *q;
                    *q++ = c;
                } else q += 2;
            }
            /* trailing singleton becomes a run of length 1 */
            if (((b = p) == t) && ((t+1) == last)) {
                NEXT(p2) = p2 + 1; ++runs;
                b++;
            }
            q = r;
        } while (b < t);
        sense = !sense;
    }
    return runs;
}
246
247
3fe0b9a9 248/* The original merge sort, in use since 5.7, was as fast as, or faster than,
957d8989 249 * qsort on many platforms, but slower than qsort, conspicuously so,
3fe0b9a9 250 * on others. The most likely explanation was platform-specific
957d8989
JL
251 * differences in cache sizes and relative speeds.
252 *
253 * The quicksort divide-and-conquer algorithm guarantees that, as the
254 * problem is subdivided into smaller and smaller parts, the parts
255 * fit into smaller (and faster) caches. So it doesn't matter how
256 * many levels of cache exist, quicksort will "find" them, and,
e62b3022 257 * as long as smaller is faster, take advantage of them.
957d8989 258 *
3fe0b9a9 259 * By contrast, consider how the original mergesort algorithm worked.
957d8989
JL
260 * Suppose we have five runs (each typically of length 2 after dynprep).
261 *
262 * pass base aux
263 * 0 1 2 3 4 5
264 * 1 12 34 5
265 * 2 1234 5
266 * 3 12345
267 * 4 12345
268 *
269 * Adjacent pairs are merged in "grand sweeps" through the input.
270 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
271 * runs 3 and 4 are merged and the runs from run 5 have been copied.
272 * The only cache that matters is one large enough to hold *all* the input.
273 * On some platforms, this may be many times slower than smaller caches.
274 *
275 * The following pseudo-code uses the same basic merge algorithm,
276 * but in a divide-and-conquer way.
277 *
278 * # merge $runs runs at offset $offset of list $list1 into $list2.
279 * # all unmerged runs ($runs == 1) originate in list $base.
280 * sub mgsort2 {
281 * my ($offset, $runs, $base, $list1, $list2) = @_;
282 *
283 * if ($runs == 1) {
284 * if ($list1 is $base) copy run to $list2
285 * return offset of end of list (or copy)
286 * } else {
287 * $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
288 * mgsort2($off2, $runs/2, $base, $list2, $list1)
289 * merge the adjacent runs at $offset of $list1 into $list2
290 * return the offset of the end of the merged runs
291 * }
292 * }
293 * mgsort2(0, $runs, $base, $aux, $base);
294 *
295 * For our 5 runs, the tree of calls looks like
296 *
297 * 5
298 * 3 2
299 * 2 1 1 1
300 * 1 1
301 *
302 * 1 2 3 4 5
303 *
304 * and the corresponding activity looks like
305 *
306 * copy runs 1 and 2 from base to aux
307 * merge runs 1 and 2 from aux to base
308 * (run 3 is where it belongs, no copy needed)
309 * merge runs 12 and 3 from base to aux
310 * (runs 4 and 5 are where they belong, no copy needed)
311 * merge runs 4 and 5 from base to aux
312 * merge runs 123 and 45 from aux to base
313 *
314 * Note that we merge runs 1 and 2 immediately after copying them,
315 * while they are still likely to be in fast cache. Similarly,
316 * run 3 is merged with run 12 while it still may be lingering in cache.
317 * This implementation should therefore enjoy much of the cache-friendly
318 * behavior that quicksort does. In addition, it does less copying
319 * than the original mergesort implementation (only runs 1 and 2 are copied)
320 * and the "balancing" of merges is better (merged runs comprise more nearly
321 * equal numbers of original runs).
322 *
323 * The actual cache-friendly implementation will use a pseudo-stack
324 * to avoid recursion, and will unroll processing of runs of length 2,
325 * but it is otherwise similar to the recursive implementation.
957d8989
JL
326 */
327
/* One element of the explicit pseudo-stack that S_sortsv_flags_impl()
 * uses in place of recursion (see the mgsort2 pseudo-code above). */
typedef struct {
    IV offset;          /* offset of 1st of 2 runs at this level */
    IV runs;            /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */
957d8989 332
044d25c7
TK
/* S_sortsv_flags_impl() - stable in-place mergesort of nmemb SV pointers
 * at base, using comparison callback cmp.
 *
 * dynprep() first turns the input into ascending runs whose boundaries are
 * chained through an auxiliary array; the runs are then merged in the
 * cache-friendly divide-and-conquer order described in the long comment
 * above, driven by an explicit pseudo-stack (off_runs stack[60]) instead
 * of recursion.  The aux array lives on the C stack for
 * nmemb <= SMALLSORT, otherwise it is heap-allocated with Newx() and
 * freed with Safefree() at "done".  'flags' is currently unused.
 */
PERL_STATIC_FORCE_INLINE void
S_sortsv_flags_impl(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    IV i, run, offset;
    I32 sense, level;
    gptr *f1, *f2, *t, *b, *p;
    int iwhich;
    gptr *aux;
    gptr *p1;
    gptr small[SMALLSORT];
    gptr *which[3];
    off_runs stack[60], *stackp;

    PERL_UNUSED_ARG(flags);
    PERL_ARGS_ASSERT_SORTSV_FLAGS_IMPL;
    if (nmemb <= 1) return;                     /* sorted trivially */

    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { Newx(aux,nmemb,gptr); }              /* allocate auxiliary array */
    level = 0;
    stackp = stack;
    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    /* which[level & 1] is where runs currently live; which[(level & 1) + 1]
     * is where the next merge writes them */
    which[0] = which[2] = base;
    which[1] = aux;
    for (;;) {
        /* On levels where both runs have been constructed (stackp->runs == 0),
         * merge them, and note the offset of their end, in case the offset
         * is needed at the next level up.  Hop up a level, and,
         * as long as stackp->runs is 0, keep merging.
         */
        IV runs = stackp->runs;
        if (runs == 0) {
            gptr *list1, *list2;
            iwhich = level & 1;
            list1 = which[iwhich];              /* area where runs are now */
            list2 = which[++iwhich];            /* area for merged runs */
            do {
                gptr *l1, *l2, *tp2;
                offset = stackp->offset;
                f1 = p1 = list1 + offset;       /* start of first run */
                p = tp2 = list2 + offset;       /* where merged run will go */
                t = NEXT(p);                    /* where first run ends */
                f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
                t = NEXT(t);                    /* where second run ends */
                l2 = POTHER(t, list2, list1);   /* ... on the other side */
                offset = PNELEM(list2, t);
                while (f1 < l1 && f2 < l2) {
                    /* If head 1 is larger than head 2, find ALL the elements
                    ** in list 2 strictly less than head1, write them all,
                    ** then head 1.  Then compare the new heads, and repeat,
                    ** until one or both lists are exhausted.
                    **
                    ** In all comparisons (after establishing
                    ** which head to merge) the item to merge
                    ** (at pointer q) is the first operand of
                    ** the comparison.  When we want to know
                    ** if "q is strictly less than the other",
                    ** we can't just do
                    **    cmp(q, other) < 0
                    ** because stability demands that we treat equality
                    ** as high when q comes from l2, and as low when
                    ** q was from l1.  So we ask the question by doing
                    **    cmp(q, other) <= sense
                    ** and make sense == 0 when equality should look low,
                    ** and -1 when equality should look high.
                    */

                    gptr *q;
                    if (cmp(aTHX_ *f1, *f2) <= 0) {
                        q = f2; b = f1; t = l1;
                        sense = -1;
                    } else {
                        q = f1; b = f2; t = l2;
                        sense = 0;
                    }


                    /* ramp up
                    **
                    ** Leave t at something strictly
                    ** greater than q (or at the end of the list),
                    ** and b at something strictly less than q.
                    */
                    for (i = 1, run = 0 ;;) {
                        if ((p = PINDEX(b, i)) >= t) {
                            /* off the end */
                            if (((p = PINDEX(t, -1)) > b) &&
                                (cmp(aTHX_ *q, *p) <= sense))
                                t = p;
                            else b = p;
                            break;
                        } else if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                            break;
                        } else b = p;
                        /* after RTHRESH single steps in a row, start
                         * doubling the step (galloping) */
                        if (++run >= RTHRESH) i += i;
                    }


                    /* q is known to follow b and must be inserted before t.
                    ** Increment b, so the range of possibilities is [b,t).
                    ** Round binary split down, to favor early appearance.
                    ** Adjust b and t until q belongs just before t.
                    */

                    b++;
                    while (b < t) {
                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                        if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                        } else b = p + 1;
                    }


                    /* Copy all the strictly low elements */

                    if (q == f1) {
                        FROMTOUPTO(f2, tp2, t);
                        *tp2++ = *f1++;
                    } else {
                        FROMTOUPTO(f1, tp2, t);
                        *tp2++ = *f2++;
                    }
                }


                /* Run out remaining list */
                if (f1 == l1) {
                    if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                } else FROMTOUPTO(f1, tp2, l1);
                /* thread the merged run's end pointer for the level above */
                p1 = NEXT(p1) = POTHER(tp2, list2, list1);

                if (--level == 0) goto done;
                --stackp;
                t = list1; list1 = list2; list2 = t; /* swap lists */
            } while ((runs = stackp->runs) == 0);
        }


        stackp->runs = 0;               /* current run will finish level */
        /* While there are more than 2 runs remaining,
         * turn them into exactly 2 runs (at the "other" level),
         * each made up of approximately half the runs.
         * Stack the second half for later processing,
         * and set about producing the first half now.
         */
        while (runs > 2) {
            ++level;
            ++stackp;
            stackp->offset = offset;
            runs -= stackp->runs = runs / 2;
        }
        /* We must construct a single run from 1 or 2 runs.
         * All the original runs are in which[0] == base.
         * The run we construct must end up in which[level&1].
         */
        iwhich = level & 1;
        if (runs == 1) {
            /* Constructing a single run from a single run.
             * If it's where it belongs already, there's nothing to do.
             * Otherwise, copy it to where it belongs.
             * A run of 1 is either a singleton at level 0,
             * or the second half of a split 3.  In neither event
             * is it necessary to set offset.  It will be set by the merge
             * that immediately follows.
             */
            if (iwhich) {               /* Belongs in aux, currently in base */
                f1 = b = PINDEX(base, offset);  /* where list starts */
                f2 = PINDEX(aux, offset);       /* where list goes */
                t = NEXT(f2);                   /* where list will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                t = PINDEX(base, offset);       /* where it currently ends */
                FROMTOUPTO(f1, f2, t);          /* copy */
                NEXT(b) = t;                    /* set up parallel pointer */
            } else if (level == 0) goto done;   /* single run at level 0 */
        } else {
            /* Constructing a single run from two runs.
             * The merge code at the top will do that.
             * We need only make sure the two runs are in the "other" array,
             * so they'll end up in the correct array after the merge.
             */
            ++level;
            ++stackp;
            stackp->offset = offset;
            stackp->runs = 0;   /* take care of both runs, trigger merge */
            if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
                f1 = b = PINDEX(base, offset);  /* where first run starts */
                f2 = PINDEX(aux, offset);       /* where it will be copied */
                t = NEXT(f2);                   /* where first run will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                p = PINDEX(base, offset);       /* end of first run */
                t = NEXT(t);                    /* where second run will end */
                t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
                FROMTOUPTO(f1, f2, t);          /* copy both runs */
                NEXT(b) = p;                    /* parallel pointer for 1st */
                NEXT(p) = t;                    /* ... and for second */
            }
        }
    }
  done:
    if (aux != small) Safefree(aux);    /* free iff allocated */

    return;
}
538
84d4ea48 539/*
044d25c7
TK
540=for apidoc sortsv_flags
541
542In-place sort an array of SV pointers with the given comparison routine,
543with various SORTf_* flag options.
544
545=cut
546*/
void
Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    PERL_ARGS_ASSERT_SORTSV_FLAGS;

    /* Public entry point: delegate to the force-inlined implementation
     * with the caller-supplied comparator. */
    sortsv_flags_impl(base, nmemb, cmp, flags);
}
554
555/*
556 * Each of sortsv_* functions contains an inlined copy of
557 * sortsv_flags_impl() with an inlined comparator. Basically, we are
558 * emulating C++ templates by using __attribute__((always_inline)).
559 *
560 * The purpose of that is to avoid the function call overhead inside
561 * the sorting routine, which calls the comparison function multiple
562 * times per sorted item.
563 */
564
/* Specialized sort entry points.  Each wrapper below instantiates
 * sortsv_flags_impl() with a fixed comparator so the comparison is
 * inlined into the sort loop (see the block comment above).  Per their
 * names: i_ncmp = integer-numeric compare, ncmp = numeric compare,
 * cmp = string compare; the _desc variants take the descending
 * comparator, and the amagic_ variants the overload-aware one.  The
 * comparators themselves are defined elsewhere in this file. */

static void
sortsv_amagic_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp, flags);
}

static void
sortsv_amagic_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp_desc, flags);
}

static void
sortsv_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp, flags);
}

static void
sortsv_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp_desc, flags);
}

static void
sortsv_amagic_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp, flags);
}

static void
sortsv_amagic_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp_desc, flags);
}

static void
sortsv_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp, flags);
}

static void
sortsv_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp_desc, flags);
}

static void
sortsv_amagic_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp, flags);
}

static void
sortsv_amagic_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_desc, flags);
}

static void
sortsv_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp, flags);
}

static void
sortsv_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_desc, flags);
}
636
#ifdef USE_LOCALE_COLLATE

/* Locale-collation variants of the wrappers above, compiled only when
 * locale collation support is enabled. */

static void
sortsv_amagic_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale, flags);
}

static void
sortsv_amagic_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale_desc, flags);
}

static void
sortsv_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp_locale, flags);
}

static void
sortsv_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_locale_desc, flags);
}

#endif
664
665/*
ccfc67b7 666
84d4ea48
JH
667=for apidoc sortsv
668
8f5d5a51 669In-place sort an array of SV pointers with the given comparison routine.
84d4ea48 670
796b6530 671Currently this always uses mergesort. See C<L</sortsv_flags>> for a more
7b9ef140 672flexible routine.
78210658 673
84d4ea48
JH
674=cut
675*/
4eb872f6 676
84d4ea48
JH
void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    /* Convenience wrapper: sort with no SORTf_* flags set. */
    sortsv_flags(array, nmemb, cmp, 0);
}
684
4d562308
SF
685#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
686#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
687#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
688
84d4ea48
JH
689PP(pp_sort)
690{
49799453 691 dMARK; dORIGMARK;
eb578fdb 692 SV **p1 = ORIGMARK+1, **p2;
c70927a6 693 SSize_t max, i;
7d49f689 694 AV* av = NULL;
84d4ea48 695 GV *gv;
cbbf8932 696 CV *cv = NULL;
1c23e2bd 697 U8 gimme = GIMME_V;
0bcc34c2 698 OP* const nextop = PL_op->op_next;
84d4ea48 699 I32 overloading = 0;
2c204b7a 700 bool hasargs = FALSE; /* the sort sub has proto($$)? */
2b66f6d3 701 bool copytmps;
84d4ea48 702 I32 is_xsub = 0;
901017d6
AL
703 const U8 priv = PL_op->op_private;
704 const U8 flags = PL_op->op_flags;
7b9ef140 705 U32 sort_flags = 0;
044d25c7 706 I32 all_SIVs = 1, descending = 0;
84d4ea48 707
7b9ef140 708 if ((priv & OPpSORT_DESCEND) != 0)
044d25c7 709 descending = 1;
7b9ef140 710
eb7e169e 711 if (gimme != G_LIST) {
49799453
DM
712 rpp_popfree_to(mark);
713 rpp_xpush_1(&PL_sv_undef);
714 return NORMAL;
84d4ea48
JH
715 }
716
717 ENTER;
718 SAVEVPTR(PL_sortcop);
43442f48
DM
719
720 /* Important flag meanings:
721 *
722 * OPf_STACKED sort <function_name> args
723 *
724 * (OPf_STACKED
725 * |OPf_SPECIAL) sort { <block> } args
726 *
727 * ---- standard block; e.g. sort { $a <=> $b } args
728 *
729 *
730 * OPpSORT_NUMERIC { $a <=> $b } (as opposed to $a cmp $b)
731 * OPpSORT_INTEGER ditto in scope of 'use integer'
732 * OPpSORT_DESCEND { $b <=> $a }
733 * OPpSORT_REVERSE @a= reverse sort ....;
734 * OPpSORT_INPLACE @a = sort @a;
735 */
736
471178c0 737 if (flags & OPf_STACKED) {
7ea738a9 738 if (flags & OPf_SPECIAL) {
e6dae479 739 OP *nullop = OpSIBLING(cLISTOP->op_first); /* pass pushmark */
932bca29 740 assert(nullop->op_type == OP_NULL);
7ea738a9
TK
741 PL_sortcop = nullop->op_next;
742 }
743 else {
49799453 744 /* sort <function_name> list */
7ea738a9
TK
745 GV *autogv = NULL;
746 HV *stash;
49799453
DM
747 SV *fn = *++MARK;
748 cv = sv_2cv(fn, &stash, &gv, GV_ADD);
749
750 /* want to remove the function name from the stack,
751 * but mustn't trigger cv being freed at the same time.
752 * Normally the name is a PV while cv is CV (duh!) but
753 * for lexical subs, fn can already be the CV (but is kept
754 * alive by a reference from the pad */
9a500794 755#ifdef PERL_RC_STACK
49799453
DM
756 assert(fn != (SV*)cv || SvREFCNT(fn) > 1);
757 SvREFCNT_dec(fn);
758#endif
759 *MARK = NULL;
760
7ea738a9
TK
761 check_cv:
762 if (cv && SvPOK(cv)) {
763 const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
764 if (proto && strEQ(proto, "$$")) {
765 hasargs = TRUE;
766 }
767 }
768 if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
769 is_xsub = 1;
770 }
771 else if (!(cv && CvROOT(cv))) {
772 if (gv) {
773 goto autoload;
774 }
775 else if (!CvANON(cv) && (gv = CvGV(cv))) {
776 if (cv != GvCV(gv)) cv = GvCV(gv);
777 autoload:
778 if (!autogv && (
779 autogv = gv_autoload_pvn(
780 GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
781 GvNAMEUTF8(gv) ? SVf_UTF8 : 0
782 )
783 )) {
784 cv = GvCVu(autogv);
785 goto check_cv;
786 }
787 else {
788 SV *tmpstr = sv_newmortal();
789 gv_efullname3(tmpstr, gv, NULL);
790 DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
791 SVfARG(tmpstr));
792 }
793 }
794 else {
795 DIE(aTHX_ "Undefined subroutine in sort");
796 }
797 }
798
799 if (is_xsub)
800 PL_sortcop = (OP*)cv;
801 else
802 PL_sortcop = CvSTART(cv);
803 }
84d4ea48
JH
804 }
805 else {
7ea738a9 806 PL_sortcop = NULL;
84d4ea48
JH
807 }
808
84721d61
DM
809 /* optimiser converts "@a = sort @a" to "sort \@a". In this case,
810 * push (@a) onto stack, then assign result back to @a at the end of
811 * this function */
0723351e 812 if (priv & OPpSORT_INPLACE) {
49799453
DM
813 assert( MARK+1 == PL_stack_sp
814 && *PL_stack_sp
815 && SvTYPE(*PL_stack_sp) == SVt_PVAV);
7ea738a9 816 (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
49799453 817 av = MUTABLE_AV((*PL_stack_sp));
84721d61
DM
818 if (SvREADONLY(av))
819 Perl_croak_no_modify();
7ea738a9 820 max = AvFILL(av) + 1;
49799453
DM
821
822 I32 oldmark = MARK - PL_stack_base;
823 rpp_extend(max);
824 MARK = PL_stack_base + oldmark;
825
7ea738a9
TK
826 if (SvMAGICAL(av)) {
827 for (i=0; i < max; i++) {
828 SV **svp = av_fetch(av, i, FALSE);
49799453
DM
829 SV *sv;
830 if (svp) {
831 sv = *svp;
9a500794 832#ifdef PERL_RC_STACK
49799453
DM
833 SvREFCNT_inc_simple_void_NN(sv);
834#endif
835 }
836 else
837 sv = NULL;
838 *++PL_stack_sp = sv;
7ea738a9
TK
839 }
840 }
84721d61
DM
841 else {
842 SV **svp = AvARRAY(av);
843 assert(svp || max == 0);
49799453
DM
844 for (i = 0; i < max; i++) {
845 SV *sv = *svp++;
9a500794 846#ifdef PERL_RC_STACK
49799453
DM
847 SvREFCNT_inc_simple_void(sv);
848#endif
849 *++PL_stack_sp = sv;
850 }
7ea738a9 851 }
49799453
DM
852 p1 = p2 = PL_stack_sp - (max-1);
 853 /* we've kept av on the stack (just below the pushed contents) so
854 * that a reference-counted stack keeps a reference to it for now
855 */
856 assert((SV*)av == p1[-1]);
fe1bc4cf
DM
857 }
858 else {
7ea738a9 859 p2 = MARK+1;
49799453 860 max = PL_stack_sp - MARK;
7ea738a9 861 }
fe1bc4cf 862
83a44efe
SF
863 /* shuffle stack down, removing optional initial cv (p1!=p2), plus
864 * any nulls; also stringify or converting to integer or number as
865 * required any args */
49799453
DM
866
867 /* no ref-counted SVs at base to be overwritten */
868 assert(p1 == p2 || (p1+1 == p2 && !*p1));
869
ff859a7f 870 copytmps = cBOOL(PL_sortcop);
fe1bc4cf 871 for (i=max; i > 0 ; i--) {
49799453
DM
872 SV *sv = *p2++;
873 if (sv) { /* Weed out nulls. */
874 if (copytmps && SvPADTMP(sv)) {
875 SV *nsv = sv_mortalcopy(sv);
9a500794 876#ifdef PERL_RC_STACK
49799453
DM
877 SvREFCNT_dec_NN(sv);
878 SvREFCNT_inc_simple_void_NN(nsv);
879#endif
880 sv = nsv;
7ea738a9 881 }
49799453 882 SvTEMP_off(sv);
7ea738a9
TK
883 if (!PL_sortcop) {
884 if (priv & OPpSORT_NUMERIC) {
885 if (priv & OPpSORT_INTEGER) {
49799453
DM
886 if (!SvIOK(sv))
887 (void)sv_2iv_flags(sv, SV_GMAGIC|SV_SKIP_OVERLOAD);
7ea738a9
TK
888 }
889 else {
49799453
DM
890 if (!SvNSIOK(sv))
891 (void)sv_2nv_flags(sv, SV_GMAGIC|SV_SKIP_OVERLOAD);
892 if (all_SIVs && !SvSIOK(sv))
7ea738a9
TK
893 all_SIVs = 0;
894 }
895 }
896 else {
49799453
DM
897 if (!SvPOK(sv))
898 (void)sv_2pv_flags(sv, 0,
7ea738a9
TK
899 SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
900 }
49799453 901 if (SvAMAGIC(sv))
7ea738a9 902 overloading = 1;
60779a30 903 }
49799453 904 *p1++ = sv;
7ea738a9
TK
905 }
906 else
907 max--;
84d4ea48 908 }
49799453 909
fe1bc4cf 910 if (max > 1) {
7ea738a9
TK
911 SV **start;
912 if (PL_sortcop) {
913 PERL_CONTEXT *cx;
914 const bool oldcatch = CATCH_GET;
8ae997c5 915 I32 old_savestack_ix = PL_savestack_ix;
84d4ea48 916
7ea738a9
TK
917 SAVEOP();
918
919 CATCH_SET(TRUE);
ba1af285 920 push_stackinfo(PERLSI_SORT, 1);
49799453 921
7ea738a9 922 if (!hasargs && !is_xsub) {
49799453 923 /* standard perl sub with values passed as $a and $b */
7ea738a9
TK
924 SAVEGENERICSV(PL_firstgv);
925 SAVEGENERICSV(PL_secondgv);
926 PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
927 gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
928 ));
929 PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
930 gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
931 ));
dc9ef998
TC
932 /* make sure the GP isn't removed out from under us for
933 * the SAVESPTR() */
934 save_gp(PL_firstgv, 0);
935 save_gp(PL_secondgv, 0);
936 /* we don't want modifications localized */
937 GvINTRO_off(PL_firstgv);
938 GvINTRO_off(PL_secondgv);
7ea738a9
TK
939 SAVEGENERICSV(GvSV(PL_firstgv));
940 SvREFCNT_inc(GvSV(PL_firstgv));
941 SAVEGENERICSV(GvSV(PL_secondgv));
942 SvREFCNT_inc(GvSV(PL_secondgv));
943 }
84d4ea48 944
33411212 945 gimme = G_SCALAR;
7ea738a9
TK
946 cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
947 if (!(flags & OPf_SPECIAL)) {
948 cx->cx_type = CXt_SUB|CXp_MULTICALL;
949 cx_pushsub(cx, cv, NULL, hasargs);
950 if (!is_xsub) {
951 PADLIST * const padlist = CvPADLIST(cv);
952
953 if (++CvDEPTH(cv) >= 2)
954 pad_push(padlist, CvDEPTH(cv));
955 PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));
956
957 if (hasargs) {
958 /* This is mostly copied from pp_entersub */
dc75098e 959 AV * const av0 = MUTABLE_AV(PAD_SVl(0));
7ea738a9
TK
960
961 cx->blk_sub.savearray = GvAV(PL_defgv);
dc75098e 962 GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av0));
7ea738a9
TK
963 }
964
965 }
966 }
486430a5 967
7ea738a9 968 start = p1 - max;
3edfb5c3 969 Perl_sortsv_flags(aTHX_ start, max,
7ea738a9
TK
970 (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
971 sort_flags);
84d4ea48 972
4df352a8 973 /* Reset cx, in case the context stack has been reallocated. */
4ebe6e95 974 cx = CX_CUR();
4df352a8 975
49799453
DM
976 /* the code used to think this could be > 0 */
977 assert(cx->blk_oldsp == 0);
978
979 rpp_popfree_to(PL_stack_base);
4df352a8 980
2f450c1b 981 CX_LEAVE_SCOPE(cx);
7ea738a9 982 if (!(flags & OPf_SPECIAL)) {
4df352a8 983 assert(CxTYPE(cx) == CXt_SUB);
a73d8813 984 cx_popsub(cx);
7ea738a9 985 }
2f450c1b 986 else
4df352a8 987 assert(CxTYPE(cx) == CXt_NULL);
2f450c1b 988 /* there isn't a POPNULL ! */
1dfbe6b4 989
7ea738a9 990 cx_popblock(cx);
5da525e9 991 CX_POP(cx);
026c6b9c 992 pop_stackinfo();
7ea738a9
TK
993 CATCH_SET(oldcatch);
994 }
995 else {
49799453
DM
996 /* call one of the built-in sort functions */
997
998 /* XXX this extend has been here since perl5.000. With safe
999 * signals, I don't think it's needed any more - DAPM.
1000 MEXTEND(SP, 20); Can't afford stack realloc on signal.
1001 */
1002 start = p1 - max;
433b3e2b
TK
1003 if (priv & OPpSORT_NUMERIC) {
1004 if ((priv & OPpSORT_INTEGER) || all_SIVs) {
1005 if (overloading)
044d25c7
TK
1006 if (descending)
1007 sortsv_amagic_i_ncmp_desc(aTHX_ start, max, sort_flags);
1008 else
1009 sortsv_amagic_i_ncmp(aTHX_ start, max, sort_flags);
433b3e2b 1010 else
044d25c7
TK
1011 if (descending)
1012 sortsv_i_ncmp_desc(aTHX_ start, max, sort_flags);
1013 else
1014 sortsv_i_ncmp(aTHX_ start, max, sort_flags);
433b3e2b
TK
1015 }
1016 else {
1017 if (overloading)
044d25c7
TK
1018 if (descending)
1019 sortsv_amagic_ncmp_desc(aTHX_ start, max, sort_flags);
1020 else
1021 sortsv_amagic_ncmp(aTHX_ start, max, sort_flags);
433b3e2b 1022 else
044d25c7
TK
1023 if (descending)
1024 sortsv_ncmp_desc(aTHX_ start, max, sort_flags);
1025 else
1026 sortsv_ncmp(aTHX_ start, max, sort_flags);
433b3e2b
TK
1027 }
1028 }
130c5df3 1029#ifdef USE_LOCALE_COLLATE
433b3e2b
TK
1030 else if(IN_LC_RUNTIME(LC_COLLATE)) {
1031 if (overloading)
044d25c7
TK
1032 if (descending)
1033 sortsv_amagic_cmp_locale_desc(aTHX_ start, max, sort_flags);
1034 else
1035 sortsv_amagic_cmp_locale(aTHX_ start, max, sort_flags);
433b3e2b 1036 else
044d25c7
TK
1037 if (descending)
1038 sortsv_cmp_locale_desc(aTHX_ start, max, sort_flags);
1039 else
1040 sortsv_cmp_locale(aTHX_ start, max, sort_flags);
433b3e2b 1041 }
130c5df3 1042#endif
433b3e2b
TK
1043 else {
1044 if (overloading)
044d25c7
TK
1045 if (descending)
1046 sortsv_amagic_cmp_desc(aTHX_ start, max, sort_flags);
1047 else
1048 sortsv_amagic_cmp(aTHX_ start, max, sort_flags);
433b3e2b 1049 else
044d25c7
TK
1050 if (descending)
1051 sortsv_cmp_desc(aTHX_ start, max, sort_flags);
1052 else
1053 sortsv_cmp(aTHX_ start, max, sort_flags);
433b3e2b 1054 }
7ea738a9
TK
1055 }
1056 if ((priv & OPpSORT_REVERSE) != 0) {
1057 SV **q = start+max-1;
1058 while (start < q) {
1059 SV * const tmp = *start;
1060 *start++ = *q;
1061 *q-- = tmp;
1062 }
1063 }
84d4ea48 1064 }
84721d61 1065
49799453
DM
1066 if (!av) {
1067 LEAVE;
1068 PL_stack_sp = ORIGMARK + max;
1069 return nextop;
1070 }
1071
1072 /* OPpSORT_INPLACE: copy back result to the array */
1073 {
1074 SV** const base = MARK+2;
99c9ca9e 1075 SSize_t max_minus_one = max - 1; /* attempt to work around mingw bug */
49799453
DM
1076
1077 /* we left the AV there so on a refcounted stack it wouldn't be
1078 * prematurely freed */
1079 assert(base[-1] == (SV*)av);
1080
84721d61 1081 if (SvMAGICAL(av)) {
49799453
DM
1082 for (i = 0; i <= max_minus_one; i++) {
1083 SV *sv = base[i];
1084 base[i] = newSVsv(sv);
9a500794 1085#ifdef PERL_RC_STACK
49799453
DM
1086 SvREFCNT_dec_NN(sv);
1087#endif
1088 }
84721d61 1089 av_clear(av);
99c9ca9e
YO
1090 if (max_minus_one >= 0)
1091 av_extend(av, max_minus_one);
1092 for (i=0; i <= max_minus_one; i++) {
84721d61
DM
1093 SV * const sv = base[i];
1094 SV ** const didstore = av_store(av, i, sv);
1095 if (SvSMAGICAL(sv))
1096 mg_set(sv);
9a500794 1097#ifdef PERL_RC_STACK
49799453
DM
1098 if (didstore)
1099 SvREFCNT_inc_simple_void_NN(sv);
1100#else
84721d61
DM
1101 if (!didstore)
1102 sv_2mortal(sv);
49799453 1103#endif
84721d61
DM
1104 }
1105 }
1106 else {
1107 /* the elements of av are likely to be the same as the
1108 * (non-refcounted) elements on the stack, just in a different
1109 * order. However, its possible that someone's messed with av
49799453
DM
1110 * in the meantime.
1111 * So to avoid freeing most/all the stack elements when
1112 * doing av_clear(), first bump the count on each element.
1113 * In addition, normally a *copy* of each sv should be
1114 * assigned to each array element; but if the only reference
1115 * to that sv was from the array, then we can skip the copy.
1116 *
1117 * For a refcounted stack, it's not necessary to bump the
1118 * refcounts initially, as the stack itself keeps the
1119 * elements alive during av_clear().
1120 *
84721d61 1121 */
99c9ca9e 1122 for (i = 0; i <= max_minus_one; i++) {
45c198c1
DM
1123 SV *sv = base[i];
1124 assert(sv);
9a500794 1125#ifdef PERL_RC_STACK
49799453
DM
1126 if (SvREFCNT(sv) > 2) {
1127 base[i] = newSVsv(sv);
1128 SvREFCNT_dec_NN(sv);
1129 }
1130#else
45c198c1
DM
1131 if (SvREFCNT(sv) > 1)
1132 base[i] = newSVsv(sv);
1133 else
1134 SvREFCNT_inc_simple_void_NN(sv);
49799453 1135#endif
45c198c1 1136 }
84721d61 1137 av_clear(av);
99c9ca9e
YO
1138 if (max_minus_one >= 0) {
1139 av_extend(av, max_minus_one);
84721d61
DM
1140 Copy(base, AvARRAY(av), max, SV*);
1141 }
99c9ca9e 1142 AvFILLp(av) = max_minus_one;
84721d61
DM
1143 AvREIFY_off(av);
1144 AvREAL_on(av);
9a500794 1145#ifdef PERL_RC_STACK
49799453
DM
1146 /* the AV now contributes 1 refcnt to each element */
1147 for (i = 0; i <= max_minus_one; i++)
1148 SvREFCNT_inc_void_NN(base[i]);
1149#endif
84721d61 1150 }
49799453
DM
1151 /* sort is only ever optimised with OPpSORT_INPLACE when the
1152 * (@a = sort @a) is in void context. (As an aside: the context
1153 * flag aught to be copied to the sort op: then we could assert
1154 * here that it's void).
1155 * Thus we can simply discard the stack elements now: their
1156 * reference counts have already claimed by av.
1157 */
1158 PL_stack_sp = ORIGMARK;
9a500794 1159#ifdef PERL_RC_STACK
49799453
DM
1160 SvREFCNT_dec_NN(av);
1161#endif
1162 LEAVE;
1163 return nextop;
fe1bc4cf 1164 }
84d4ea48
JH
1165}
1166
bdd09b42
DM
1167
1168/* call a traditional perl compare function, setting $a and $b */
1169
/* S_sortcv(): comparison callback used by pp_sort() for a traditional
 * perl comparison sub: install the two SVs being compared as the
 * globals $a and $b, run the sub's optree, and return its result as
 * an integer (< 0, 0 or > 0).
 */
static I32
S_sortcv(pTHX_ SV *const a, SV *const b)
{
    /* remember the savestack depth so anything localized by the
     * comparison sub can be unwound before returning */
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    PMOP * const pm = PL_curpm;    /* preserve last-match state across the call */
    COP * const cop = PL_curcop;   /* preserve the current COP across the call */
    SV *olda, *oldb;

    PERL_ARGS_ASSERT_SORTCV;

#ifdef PERL_RC_STACK
    assert(rpp_stack_is_rc());
#endif

    /* install a and b as $a and $b: bump the new value's refcount
     * before releasing the old one, in case old and new are the
     * same SV */
    olda = GvSV(PL_firstgv);
    GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);
    SvREFCNT_dec(olda);
    oldb = GvSV(PL_secondgv);
    GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);
    SvREFCNT_dec(oldb);
    assert(PL_stack_sp == PL_stack_base);
    /* execute the comparison sub's optree directly */
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);
    rpp_popfree_to(PL_stack_base);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}
1205
bdd09b42
DM
1206
1207/* call a perl compare function that has a ($$) prototype, setting @_ */
1208
/* S_sortcv_stacked(): comparison callback used by pp_sort() for a perl
 * comparison sub declared with a ($$) prototype: the two SVs are passed
 * as elements 0 and 1 of @_ (GvAV(PL_defgv)) rather than via $a/$b,
 * then the sub's optree is run and its integer result returned.
 */
static I32
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    AV * const av = GvAV(PL_defgv);  /* @_ */
    PMOP * const pm = PL_curpm;      /* preserve last-match state across the call */
    COP * const cop = PL_curcop;     /* preserve the current COP across the call */

    PERL_ARGS_ASSERT_SORTCV_STACKED;

#ifdef PERL_RC_STACK
    assert(rpp_stack_is_rc());
#endif

#ifdef PERL_RC_STACK
    assert(AvREAL(av));
    av_clear(av);
#else
    /* make @_ non-real so storing a and b below involves no
     * reference counting */
    if (AvREAL(av)) {
        av_clear(av);
        AvREAL_off(av);
        AvREIFY_on(av);
    }
#endif

    /* ensure @_ has room for two elements: first reclaim any slots
     * previously shifted off the front, and only reallocate if that
     * still isn't enough */
    if (AvMAX(av) < 1) {
        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);
            AvARRAY(av) = ary;
        }
        if (AvMAX(av) < 1) {
            Renew(ary,2,SV*);
            AvMAX(av) = 1;
            AvARRAY(av) = ary;
            AvALLOC(av) = ary;
        }
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
#ifdef PERL_RC_STACK
    /* on a refcounted stack @_ stays real, so it owns a reference to
     * each of its elements */
    SvREFCNT_inc_simple_void_NN(a);
    SvREFCNT_inc_simple_void_NN(b);
#endif
    assert(PL_stack_sp == PL_stack_base);
    /* execute the comparison sub's optree directly */
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);
    rpp_popfree_to(PL_stack_base);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}
1270
bdd09b42
DM
1271
1272/* call an XS compare function. (The two args are always passed on the
1273 * stack, regardless of whether it has a ($$) prototype or not.) */
1274
/* S_sortcv_xsub(): comparison callback used by pp_sort() when the
 * comparator is an XS sub. The two SVs are always passed on the
 * argument stack (regardless of any ($$) prototype), the XS body is
 * invoked, and its integer result is returned.
 */
static I32
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
{
    /* remember the savestack depth so anything localized by the XS
     * sub can be unwound before returning */
    const I32 oldsaveix = PL_savestack_ix;
    CV * const cv=MUTABLE_CV(PL_sortcop);
    I32 result;
    PMOP * const pm = PL_curpm;  /* preserve last-match state across the call */

    PERL_ARGS_ASSERT_SORTCV_XSUB;

#ifdef PERL_RC_STACK
    assert(rpp_stack_is_rc());
#endif

    /* push a mark and the two arguments, then call the XS body */
    assert(PL_stack_sp == PL_stack_base);
    PUSHMARK(PL_stack_sp);
    rpp_xpush_2(a, b);

    rpp_invoke_xs(cv);

    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);
    rpp_popfree_to(PL_stack_base);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}
1305
1306
044d25c7 1307PERL_STATIC_FORCE_INLINE I32
31e9e0a3 1308S_sv_ncmp(pTHX_ SV *const a, SV *const b)
84d4ea48 1309{
427fbfe8 1310 I32 cmp = do_ncmp(a, b);
7918f24d
NC
1311
1312 PERL_ARGS_ASSERT_SV_NCMP;
1313
427fbfe8 1314 if (cmp == 2) {
7ea738a9
TK
1315 if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
1316 return 0;
f3dab52a 1317 }
427fbfe8
TC
1318
1319 return cmp;
84d4ea48
JH
1320}
1321
044d25c7
TK
1322PERL_STATIC_FORCE_INLINE I32
1323S_sv_ncmp_desc(pTHX_ SV *const a, SV *const b)
1324{
1325 PERL_ARGS_ASSERT_SV_NCMP_DESC;
1326
1327 return -S_sv_ncmp(aTHX_ a, b);
1328}
1329
1330PERL_STATIC_FORCE_INLINE I32
31e9e0a3 1331S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
84d4ea48 1332{
901017d6
AL
1333 const IV iv1 = SvIV(a);
1334 const IV iv2 = SvIV(b);
7918f24d
NC
1335
1336 PERL_ARGS_ASSERT_SV_I_NCMP;
1337
84d4ea48
JH
1338 return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
1339}
901017d6 1340
044d25c7
TK
1341PERL_STATIC_FORCE_INLINE I32
1342S_sv_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
1343{
1344 PERL_ARGS_ASSERT_SV_I_NCMP_DESC;
1345
1346 return -S_sv_i_ncmp(aTHX_ a, b);
1347}
1348
/* Call the overloaded binary method METH on LEFT/RIGHT if either
 * operand has overloading magic; otherwise yield NULL.  Fully
 * parenthesized and without a trailing ';' so that, unlike the
 * historical form, it expands to a well-behaved expression usable in
 * any context (the old version could not appear inside a larger
 * expression and left a stray empty statement at every use site). */
#define tryCALL_AMAGICbin(left,right,meth) \
    ((SvAMAGIC(left) || SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL)

/* Clamp an arbitrary comparison result to the canonical -1/0/1. */
#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))
eeb9de02 1355
044d25c7 1356PERL_STATIC_FORCE_INLINE I32
5aaab254 1357S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
84d4ea48 1358{
31d632c3 1359 SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);
7918f24d
NC
1360
1361 PERL_ARGS_ASSERT_AMAGIC_NCMP;
1362
84d4ea48 1363 if (tmpsv) {
84d4ea48 1364 if (SvIOK(tmpsv)) {
901017d6 1365 const I32 i = SvIVX(tmpsv);
eeb9de02 1366 return SORT_NORMAL_RETURN_VALUE(i);
84d4ea48 1367 }
7ea738a9
TK
1368 else {
1369 const NV d = SvNV(tmpsv);
1370 return SORT_NORMAL_RETURN_VALUE(d);
1371 }
84d4ea48 1372 }
f0f5dc9d 1373 return S_sv_ncmp(aTHX_ a, b);
84d4ea48
JH
1374}
1375
044d25c7
TK
1376PERL_STATIC_FORCE_INLINE I32
1377S_amagic_ncmp_desc(pTHX_ SV *const a, SV *const b)
1378{
1379 PERL_ARGS_ASSERT_AMAGIC_NCMP_DESC;
1380
1381 return -S_amagic_ncmp(aTHX_ a, b);
1382}
1383
1384PERL_STATIC_FORCE_INLINE I32
5aaab254 1385S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
84d4ea48 1386{
31d632c3 1387 SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);
7918f24d
NC
1388
1389 PERL_ARGS_ASSERT_AMAGIC_I_NCMP;
1390
84d4ea48 1391 if (tmpsv) {
84d4ea48 1392 if (SvIOK(tmpsv)) {
901017d6 1393 const I32 i = SvIVX(tmpsv);
eeb9de02 1394 return SORT_NORMAL_RETURN_VALUE(i);
84d4ea48 1395 }
7ea738a9
TK
1396 else {
1397 const NV d = SvNV(tmpsv);
1398 return SORT_NORMAL_RETURN_VALUE(d);
1399 }
84d4ea48 1400 }
f0f5dc9d 1401 return S_sv_i_ncmp(aTHX_ a, b);
84d4ea48
JH
1402}
1403
044d25c7
TK
1404PERL_STATIC_FORCE_INLINE I32
1405S_amagic_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
1406{
1407 PERL_ARGS_ASSERT_AMAGIC_I_NCMP_DESC;
1408
1409 return -S_amagic_i_ncmp(aTHX_ a, b);
1410}
1411
1412PERL_STATIC_FORCE_INLINE I32
5aaab254 1413S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
84d4ea48 1414{
31d632c3 1415 SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);
7918f24d
NC
1416
1417 PERL_ARGS_ASSERT_AMAGIC_CMP;
1418
84d4ea48 1419 if (tmpsv) {
84d4ea48 1420 if (SvIOK(tmpsv)) {
901017d6 1421 const I32 i = SvIVX(tmpsv);
eeb9de02 1422 return SORT_NORMAL_RETURN_VALUE(i);
84d4ea48 1423 }
7ea738a9
TK
1424 else {
1425 const NV d = SvNV(tmpsv);
1426 return SORT_NORMAL_RETURN_VALUE(d);
1427 }
84d4ea48
JH
1428 }
1429 return sv_cmp(str1, str2);
1430}
1431
044d25c7
TK
1432PERL_STATIC_FORCE_INLINE I32
1433S_amagic_cmp_desc(pTHX_ SV *const str1, SV *const str2)
1434{
1435 PERL_ARGS_ASSERT_AMAGIC_CMP_DESC;
1436
1437 return -S_amagic_cmp(aTHX_ str1, str2);
1438}
1439
1440PERL_STATIC_FORCE_INLINE I32
1441S_cmp_desc(pTHX_ SV *const str1, SV *const str2)
1442{
1443 PERL_ARGS_ASSERT_CMP_DESC;
1444
1445 return -sv_cmp(str1, str2);
1446}
1447
91191cf7
KW
1448#ifdef USE_LOCALE_COLLATE
1449
044d25c7 1450PERL_STATIC_FORCE_INLINE I32
5aaab254 1451S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
84d4ea48 1452{
31d632c3 1453 SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);
7918f24d
NC
1454
1455 PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;
1456
84d4ea48 1457 if (tmpsv) {
84d4ea48 1458 if (SvIOK(tmpsv)) {
901017d6 1459 const I32 i = SvIVX(tmpsv);
eeb9de02 1460 return SORT_NORMAL_RETURN_VALUE(i);
84d4ea48 1461 }
7ea738a9
TK
1462 else {
1463 const NV d = SvNV(tmpsv);
1464 return SORT_NORMAL_RETURN_VALUE(d);
1465 }
84d4ea48
JH
1466 }
1467 return sv_cmp_locale(str1, str2);
1468}
241d1a3b 1469
044d25c7
TK
1470PERL_STATIC_FORCE_INLINE I32
1471S_amagic_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
1472{
1473 PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE_DESC;
1474
1475 return -S_amagic_cmp_locale(aTHX_ str1, str2);
1476}
1477
1478PERL_STATIC_FORCE_INLINE I32
1479S_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
1480{
1481 PERL_ARGS_ASSERT_CMP_LOCALE_DESC;
1482
1483 return -sv_cmp_locale(str1, str2);
1484}
1485
91191cf7
KW
1486#endif
1487
241d1a3b 1488/*
14d04a33 1489 * ex: set ts=8 sts=4 sw=4 et:
37442d52 1490 */