/* pp_sort.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
 * 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
 *
 * You may distribute under the terms of either the GNU General Public
 * License or the Artistic License, as specified in the README file.
 *
 */

/*
 * ...they shuffled back towards the rear of the line. 'No, not at the
 * rear!' the slave-driver shouted. 'Three files up. And stay there...
 *
 *     [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
 */

/* This file contains pp ("push/pop") functions that
 * execute the opcodes that make up a perl program. A typical pp function
 * expects to find its arguments on the stack, and usually pushes its
 * results onto the stack, hence the 'pp' terminology. Each OP structure
 * contains a pointer to the relevant pp_foo() function.
 *
 * This particular file just contains pp_sort(), which is complex
 * enough to merit its own file! See the other pp*.c files for the rest of
 * the pp_ functions.
 */

#include "EXTERN.h"
#define PERL_IN_PP_SORT_C
#include "perl.h"

/* Below this many elements, the auxiliary array fits on the C stack */
#ifndef SMALLSORT
#define SMALLSORT (200)
#endif

/* Flags for sortsv_flags */
#define SORTf_STABLE    1
#define SORTf_UNSTABLE  2

/*
 * The mergesort implementation is by Peter M. McIlroy <pmcilroy@lucent.com>.
 *
 * The original code was written in conjunction with the BSD Computer
 * Software Research Group at the University of California, Berkeley.
 *
 * See also: "Optimistic Sorting and Information Theoretic Complexity"
 *           Peter McIlroy
 *           SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
 *           pp 467-474, Austin, Texas, 25-27 January 1993.
 *
 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>.
 *
 * The code can be distributed under the same terms as Perl itself.
 *
 */

typedef char * aptr;            /* pointer for arithmetic on sizes */
typedef SV * gptr;              /* pointers in our lists */

/* Binary merge internal sort, with a few special mods
** for the special perl environment it now finds itself in.
**
** Things that were once options have been hotwired
** to values suitable for this use. In particular, we'll always
** initialize looking for natural runs, we'll always produce stable
** output, and we'll always do Peter McIlroy's binary merge.
*/

/* Pointer types for arithmetic and storage and convenience casts */

#define APTR(P) ((aptr)(P))
#define GPTP(P) ((gptr *)(P))
#define GPPP(P) ((gptr **)(P))


/* byte offset from pointer P to (larger) pointer Q */
#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))

#define PSIZE sizeof(gptr)

/* If PSIZE is a power of 2, make PSHIFT that power, if that helps */

#ifdef PSHIFT
#define PNELEM(P, Q)    (BYTEOFF(P,Q) >> (PSHIFT))
#define PNBYTE(N)       ((N) << (PSHIFT))
#define PINDEX(P, N)    (GPTP(APTR(P) + PNBYTE(N)))
#else
/* Leave optimization to compiler */
#define PNELEM(P, Q)    (GPTP(Q) - GPTP(P))
#define PNBYTE(N)       ((N) * (PSIZE))
#define PINDEX(P, N)    (GPTP(P) + (N))
#endif

/* Pointer into other corresponding to pointer into this */
#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))

/* Copy the elements in [src, lim) to dst, advancing src and dst;
 * assumes at least one element is to be copied */
#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)


/* Runs are identified by a pointer in the auxiliary list.
** The pointer is at the start of the list,
** and it points to the start of the next list.
** NEXT is used as an lvalue, too.
*/

#define NEXT(P) (*GPPP(P))


/* PTHRESH is the minimum number of pairs with the same sense to justify
** checking for a run and extending it. Note that PTHRESH counts PAIRS,
** not just elements, so PTHRESH == 8 means a run of 16.
*/

#define PTHRESH (8)

/* RTHRESH is the number of elements in a run that must compare low
** to the low element from the opposing run before we justify
** doing a binary rampup instead of single stepping.
** In random input, N in a row low should only happen with
** probability 2^(1-N), so we can risk that we are dealing
** with orderly input without paying much when we aren't.
*/

#define RTHRESH (6)
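
/* For instance (an illustrative note, not in the original comments): with
** RTHRESH == 6, six lows in a row occur in random input with probability
** 2^(1-6) = 1/32, so the binary rampup is only attempted when the data
** look genuinely ordered.
*/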


/*
** Overview of algorithm and variables.
** The array of elements at list1 will be organized into runs of length 2,
** or runs of length >= 2 * PTHRESH. We only try to form long runs when
** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
**
** Unless otherwise specified, pair pointers address the first of two elements.
**
** b and b+1 are a pair that compare with sense "sense".
** b is the "bottom" of adjacent pairs that might form a longer run.
**
** p2 parallels b in the list2 array, where runs are defined by
** a pointer chain.
**
** t represents the "top" of the adjacent pairs that might extend
** the run beginning at b. Usually, t addresses a pair
** that compares with opposite sense from (b,b+1).
** However, it may also address a singleton element at the end of list1,
** or it may be equal to "last", the first element beyond list1.
**
** r addresses the Nth pair following b. If this would be beyond t,
** we back it off to t. Only when r is less than t do we consider the
** run long enough to be worth checking.
**
** q addresses a pair such that the pairs at b through q already form a run.
** Often, q will equal b, indicating we are only sure of the pair itself.
** However, a search on the previous cycle may have revealed a longer run,
** so q may be greater than b.
**
** p is used to work back from a candidate r, trying to reach q,
** which would mean b through r would be a run. If we discover such a run,
** we start q at r and try to push it further towards t.
** If b through r is NOT a run, we detect the wrong order at (p-1,p).
** In any event, after the check (if any), we have two main cases.
**
** 1) Short run. b <= q < p <= r <= t.
**      b through q is a run (perhaps trivial)
**      q through p are uninteresting pairs
**      p through r is a run
**
** 2) Long run. b < r <= q < t.
**      b through q is a run (of length >= 2 * PTHRESH)
**
** Note that degenerate cases are not only possible, but likely.
** For example, if the pair following b compares with opposite sense,
** then b == q < p == r == t.
*/

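/* A concrete illustration (not part of the original comments): with
** PTHRESH == 8, the already-ascending input 1 2 3 ... 20 sets sense == 0
** at the pair (1,2); every later pair and pair boundary compares the same
** way, so dynprep returns a single run covering all 20 elements, leaving
** the merge phase nothing to do. For 2 1 4 3 6 5 ..., each pair has
** descending sense, but the pairs do not chain into a longer run (1 > 4
** fails), so dynprep emits length-2 runs, swapping each pair into
** ascending order as it goes, and the merge phase does all the real work.
*/
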
PERL_STATIC_FORCE_INLINE IV __attribute__always_inline__
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
{
    I32 sense;
    gptr *b, *p, *q, *t, *p2;
    gptr *last, *r;
    IV runs = 0;

    b = list1;
    last = PINDEX(b, nmemb);
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }
        q = b;
        /* Having laid out the playing field, look for long runs */
        do {
            p = r = b + (2 * PTHRESH);
            if (r >= t) p = r = t;      /* too short to care about */
            else {
                while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                       ((p -= 2) > q)) {}
                if (p <= q) {
                    /* b through r is a (long) run.
                    ** Extend it as far as possible.
                    */
                    p = q = r;
                    while (((p += 2) < t) &&
                           ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
                    r = p = q + 2;      /* no simple pairs, no after-run */
                }
            }
            if (q > b) {                /* run of greater than 2 at b */
                gptr *savep = p;

                p = q += 2;
                /* pick up singleton, if possible */
                if ((p == t) &&
                    ((t + 1) == last) &&
                    ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                    savep = r = p = q = last;
                p2 = NEXT(p2) = p2 + (p - b);       ++runs;
                if (sense)
                    while (b < --p) {
                        const gptr c = *b;
                        *b++ = *p;
                        *p = c;
                    }
                p = savep;
            }
            while (q < p) {             /* simple pairs */
                p2 = NEXT(p2) = p2 + 2; ++runs;
                if (sense) {
                    const gptr c = *q++;
                    *(q-1) = *q;
                    *q++ = c;
                } else q += 2;
            }
            if (((b = p) == t) && ((t+1) == last)) {
                NEXT(p2) = p2 + 1;      ++runs;
                b++;
            }
            q = r;
        } while (b < t);
        sense = !sense;
    }
    return runs;
}


/* The original merge sort, in use since 5.7, was as fast as, or faster than,
 * qsort on many platforms, but slower than qsort, conspicuously so,
 * on others. The most likely explanation was platform-specific
 * differences in cache sizes and relative speeds.
 *
 * The quicksort divide-and-conquer algorithm guarantees that, as the
 * problem is subdivided into smaller and smaller parts, the parts
 * fit into smaller (and faster) caches. So it doesn't matter how
 * many levels of cache exist, quicksort will "find" them, and,
 * as long as smaller is faster, take advantage of them.
 *
 * By contrast, consider how the original mergesort algorithm worked.
 * Suppose we have five runs (each typically of length 2 after dynprep).
 *
 * pass               base                        aux
 *  0              1 2 3 4 5
 *  1                                         12 34 5
 *  2              1234 5
 *  3                                         12345
 *  4              12345
 *
 * Adjacent pairs are merged in "grand sweeps" through the input.
 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 * runs 3 and 4 are merged and the records from run 5 have been copied.
 * The only cache that matters is one large enough to hold *all* the input.
 * On some platforms, this may be many times slower than smaller caches.
 *
 * The following pseudo-code uses the same basic merge algorithm,
 * but in a divide-and-conquer way.
 *
 * # merge $runs runs at offset $offset of list $list1 into $list2.
 * # all unmerged runs ($runs == 1) originate in list $base.
 * sub mgsort2 {
 *     my ($offset, $runs, $base, $list1, $list2) = @_;
 *
 *     if ($runs == 1) {
 *         if ($list1 is $base) copy run to $list2
 *         return offset of end of list (or copy)
 *     } else {
 *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
 *         mgsort2($off2, $runs/2, $base, $list2, $list1)
 *         merge the adjacent runs at $offset of $list1 into $list2
 *         return the offset of the end of the merged runs
 *     }
 * }
 * mgsort2(0, $runs, $base, $aux, $base);
 *
 * For our 5 runs, the tree of calls looks like
 *
 *                5
 *           3         2
 *         2   1     1   1
 *        1 1
 *
 *        1 2  3     4   5
 *
 * and the corresponding activity looks like
 *
 *      copy runs 1 and 2 from base to aux
 *      merge runs 1 and 2 from aux to base
 *      (run 3 is where it belongs, no copy needed)
 *      merge runs 12 and 3 from base to aux
 *      (runs 4 and 5 are where they belong, no copy needed)
 *      merge runs 4 and 5 from base to aux
 *      merge runs 123 and 45 from aux to base
 *
 * Note that we merge runs 1 and 2 immediately after copying them,
 * while they are still likely to be in fast cache. Similarly,
 * run 3 is merged with run 12 while it still may be lingering in cache.
 * This implementation should therefore enjoy much of the cache-friendly
 * behavior that quicksort does. In addition, it does less copying
 * than the original mergesort implementation (only runs 1 and 2 are copied)
 * and the "balancing" of merges is better (merged runs comprise more nearly
 * equal numbers of original runs).
 *
 * The actual cache-friendly implementation will use a pseudo-stack
 * to avoid recursion, and will unroll processing of runs of length 2,
 * but it is otherwise similar to the recursive implementation.
 */
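
/* The same recursion rendered as C (an illustrative sketch only, not the
 * code below: the real implementation replaces recursion with a pseudo-stack
 * and tracks run boundaries with the NEXT() pointer chain; run_end() and
 * merge2() are hypothetical helpers):
 *
 *    static size_t
 *    mgsort2(size_t offset, size_t runs, int *base, int *list1, int *list2)
 *    {
 *        if (runs == 1) {
 *            size_t end = run_end(offset);       // offset past this run
 *            if (list1 == base)                  // copy run to list2
 *                memcpy(list2 + offset, base + offset,
 *                       (end - offset) * sizeof(int));
 *            return end;
 *        } else {
 *            size_t off2 = mgsort2(offset, runs - runs/2, base, list2, list1);
 *            size_t end  = mgsort2(off2,   runs/2,        base, list2, list1);
 *            // merge [offset,off2) with [off2,end) of list1 into list2
 *            merge2(list1, list2, offset, off2, end);
 *            return end;
 *        }
 *    }
 *    // top level: mgsort2(0, nruns, base, aux, base);
 */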

typedef struct {
    IV  offset;         /* offset of 1st of 2 runs at this level */
    IV  runs;           /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */

PERL_STATIC_FORCE_INLINE void
S_sortsv_flags_impl(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    IV i, run, offset;
    I32 sense, level;
    gptr *f1, *f2, *t, *b, *p;
    int iwhich;
    gptr *aux;
    gptr *p1;
    gptr small[SMALLSORT];
    gptr *which[3];
    off_runs stack[60], *stackp;

    PERL_UNUSED_ARG(flags);
    PERL_ARGS_ASSERT_SORTSV_FLAGS_IMPL;
    if (nmemb <= 1) return;                     /* sorted trivially */

    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { Newx(aux,nmemb,gptr); }              /* allocate auxiliary array */
    level = 0;
    stackp = stack;
    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    which[0] = which[2] = base;
    which[1] = aux;
    for (;;) {
        /* On levels where both runs have been constructed (stackp->runs == 0),
         * merge them, and note the offset of their end, in case the offset
         * is needed at the next level up. Hop up a level, and,
         * as long as stackp->runs is 0, keep merging.
         */
        IV runs = stackp->runs;
        if (runs == 0) {
            gptr *list1, *list2;
            iwhich = level & 1;
            list1 = which[iwhich];              /* area where runs are now */
            list2 = which[++iwhich];            /* area for merged runs */
            do {
                gptr *l1, *l2, *tp2;
                offset = stackp->offset;
                f1 = p1 = list1 + offset;       /* start of first run */
                p = tp2 = list2 + offset;       /* where merged run will go */
                t = NEXT(p);                    /* where first run ends */
                f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
                t = NEXT(t);                    /* where second run ends */
                l2 = POTHER(t, list2, list1);   /* ... on the other side */
                offset = PNELEM(list2, t);
                while (f1 < l1 && f2 < l2) {
                    /* If head 1 is larger than head 2, find ALL the elements
                    ** in list 2 strictly less than head1, write them all,
                    ** then head 1. Then compare the new heads, and repeat,
                    ** until one or both lists are exhausted.
                    **
                    ** In all comparisons (after establishing
                    ** which head to merge) the item to merge
                    ** (at pointer q) is the first operand of
                    ** the comparison. When we want to know
                    ** if "q is strictly less than the other",
                    ** we can't just do
                    **    cmp(q, other) < 0
                    ** because stability demands that we treat equality
                    ** as high when q comes from l2, and as low when
                    ** q was from l1. So we ask the question by doing
                    **    cmp(q, other) <= sense
                    ** and make sense == 0 when equality should look low,
                    ** and -1 when equality should look high.
                    */

                    gptr *q;
                    if (cmp(aTHX_ *f1, *f2) <= 0) {
                        q = f2; b = f1; t = l1;
                        sense = -1;
                    } else {
                        q = f1; b = f2; t = l2;
                        sense = 0;
                    }


                    /* ramp up
                    **
                    ** Leave t at something strictly
                    ** greater than q (or at the end of the list),
                    ** and b at something strictly less than q.
                    */
                    for (i = 1, run = 0 ;;) {
                        if ((p = PINDEX(b, i)) >= t) {
                            /* off the end */
                            if (((p = PINDEX(t, -1)) > b) &&
                                (cmp(aTHX_ *q, *p) <= sense))
                                t = p;
                            else b = p;
                            break;
                        } else if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                            break;
                        } else b = p;
                        if (++run >= RTHRESH) i += i;
                    }


                    /* q is known to follow b and must be inserted before t.
                    ** Increment b, so the range of possibilities is [b,t).
                    ** Round binary split down, to favor early appearance.
                    ** Adjust b and t until q belongs just before t.
                    */

                    b++;
                    while (b < t) {
                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                        if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                        } else b = p + 1;
                    }


                    /* Copy all the strictly low elements */

                    if (q == f1) {
                        FROMTOUPTO(f2, tp2, t);
                        *tp2++ = *f1++;
                    } else {
                        FROMTOUPTO(f1, tp2, t);
                        *tp2++ = *f2++;
                    }
                }


                /* Run out remaining list */
                if (f1 == l1) {
                    if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                } else FROMTOUPTO(f1, tp2, l1);
                p1 = NEXT(p1) = POTHER(tp2, list2, list1);

                if (--level == 0) goto done;
                --stackp;
                t = list1; list1 = list2; list2 = t;    /* swap lists */
            } while ((runs = stackp->runs) == 0);
        }


        stackp->runs = 0;               /* current run will finish level */
        /* While there are more than 2 runs remaining,
         * turn them into exactly 2 runs (at the "other" level),
         * each made up of approximately half the runs.
         * Stack the second half for later processing,
         * and set about producing the first half now.
         */
        while (runs > 2) {
            ++level;
            ++stackp;
            stackp->offset = offset;
            runs -= stackp->runs = runs / 2;
        }
        /* We must construct a single run from 1 or 2 runs.
         * All the original runs are in which[0] == base.
         * The run we construct must end up in which[level&1].
         */
        iwhich = level & 1;
        if (runs == 1) {
            /* Constructing a single run from a single run.
             * If it's where it belongs already, there's nothing to do.
             * Otherwise, copy it to where it belongs.
             * A run of 1 is either a singleton at level 0,
             * or the second half of a split 3. In neither event
             * is it necessary to set offset. It will be set by the merge
             * that immediately follows.
             */
            if (iwhich) {       /* Belongs in aux, currently in base */
                f1 = b = PINDEX(base, offset);  /* where list starts */
                f2 = PINDEX(aux, offset);       /* where list goes */
                t = NEXT(f2);                   /* where list will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                t = PINDEX(base, offset);       /* where it currently ends */
                FROMTOUPTO(f1, f2, t);          /* copy */
                NEXT(b) = t;                    /* set up parallel pointer */
            } else if (level == 0) goto done;   /* single run at level 0 */
        } else {
            /* Constructing a single run from two runs.
             * The merge code at the top will do that.
             * We need only make sure the two runs are in the "other" array,
             * so they'll end up in the correct array after the merge.
             */
            ++level;
            ++stackp;
            stackp->offset = offset;
            stackp->runs = 0;   /* take care of both runs, trigger merge */
            if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
                f1 = b = PINDEX(base, offset);  /* where first run starts */
                f2 = PINDEX(aux, offset);       /* where it will be copied */
                t = NEXT(f2);                   /* where first run will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                p = PINDEX(base, offset);       /* end of first run */
                t = NEXT(t);                    /* where second run will end */
                t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
                FROMTOUPTO(f1, f2, t);          /* copy both runs */
                NEXT(b) = p;                    /* parallel pointer for 1st */
                NEXT(p) = t;                    /* ... and for second */
            }
        }
    }
  done:
    if (aux != small) Safefree(aux);    /* free iff allocated */

    return;
}

/*
=head1 SV Manipulation Functions

=for apidoc sortsv_flags

In-place sort an array of SV pointers with the given comparison routine,
with various SORTf_* flag options.

=cut
*/
void
Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    PERL_ARGS_ASSERT_SORTSV_FLAGS;

    sortsv_flags_impl(base, nmemb, cmp, flags);
}

/*
 * Each of the sortsv_* functions below contains an inlined copy of
 * sortsv_flags_impl() with an inlined comparator. Basically, we are
 * emulating C++ templates by using __attribute__((always_inline)).
 *
 * The purpose of that is to avoid the function call overhead inside
 * the sorting routine, which calls the comparison function multiple
 * times per sorted item.
 */
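
/* The pattern in isolation (an illustrative sketch, not perl API):
 *
 *    static inline __attribute__((always_inline)) void
 *    sort_impl(int *a, size_t n, int (*cmp)(int, int))
 *    {
 *        size_t i, j;                        // insertion sort: calls cmp()
 *        for (i = 1; i < n; i++)             // many times per element
 *            for (j = i; j > 0 && cmp(a[j-1], a[j]) > 0; j--) {
 *                int t = a[j]; a[j] = a[j-1]; a[j-1] = t;
 *            }
 *    }
 *
 *    static int by_value(int x, int y) { return (x > y) - (x < y); }
 *
 *    static void sort_by_value(int *a, size_t n)
 *    {
 *        sort_impl(a, n, by_value);  // sort_impl is forced inline here, so
 *    }                               // the compiler can then inline
 *                                    // by_value at every call site
 *
 * Each wrapper thus gets its own specialized copy of the sorting loop,
 * much as a C++ template instantiation would.
 */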

static void
sortsv_amagic_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp, flags);
}

static void
sortsv_amagic_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp_desc, flags);
}

static void
sortsv_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp, flags);
}

static void
sortsv_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp_desc, flags);
}

static void
sortsv_amagic_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp, flags);
}

static void
sortsv_amagic_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp_desc, flags);
}

static void
sortsv_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp, flags);
}

static void
sortsv_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp_desc, flags);
}

static void
sortsv_amagic_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp, flags);
}

static void
sortsv_amagic_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_desc, flags);
}

static void
sortsv_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp, flags);
}

static void
sortsv_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_desc, flags);
}

#ifdef USE_LOCALE_COLLATE

static void
sortsv_amagic_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale, flags);
}

static void
sortsv_amagic_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale_desc, flags);
}

static void
sortsv_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp_locale, flags);
}

static void
sortsv_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_locale_desc, flags);
}

#endif

/*
=head1 Array Manipulation Functions

=for apidoc sortsv

In-place sort an array of SV pointers with the given comparison routine.

Currently this always uses mergesort. See C<L</sortsv_flags>> for a more
flexible routine.

=cut
*/

void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    sortsv_flags(array, nmemb, cmp, 0);
}

/* SvNSIOK(sv): sv has a valid NV, or an IV that is not flagged as a UV.
 * SvSIOK(sv):  sv has a valid IV that is not flagged as a UV.
 * SvNSIV(sv):  sv's numeric value: the NV if set, else the signed IV,
 *              else the value computed by sv_2nv(). */
#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )

PP(pp_sort)
{
    dSP; dMARK; dORIGMARK;
    SV **p1 = ORIGMARK+1, **p2;
    SSize_t max, i;
    AV* av = NULL;
    GV *gv;
    CV *cv = NULL;
    U8 gimme = GIMME_V;
    OP* const nextop = PL_op->op_next;
    I32 overloading = 0;
    bool hasargs = FALSE;
    bool copytmps;
    I32 is_xsub = 0;
    const U8 priv = PL_op->op_private;
    const U8 flags = PL_op->op_flags;
    U32 sort_flags = 0;
    I32 all_SIVs = 1, descending = 0;

    if ((priv & OPpSORT_DESCEND) != 0)
        descending = 1;
    if ((priv & OPpSORT_STABLE) != 0)
        sort_flags |= SORTf_STABLE;
    if ((priv & OPpSORT_UNSTABLE) != 0)
        sort_flags |= SORTf_UNSTABLE;

    if (gimme != G_ARRAY) {
        SP = MARK;
        EXTEND(SP,1);
        RETPUSHUNDEF;
    }

    ENTER;
    SAVEVPTR(PL_sortcop);
    if (flags & OPf_STACKED) {
        if (flags & OPf_SPECIAL) {
            OP *nullop = OpSIBLING(cLISTOP->op_first);  /* pass pushmark */
            assert(nullop->op_type == OP_NULL);
            PL_sortcop = nullop->op_next;
        }
        else {
            GV *autogv = NULL;
            HV *stash;
            cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
          check_cv:
            if (cv && SvPOK(cv)) {
                const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
                if (proto && strEQ(proto, "$$")) {
                    hasargs = TRUE;
                }
            }
            if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
                is_xsub = 1;
            }
            else if (!(cv && CvROOT(cv))) {
                if (gv) {
                    goto autoload;
                }
                else if (!CvANON(cv) && (gv = CvGV(cv))) {
                    if (cv != GvCV(gv)) cv = GvCV(gv);
                  autoload:
                    if (!autogv && (
                        autogv = gv_autoload_pvn(
                            GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
                            GvNAMEUTF8(gv) ? SVf_UTF8 : 0
                        )
                    )) {
                        cv = GvCVu(autogv);
                        goto check_cv;
                    }
                    else {
                        SV *tmpstr = sv_newmortal();
                        gv_efullname3(tmpstr, gv, NULL);
                        DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
                            SVfARG(tmpstr));
                    }
                }
                else {
                    DIE(aTHX_ "Undefined subroutine in sort");
                }
            }

            if (is_xsub)
                PL_sortcop = (OP*)cv;
            else
                PL_sortcop = CvSTART(cv);
        }
    }
    else {
        PL_sortcop = NULL;
    }

    /* optimiser converts "@a = sort @a" to "sort \@a". In this case,
     * push (@a) onto stack, then assign result back to @a at the end of
     * this function */
    if (priv & OPpSORT_INPLACE) {
        assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
        (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
        av = MUTABLE_AV((*SP));
        if (SvREADONLY(av))
            Perl_croak_no_modify();
        max = AvFILL(av) + 1;
        MEXTEND(SP, max);
        if (SvMAGICAL(av)) {
            for (i=0; i < max; i++) {
                SV **svp = av_fetch(av, i, FALSE);
                *SP++ = (svp) ? *svp : NULL;
            }
        }
        else {
            SV **svp = AvARRAY(av);
            assert(svp || max == 0);
            for (i = 0; i < max; i++)
                *SP++ = *svp++;
        }
        SP--;
        p1 = p2 = SP - (max-1);
    }
    else {
        p2 = MARK+1;
        max = SP - MARK;
    }

    /* shuffle stack down, removing optional initial cv (p1!=p2), plus
     * any nulls; also stringify any args, or convert them to integer or
     * number, as required */
    copytmps = cBOOL(PL_sortcop);
    for (i=max; i > 0 ; i--) {
        if ((*p1 = *p2++)) {                    /* Weed out nulls. */
            if (copytmps && SvPADTMP(*p1)) {
                *p1 = sv_mortalcopy(*p1);
            }
            SvTEMP_off(*p1);
            if (!PL_sortcop) {
                if (priv & OPpSORT_NUMERIC) {
                    if (priv & OPpSORT_INTEGER) {
                        if (!SvIOK(*p1))
                            (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                    }
                    else {
                        if (!SvNSIOK(*p1))
                            (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                        if (all_SIVs && !SvSIOK(*p1))
                            all_SIVs = 0;
                    }
                }
                else {
                    if (!SvPOK(*p1))
                        (void)sv_2pv_flags(*p1, 0,
                            SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
                }
                if (SvAMAGIC(*p1))
                    overloading = 1;
            }
            p1++;
        }
        else
            max--;
    }
    if (max > 1) {
        SV **start;
        if (PL_sortcop) {
            PERL_CONTEXT *cx;
            const bool oldcatch = CATCH_GET;
            I32 old_savestack_ix = PL_savestack_ix;

            SAVEOP();

            CATCH_SET(TRUE);
            PUSHSTACKi(PERLSI_SORT);
            if (!hasargs && !is_xsub) {
                SAVEGENERICSV(PL_firstgv);
                SAVEGENERICSV(PL_secondgv);
                PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                /* make sure the GP isn't removed out from under us for
                 * the SAVESPTR() */
                save_gp(PL_firstgv, 0);
                save_gp(PL_secondgv, 0);
                /* we don't want modifications localized */
                GvINTRO_off(PL_firstgv);
                GvINTRO_off(PL_secondgv);
                SAVEGENERICSV(GvSV(PL_firstgv));
                SvREFCNT_inc(GvSV(PL_firstgv));
                SAVEGENERICSV(GvSV(PL_secondgv));
                SvREFCNT_inc(GvSV(PL_secondgv));
            }

            gimme = G_SCALAR;
            cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
            if (!(flags & OPf_SPECIAL)) {
                cx->cx_type = CXt_SUB|CXp_MULTICALL;
                cx_pushsub(cx, cv, NULL, hasargs);
                if (!is_xsub) {
                    PADLIST * const padlist = CvPADLIST(cv);

                    if (++CvDEPTH(cv) >= 2)
                        pad_push(padlist, CvDEPTH(cv));
                    PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));

                    if (hasargs) {
                        /* This is mostly copied from pp_entersub */
                        AV * const av = MUTABLE_AV(PAD_SVl(0));

                        cx->blk_sub.savearray = GvAV(PL_defgv);
                        GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
                    }

                }
            }

            start = p1 - max;
            Perl_sortsv_flags(aTHX_ start, max,
                    (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
                    sort_flags);

            /* Reset cx, in case the context stack has been reallocated. */
            cx = CX_CUR();

            PL_stack_sp = PL_stack_base + cx->blk_oldsp;

            CX_LEAVE_SCOPE(cx);
            if (!(flags & OPf_SPECIAL)) {
                assert(CxTYPE(cx) == CXt_SUB);
                cx_popsub(cx);
            }
            else
                assert(CxTYPE(cx) == CXt_NULL);
            /* there isn't a POPNULL ! */

            cx_popblock(cx);
            CX_POP(cx);
            POPSTACK;
            CATCH_SET(oldcatch);
        }
        else {
            MEXTEND(SP, 20);    /* Can't afford stack realloc on signal. */
            start = ORIGMARK+1;
            if (priv & OPpSORT_NUMERIC) {
                if ((priv & OPpSORT_INTEGER) || all_SIVs) {
                    if (overloading)
                        if (descending)
                            sortsv_amagic_i_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_amagic_i_ncmp(aTHX_ start, max, sort_flags);
                    else
                        if (descending)
                            sortsv_i_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_i_ncmp(aTHX_ start, max, sort_flags);
                }
                else {
                    if (overloading)
                        if (descending)
                            sortsv_amagic_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_amagic_ncmp(aTHX_ start, max, sort_flags);
                    else
                        if (descending)
                            sortsv_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_ncmp(aTHX_ start, max, sort_flags);
                }
            }
#ifdef USE_LOCALE_COLLATE
            else if(IN_LC_RUNTIME(LC_COLLATE)) {
                if (overloading)
                    if (descending)
                        sortsv_amagic_cmp_locale_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_amagic_cmp_locale(aTHX_ start, max, sort_flags);
                else
                    if (descending)
                        sortsv_cmp_locale_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_cmp_locale(aTHX_ start, max, sort_flags);
            }
#endif
            else {
                if (overloading)
                    if (descending)
                        sortsv_amagic_cmp_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_amagic_cmp(aTHX_ start, max, sort_flags);
                else
                    if (descending)
                        sortsv_cmp_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_cmp(aTHX_ start, max, sort_flags);
            }
        }
        if ((priv & OPpSORT_REVERSE) != 0) {
            SV **q = start+max-1;
            while (start < q) {
                SV * const tmp = *start;
                *start++ = *q;
                *q-- = tmp;
            }
        }
    }
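
    /* How Perl-level sorts typically reach the dispatch above (an
     * illustrative note, not in the original): the peephole optimiser
     * recognizes common comparison blocks and sets op_private flags, e.g.
     *    sort @x;                  # plain string compare
     *    sort { $a <=> $b } @x;    # OPpSORT_NUMERIC
     *    sort { $b <=> $a } @x;    # OPpSORT_NUMERIC|OPpSORT_DESCEND
     *    reverse sort @x;          # may become OPpSORT_REVERSE
     * so no Perl-level comparator needs to run for these cases.
     */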

    if (av) {
        /* copy back result to the array */
        SV** const base = MARK+1;
        SSize_t max_minus_one = max - 1; /* attempt to work around mingw bug */
        if (SvMAGICAL(av)) {
            for (i = 0; i <= max_minus_one; i++)
                base[i] = newSVsv(base[i]);
            av_clear(av);
            if (max_minus_one >= 0)
                av_extend(av, max_minus_one);
            for (i=0; i <= max_minus_one; i++) {
                SV * const sv = base[i];
                SV ** const didstore = av_store(av, i, sv);
                if (SvSMAGICAL(sv))
                    mg_set(sv);
                if (!didstore)
                    sv_2mortal(sv);
            }
        }
        else {
            /* the elements of av are likely to be the same as the
             * (non-refcounted) elements on the stack, just in a different
             * order. However, it's possible that someone's messed with av
             * in the meantime. So bump and unbump the relevant refcounts
             * first.
             */
            for (i = 0; i <= max_minus_one; i++) {
                SV *sv = base[i];
                assert(sv);
                if (SvREFCNT(sv) > 1)
                    base[i] = newSVsv(sv);
                else
                    SvREFCNT_inc_simple_void_NN(sv);
            }
            av_clear(av);
            if (max_minus_one >= 0) {
                av_extend(av, max_minus_one);
                Copy(base, AvARRAY(av), max, SV*);
            }
            AvFILLp(av) = max_minus_one;
            AvREIFY_off(av);
            AvREAL_on(av);
        }
    }
    LEAVE;
    PL_stack_sp = ORIGMARK + max;
    return nextop;
}

static I32
S_sortcv(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;
    SV *olda, *oldb;

    PERL_ARGS_ASSERT_SORTCV;

    olda = GvSV(PL_firstgv);
    GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);
    SvREFCNT_dec(olda);
    oldb = GvSV(PL_secondgv);
    GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);
    SvREFCNT_dec(oldb);
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    AV * const av = GvAV(PL_defgv);
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV_STACKED;

    if (AvREAL(av)) {
        av_clear(av);
        AvREAL_off(av);
        AvREIFY_on(av);
    }
    if (AvMAX(av) < 1) {
        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);
            AvARRAY(av) = ary;
        }
        if (AvMAX(av) < 1) {
            Renew(ary,2,SV*);
            AvMAX(av) = 1;
            AvARRAY(av) = ary;
            AvALLOC(av) = ary;
        }
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
{
    dSP;
    const I32 oldsaveix = PL_savestack_ix;
    CV * const cv=MUTABLE_CV(PL_sortcop);
    I32 result;
    PMOP * const pm = PL_curpm;

    PERL_ARGS_ASSERT_SORTCV_XSUB;

    SP = PL_stack_base;
    PUSHMARK(SP);
    EXTEND(SP, 2);
    *++SP = a;
    *++SP = b;
    PUTBACK;
    (void)(*CvXSUB(cv))(aTHX_ cv);
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_ncmp(pTHX_ SV *const a, SV *const b)
{
    I32 cmp = do_ncmp(a, b);

    PERL_ARGS_ASSERT_SV_NCMP;

    /* do_ncmp() returns 2 for an unordered comparison (a NaN operand) */
    if (cmp == 2) {
        if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
        return 0;
    }

    return cmp;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_SV_NCMP_DESC;

    return -S_sv_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    const IV iv1 = SvIV(a);
    const IV iv2 = SvIV(b);

    PERL_ARGS_ASSERT_SV_I_NCMP;

    return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_SV_I_NCMP_DESC;

    return -S_sv_i_ncmp(aTHX_ a, b);
}

/* Call the overloaded (amagic) comparison method if either operand has
 * overloading; otherwise yield NULL */
#define tryCALL_AMAGICbin(left,right,meth) \
    (SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL;

/* Normalize a comparison result to -1, 0 or 1 */
#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))

PERL_STATIC_FORCE_INLINE I32
S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_AMAGIC_NCMP_DESC;

    return -S_amagic_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_I_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_i_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_AMAGIC_I_NCMP_DESC;

    return -S_amagic_i_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp(str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_AMAGIC_CMP_DESC;

    return -S_amagic_cmp(aTHX_ str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_cmp_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_CMP_DESC;

    return -sv_cmp(str1, str2);
}

#ifdef USE_LOCALE_COLLATE

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp_locale(str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE_DESC;

    return -S_amagic_cmp_locale(aTHX_ str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_CMP_LOCALE_DESC;

    return -sv_cmp_locale(str1, str2);
}

#endif

/*
 * ex: set ts=8 sts=4 sw=4 et:
 */