This is a live mirror of the Perl 5 development currently hosted at https://github.com/perl/perl5
parts/inc/utf8: make sure var is def'd before using
[perl5.git] / pp_sort.c
... / ...
CommitLineData
1/* pp_sort.c
2 *
3 * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
4 * 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
5 *
6 * You may distribute under the terms of either the GNU General Public
7 * License or the Artistic License, as specified in the README file.
8 *
9 */
10
11/*
12 * ...they shuffled back towards the rear of the line. 'No, not at the
13 * rear!' the slave-driver shouted. 'Three files up. And stay there...
14 *
15 * [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
16 */
17
18/* This file contains pp ("push/pop") functions that
19 * execute the opcodes that make up a perl program. A typical pp function
20 * expects to find its arguments on the stack, and usually pushes its
21 * results onto the stack, hence the 'pp' terminology. Each OP structure
22 * contains a pointer to the relevant pp_foo() function.
23 *
24 * This particular file just contains pp_sort(), which is complex
25 * enough to merit its own file! See the other pp*.c files for the rest of
26 * the pp_ functions.
27 */
28
29#include "EXTERN.h"
30#define PERL_IN_PP_SORT_C
31#include "perl.h"
32
33#define sv_cmp_static Perl_sv_cmp
34#define sv_cmp_locale_static Perl_sv_cmp_locale
35
36#ifndef SMALLSORT
37#define SMALLSORT (200)
38#endif
39
40/* Flags for qsortsv and mergesortsv */
41#define SORTf_DESC 1
42#define SORTf_STABLE 2
43#define SORTf_UNSTABLE 8
44
45/*
46 * The mergesort implementation is by Peter M. Mcilroy <pmcilroy@lucent.com>.
47 *
48 * The original code was written in conjunction with BSD Computer Software
49 * Research Group at University of California, Berkeley.
50 *
51 * See also: "Optimistic Sorting and Information Theoretic Complexity"
52 * Peter McIlroy
53 * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
54 * pp 467-474, Austin, Texas, 25-27 January 1993.
55 *
56 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>.
57 *
58 * The code can be distributed under the same terms as Perl itself.
59 *
60 */
61
62
63typedef char * aptr; /* pointer for arithmetic on sizes */
64typedef SV * gptr; /* pointers in our lists */
65
66/* Binary merge internal sort, with a few special mods
67** for the special perl environment it now finds itself in.
68**
69** Things that were once options have been hotwired
70** to values suitable for this use. In particular, we'll always
71** initialize looking for natural runs, we'll always produce stable
72** output, and we'll always do Peter McIlroy's binary merge.
73*/
74
75/* Pointer types for arithmetic and storage and convenience casts */
76
77#define APTR(P) ((aptr)(P))
78#define GPTP(P) ((gptr *)(P))
79#define GPPP(P) ((gptr **)(P))
80
81
82/* byte offset from pointer P to (larger) pointer Q */
83#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
84
85#define PSIZE sizeof(gptr)
86
87/* If PSIZE is power of 2, make PSHIFT that power, if that helps */
88
89#ifdef PSHIFT
90#define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT))
91#define PNBYTE(N) ((N) << (PSHIFT))
92#define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N)))
93#else
94/* Leave optimization to compiler */
95#define PNELEM(P, Q) (GPTP(Q) - GPTP(P))
96#define PNBYTE(N) ((N) * (PSIZE))
97#define PINDEX(P, N) (GPTP(P) + (N))
98#endif
99
100/* Pointer into other corresponding to pointer into this */
101#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))
102
103#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
104
105
106/* Runs are identified by a pointer in the auxiliary list.
107** The pointer is at the start of the list,
108** and it points to the start of the next list.
109** NEXT is used as an lvalue, too.
110*/
111
112#define NEXT(P) (*GPPP(P))
113
114
115/* PTHRESH is the minimum number of pairs with the same sense to justify
116** checking for a run and extending it. Note that PTHRESH counts PAIRS,
117** not just elements, so PTHRESH == 8 means a run of 16.
118*/
119
120#define PTHRESH (8)
121
122/* RTHRESH is the number of elements in a run that must compare low
123** to the low element from the opposing run before we justify
124** doing a binary rampup instead of single stepping.
125** In random input, N in a row low should only happen with
126** probability 2^(1-N), so we can risk that we are dealing
127** with orderly input without paying much when we aren't.
128*/
129
130#define RTHRESH (6)
131
132
133/*
134** Overview of algorithm and variables.
135** The array of elements at list1 will be organized into runs of length 2,
136** or runs of length >= 2 * PTHRESH. We only try to form long runs when
137** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
138**
139** Unless otherwise specified, pair pointers address the first of two elements.
140**
141** b and b+1 are a pair that compare with sense "sense".
142** b is the "bottom" of adjacent pairs that might form a longer run.
143**
144** p2 parallels b in the list2 array, where runs are defined by
145** a pointer chain.
146**
147** t represents the "top" of the adjacent pairs that might extend
148** the run beginning at b. Usually, t addresses a pair
149** that compares with opposite sense from (b,b+1).
150** However, it may also address a singleton element at the end of list1,
151** or it may be equal to "last", the first element beyond list1.
152**
153** r addresses the Nth pair following b. If this would be beyond t,
154** we back it off to t. Only when r is less than t do we consider the
155** run long enough to consider checking.
156**
157** q addresses a pair such that the pairs at b through q already form a run.
158** Often, q will equal b, indicating we only are sure of the pair itself.
159** However, a search on the previous cycle may have revealed a longer run,
160** so q may be greater than b.
161**
162** p is used to work back from a candidate r, trying to reach q,
163** which would mean b through r would be a run. If we discover such a run,
164** we start q at r and try to push it further towards t.
165** If b through r is NOT a run, we detect the wrong order at (p-1,p).
166** In any event, after the check (if any), we have two main cases.
167**
168** 1) Short run. b <= q < p <= r <= t.
169** b through q is a run (perhaps trivial)
170** q through p are uninteresting pairs
171** p through r is a run
172**
173** 2) Long run. b < r <= q < t.
174** b through q is a run (of length >= 2 * PTHRESH)
175**
176** Note that degenerate cases are not only possible, but likely.
177** For example, if the pair following b compares with opposite sense,
178** then b == q < p == r == t.
179*/
180
181
182static IV
183dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
184{
185 I32 sense;
186 gptr *b, *p, *q, *t, *p2;
187 gptr *last, *r;
188 IV runs = 0;
189
190 b = list1;
191 last = PINDEX(b, nmemb);
192 sense = (cmp(aTHX_ *b, *(b+1)) > 0);
193 for (p2 = list2; b < last; ) {
194 /* We just started, or just reversed sense.
195 ** Set t at end of pairs with the prevailing sense.
196 */
197 for (p = b+2, t = p; ++p < last; t = ++p) {
198 if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
199 }
200 q = b;
201 /* Having laid out the playing field, look for long runs */
202 do {
203 p = r = b + (2 * PTHRESH);
204 if (r >= t) p = r = t; /* too short to care about */
205 else {
206 while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
207 ((p -= 2) > q)) {}
208 if (p <= q) {
209 /* b through r is a (long) run.
210 ** Extend it as far as possible.
211 */
212 p = q = r;
213 while (((p += 2) < t) &&
214 ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
215 r = p = q + 2; /* no simple pairs, no after-run */
216 }
217 }
218 if (q > b) { /* run of greater than 2 at b */
219 gptr *savep = p;
220
221 p = q += 2;
222 /* pick up singleton, if possible */
223 if ((p == t) &&
224 ((t + 1) == last) &&
225 ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
226 savep = r = p = q = last;
227 p2 = NEXT(p2) = p2 + (p - b); ++runs;
228 if (sense)
229 while (b < --p) {
230 const gptr c = *b;
231 *b++ = *p;
232 *p = c;
233 }
234 p = savep;
235 }
236 while (q < p) { /* simple pairs */
237 p2 = NEXT(p2) = p2 + 2; ++runs;
238 if (sense) {
239 const gptr c = *q++;
240 *(q-1) = *q;
241 *q++ = c;
242 } else q += 2;
243 }
244 if (((b = p) == t) && ((t+1) == last)) {
245 NEXT(p2) = p2 + 1; ++runs;
246 b++;
247 }
248 q = r;
249 } while (b < t);
250 sense = !sense;
251 }
252 return runs;
253}
254
255
256/* The original merge sort, in use since 5.7, was as fast as, or faster than,
257 * qsort on many platforms, but slower than qsort, conspicuously so,
258 * on others. The most likely explanation was platform-specific
259 * differences in cache sizes and relative speeds.
260 *
261 * The quicksort divide-and-conquer algorithm guarantees that, as the
262 * problem is subdivided into smaller and smaller parts, the parts
263 * fit into smaller (and faster) caches. So it doesn't matter how
264 * many levels of cache exist, quicksort will "find" them, and,
265 * as long as smaller is faster, take advantage of them.
266 *
267 * By contrast, consider how the original mergesort algorithm worked.
268 * Suppose we have five runs (each typically of length 2 after dynprep).
269 *
270 * pass base aux
271 * 0 1 2 3 4 5
272 * 1 12 34 5
273 * 2 1234 5
274 * 3 12345
275 * 4 12345
276 *
277 * Adjacent pairs are merged in "grand sweeps" through the input.
278 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
279 * runs 3 and 4 are merged and the runs from run 5 have been copied.
280 * The only cache that matters is one large enough to hold *all* the input.
281 * On some platforms, this may be many times slower than smaller caches.
282 *
283 * The following pseudo-code uses the same basic merge algorithm,
284 * but in a divide-and-conquer way.
285 *
286 * # merge $runs runs at offset $offset of list $list1 into $list2.
287 * # all unmerged runs ($runs == 1) originate in list $base.
288 * sub mgsort2 {
289 * my ($offset, $runs, $base, $list1, $list2) = @_;
290 *
291 * if ($runs == 1) {
292 * if ($list1 is $base) copy run to $list2
293 * return offset of end of list (or copy)
294 * } else {
295 * $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
296 * mgsort2($off2, $runs/2, $base, $list2, $list1)
297 * merge the adjacent runs at $offset of $list1 into $list2
298 * return the offset of the end of the merged runs
299 * }
300 * }
301 * mgsort2(0, $runs, $base, $aux, $base);
302 *
303 * For our 5 runs, the tree of calls looks like
304 *
305 * 5
306 * 3 2
307 * 2 1 1 1
308 * 1 1
309 *
310 * 1 2 3 4 5
311 *
312 * and the corresponding activity looks like
313 *
314 * copy runs 1 and 2 from base to aux
315 * merge runs 1 and 2 from aux to base
316 * (run 3 is where it belongs, no copy needed)
317 * merge runs 12 and 3 from base to aux
318 * (runs 4 and 5 are where they belong, no copy needed)
319 * merge runs 4 and 5 from base to aux
320 * merge runs 123 and 45 from aux to base
321 *
322 * Note that we merge runs 1 and 2 immediately after copying them,
323 * while they are still likely to be in fast cache. Similarly,
324 * run 3 is merged with run 12 while it still may be lingering in cache.
325 * This implementation should therefore enjoy much of the cache-friendly
326 * behavior that quicksort does. In addition, it does less copying
327 * than the original mergesort implementation (only runs 1 and 2 are copied)
328 * and the "balancing" of merges is better (merged runs comprise more nearly
329 * equal numbers of original runs).
330 *
331 * The actual cache-friendly implementation will use a pseudo-stack
332 * to avoid recursion, and will unroll processing of runs of length 2,
333 * but it is otherwise similar to the recursive implementation.
334 */
335
336typedef struct {
337 IV offset; /* offset of 1st of 2 runs at this level */
338 IV runs; /* how many runs must be combined into 1 */
339} off_runs; /* pseudo-stack element */
340
341
342static I32
343cmp_desc(pTHX_ gptr const a, gptr const b)
344{
345 return -PL_sort_RealCmp(aTHX_ a, b);
346}
347
348/*
349=head1 SV Manipulation Functions
350
351=for apidoc sortsv_flags
352
353In-place sort an array of SV pointers with the given comparison routine,
354with various SORTf_* flag options.
355
356=cut
357*/
358void
359Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
360{
361 IV i, run, offset;
362 I32 sense, level;
363 gptr *f1, *f2, *t, *b, *p;
364 int iwhich;
365 gptr *aux;
366 gptr *p1;
367 gptr small[SMALLSORT];
368 gptr *which[3];
369 off_runs stack[60], *stackp;
370 SVCOMPARE_t savecmp = NULL;
371
372 PERL_ARGS_ASSERT_SORTSV_FLAGS;
373 if (nmemb <= 1) return; /* sorted trivially */
374
375 if ((flags & SORTf_DESC) != 0) {
376 savecmp = PL_sort_RealCmp; /* Save current comparison routine, if any */
377 PL_sort_RealCmp = cmp; /* Put comparison routine where cmp_desc can find it */
378 cmp = cmp_desc;
379 }
380
381 if (nmemb <= SMALLSORT) aux = small; /* use stack for aux array */
382 else { Newx(aux,nmemb,gptr); } /* allocate auxiliary array */
383 level = 0;
384 stackp = stack;
385 stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
386 stackp->offset = offset = 0;
387 which[0] = which[2] = base;
388 which[1] = aux;
389 for (;;) {
390 /* On levels where both runs have be constructed (stackp->runs == 0),
391 * merge them, and note the offset of their end, in case the offset
392 * is needed at the next level up. Hop up a level, and,
393 * as long as stackp->runs is 0, keep merging.
394 */
395 IV runs = stackp->runs;
396 if (runs == 0) {
397 gptr *list1, *list2;
398 iwhich = level & 1;
399 list1 = which[iwhich]; /* area where runs are now */
400 list2 = which[++iwhich]; /* area for merged runs */
401 do {
402 gptr *l1, *l2, *tp2;
403 offset = stackp->offset;
404 f1 = p1 = list1 + offset; /* start of first run */
405 p = tp2 = list2 + offset; /* where merged run will go */
406 t = NEXT(p); /* where first run ends */
407 f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
408 t = NEXT(t); /* where second runs ends */
409 l2 = POTHER(t, list2, list1); /* ... on the other side */
410 offset = PNELEM(list2, t);
411 while (f1 < l1 && f2 < l2) {
412 /* If head 1 is larger than head 2, find ALL the elements
413 ** in list 2 strictly less than head1, write them all,
414 ** then head 1. Then compare the new heads, and repeat,
415 ** until one or both lists are exhausted.
416 **
417 ** In all comparisons (after establishing
418 ** which head to merge) the item to merge
419 ** (at pointer q) is the first operand of
420 ** the comparison. When we want to know
421 ** if "q is strictly less than the other",
422 ** we can't just do
423 ** cmp(q, other) < 0
424 ** because stability demands that we treat equality
425 ** as high when q comes from l2, and as low when
426 ** q was from l1. So we ask the question by doing
427 ** cmp(q, other) <= sense
428 ** and make sense == 0 when equality should look low,
429 ** and -1 when equality should look high.
430 */
431
432 gptr *q;
433 if (cmp(aTHX_ *f1, *f2) <= 0) {
434 q = f2; b = f1; t = l1;
435 sense = -1;
436 } else {
437 q = f1; b = f2; t = l2;
438 sense = 0;
439 }
440
441
442 /* ramp up
443 **
444 ** Leave t at something strictly
445 ** greater than q (or at the end of the list),
446 ** and b at something strictly less than q.
447 */
448 for (i = 1, run = 0 ;;) {
449 if ((p = PINDEX(b, i)) >= t) {
450 /* off the end */
451 if (((p = PINDEX(t, -1)) > b) &&
452 (cmp(aTHX_ *q, *p) <= sense))
453 t = p;
454 else b = p;
455 break;
456 } else if (cmp(aTHX_ *q, *p) <= sense) {
457 t = p;
458 break;
459 } else b = p;
460 if (++run >= RTHRESH) i += i;
461 }
462
463
464 /* q is known to follow b and must be inserted before t.
465 ** Increment b, so the range of possibilities is [b,t).
466 ** Round binary split down, to favor early appearance.
467 ** Adjust b and t until q belongs just before t.
468 */
469
470 b++;
471 while (b < t) {
472 p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
473 if (cmp(aTHX_ *q, *p) <= sense) {
474 t = p;
475 } else b = p + 1;
476 }
477
478
479 /* Copy all the strictly low elements */
480
481 if (q == f1) {
482 FROMTOUPTO(f2, tp2, t);
483 *tp2++ = *f1++;
484 } else {
485 FROMTOUPTO(f1, tp2, t);
486 *tp2++ = *f2++;
487 }
488 }
489
490
491 /* Run out remaining list */
492 if (f1 == l1) {
493 if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
494 } else FROMTOUPTO(f1, tp2, l1);
495 p1 = NEXT(p1) = POTHER(tp2, list2, list1);
496
497 if (--level == 0) goto done;
498 --stackp;
499 t = list1; list1 = list2; list2 = t; /* swap lists */
500 } while ((runs = stackp->runs) == 0);
501 }
502
503
504 stackp->runs = 0; /* current run will finish level */
505 /* While there are more than 2 runs remaining,
506 * turn them into exactly 2 runs (at the "other" level),
507 * each made up of approximately half the runs.
508 * Stack the second half for later processing,
509 * and set about producing the first half now.
510 */
511 while (runs > 2) {
512 ++level;
513 ++stackp;
514 stackp->offset = offset;
515 runs -= stackp->runs = runs / 2;
516 }
517 /* We must construct a single run from 1 or 2 runs.
518 * All the original runs are in which[0] == base.
519 * The run we construct must end up in which[level&1].
520 */
521 iwhich = level & 1;
522 if (runs == 1) {
523 /* Constructing a single run from a single run.
524 * If it's where it belongs already, there's nothing to do.
525 * Otherwise, copy it to where it belongs.
526 * A run of 1 is either a singleton at level 0,
527 * or the second half of a split 3. In neither event
528 * is it necessary to set offset. It will be set by the merge
529 * that immediately follows.
530 */
531 if (iwhich) { /* Belongs in aux, currently in base */
532 f1 = b = PINDEX(base, offset); /* where list starts */
533 f2 = PINDEX(aux, offset); /* where list goes */
534 t = NEXT(f2); /* where list will end */
535 offset = PNELEM(aux, t); /* offset thereof */
536 t = PINDEX(base, offset); /* where it currently ends */
537 FROMTOUPTO(f1, f2, t); /* copy */
538 NEXT(b) = t; /* set up parallel pointer */
539 } else if (level == 0) goto done; /* single run at level 0 */
540 } else {
541 /* Constructing a single run from two runs.
542 * The merge code at the top will do that.
543 * We need only make sure the two runs are in the "other" array,
544 * so they'll end up in the correct array after the merge.
545 */
546 ++level;
547 ++stackp;
548 stackp->offset = offset;
549 stackp->runs = 0; /* take care of both runs, trigger merge */
550 if (!iwhich) { /* Merged runs belong in aux, copy 1st */
551 f1 = b = PINDEX(base, offset); /* where first run starts */
552 f2 = PINDEX(aux, offset); /* where it will be copied */
553 t = NEXT(f2); /* where first run will end */
554 offset = PNELEM(aux, t); /* offset thereof */
555 p = PINDEX(base, offset); /* end of first run */
556 t = NEXT(t); /* where second run will end */
557 t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
558 FROMTOUPTO(f1, f2, t); /* copy both runs */
559 NEXT(b) = p; /* paralleled pointer for 1st */
560 NEXT(p) = t; /* ... and for second */
561 }
562 }
563 }
564 done:
565 if (aux != small) Safefree(aux); /* free iff allocated */
566 if (savecmp != NULL) {
567 PL_sort_RealCmp = savecmp; /* Restore current comparison routine, if any */
568 }
569 return;
570}
571
572/*
573 * The quicksort implementation was derived from source code contributed
574 * by Tom Horsley.
575 *
576 * NOTE: this code was derived from Tom Horsley's qsort replacement
577 * and should not be confused with the original code.
578 */
579
580/* Copyright (C) Tom Horsley, 1997. All rights reserved.
581
582 Permission granted to distribute under the same terms as perl which are
583 (briefly):
584
585 This program is free software; you can redistribute it and/or modify
586 it under the terms of either:
587
588 a) the GNU General Public License as published by the Free
589 Software Foundation; either version 1, or (at your option) any
590 later version, or
591
592 b) the "Artistic License" which comes with this Kit.
593
594 Details on the perl license can be found in the perl source code which
595 may be located via the www.perl.com web page.
596
597 This is the most wonderfulest possible qsort I can come up with (and
598 still be mostly portable) My (limited) tests indicate it consistently
599 does about 20% fewer calls to compare than does the qsort in the Visual
600 C++ library, other vendors may vary.
601
602 Some of the ideas in here can be found in "Algorithms" by Sedgewick,
603 others I invented myself (or more likely re-invented since they seemed
604 pretty obvious once I watched the algorithm operate for a while).
605
606 Most of this code was written while watching the Marlins sweep the Giants
607 in the 1997 National League Playoffs - no Braves fans allowed to use this
608 code (just kidding :-).
609
610 I realize that if I wanted to be true to the perl tradition, the only
611 comment in this file would be something like:
612
613 ...they shuffled back towards the rear of the line. 'No, not at the
614 rear!' the slave-driver shouted. 'Three files up. And stay there...
615
616 However, I really needed to violate that tradition just so I could keep
617 track of what happens myself, not to mention some poor fool trying to
618 understand this years from now :-).
619*/
620
621/* ********************************************************** Configuration */
622
623#ifndef QSORT_ORDER_GUESS
624#define QSORT_ORDER_GUESS 2 /* Select doubling version of the netBSD trick */
625#endif
626
627/* QSORT_MAX_STACK is the largest number of partitions that can be stacked up for
628 future processing - a good max upper bound is log base 2 of memory size
629 (32 on 32 bit machines, 64 on 64 bit machines, etc). In reality can
630 safely be smaller than that since the program is taking up some space and
631 most operating systems only let you grab some subset of contiguous
632 memory (not to mention that you are normally sorting data larger than
633 1 byte element size :-).
634*/
635#ifndef QSORT_MAX_STACK
636#define QSORT_MAX_STACK 32
637#endif
638
639/* QSORT_BREAK_EVEN is the size of the largest partition we should insertion sort.
640 Anything bigger and we use qsort. If you make this too small, the qsort
641 will probably break (or become less efficient), because it doesn't expect
642 the middle element of a partition to be the same as the right or left -
643 you have been warned).
644*/
645#ifndef QSORT_BREAK_EVEN
646#define QSORT_BREAK_EVEN 6
647#endif
648
649/* QSORT_PLAY_SAFE is the size of the largest partition we're willing
650 to go quadratic on. We innoculate larger partitions against
651 quadratic behavior by shuffling them before sorting. This is not
652 an absolute guarantee of non-quadratic behavior, but it would take
653 staggeringly bad luck to pick extreme elements as the pivot
654 from randomized data.
655*/
656#ifndef QSORT_PLAY_SAFE
657#define QSORT_PLAY_SAFE 255
658#endif
659
660/* ************************************************************* Data Types */
661
662/* hold left and right index values of a partition waiting to be sorted (the
663 partition includes both left and right - right is NOT one past the end or
664 anything like that).
665*/
666struct partition_stack_entry {
667 int left;
668 int right;
669#ifdef QSORT_ORDER_GUESS
670 int qsort_break_even;
671#endif
672};
673
674/* ******************************************************* Shorthand Macros */
675
676/* Note that these macros will be used from inside the qsort function where
677 we happen to know that the variable 'elt_size' contains the size of an
678 array element and the variable 'temp' points to enough space to hold a
679 temp element and the variable 'array' points to the array being sorted
680 and 'compare' is the pointer to the compare routine.
681
682 Also note that there are very many highly architecture specific ways
683 these might be sped up, but this is simply the most generally portable
684 code I could think of.
685*/
686
687/* Return < 0 == 0 or > 0 as the value of elt1 is < elt2, == elt2, > elt2
688*/
689#define qsort_cmp(elt1, elt2) \
690 ((*compare)(aTHX_ array[elt1], array[elt2]))
691
692#ifdef QSORT_ORDER_GUESS
693#define QSORT_NOTICE_SWAP swapped++;
694#else
695#define QSORT_NOTICE_SWAP
696#endif
697
698/* swaps contents of array elements elt1, elt2.
699*/
700#define qsort_swap(elt1, elt2) \
701 STMT_START { \
702 QSORT_NOTICE_SWAP \
703 temp = array[elt1]; \
704 array[elt1] = array[elt2]; \
705 array[elt2] = temp; \
706 } STMT_END
707
708/* rotate contents of elt1, elt2, elt3 such that elt1 gets elt2, elt2 gets
709 elt3 and elt3 gets elt1.
710*/
711#define qsort_rotate(elt1, elt2, elt3) \
712 STMT_START { \
713 QSORT_NOTICE_SWAP \
714 temp = array[elt1]; \
715 array[elt1] = array[elt2]; \
716 array[elt2] = array[elt3]; \
717 array[elt3] = temp; \
718 } STMT_END
719
720/* ************************************************************ Debug stuff */
721
722#ifdef QSORT_DEBUG
723
724static void
725break_here()
726{
727 return; /* good place to set a breakpoint */
728}
729
730#define qsort_assert(t) (void)( (t) || (break_here(), 0) )
731
732static void
733doqsort_all_asserts(
734 void * array,
735 size_t num_elts,
736 size_t elt_size,
737 int (*compare)(const void * elt1, const void * elt2),
738 int pc_left, int pc_right, int u_left, int u_right)
739{
740 int i;
741
742 qsort_assert(pc_left <= pc_right);
743 qsort_assert(u_right < pc_left);
744 qsort_assert(pc_right < u_left);
745 for (i = u_right + 1; i < pc_left; ++i) {
746 qsort_assert(qsort_cmp(i, pc_left) < 0);
747 }
748 for (i = pc_left; i < pc_right; ++i) {
749 qsort_assert(qsort_cmp(i, pc_right) == 0);
750 }
751 for (i = pc_right + 1; i < u_left; ++i) {
752 qsort_assert(qsort_cmp(pc_right, i) < 0);
753 }
754}
755
756#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) \
757 doqsort_all_asserts(array, num_elts, elt_size, compare, \
758 PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT)
759
760#else
761
762#define qsort_assert(t) ((void)0)
763
764#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) ((void)0)
765
766#endif
767
768/*
769=head1 Array Manipulation Functions
770
771=for apidoc sortsv
772
773In-place sort an array of SV pointers with the given comparison routine.
774
775Currently this always uses mergesort. See C<L</sortsv_flags>> for a more
776flexible routine.
777
778=cut
779*/
780
781void
782Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
783{
784 PERL_ARGS_ASSERT_SORTSV;
785
786 sortsv_flags(array, nmemb, cmp, 0);
787}
788
789#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
790#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
791#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
792
793PP(pp_sort)
794{
795 dSP; dMARK; dORIGMARK;
796 SV **p1 = ORIGMARK+1, **p2;
797 SSize_t max, i;
798 AV* av = NULL;
799 GV *gv;
800 CV *cv = NULL;
801 U8 gimme = GIMME_V;
802 OP* const nextop = PL_op->op_next;
803 I32 overloading = 0;
804 bool hasargs = FALSE;
805 bool copytmps;
806 I32 is_xsub = 0;
807 const U8 priv = PL_op->op_private;
808 const U8 flags = PL_op->op_flags;
809 U32 sort_flags = 0;
810 void (*sortsvp)(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
811 = Perl_sortsv_flags;
812 I32 all_SIVs = 1;
813
814 if ((priv & OPpSORT_DESCEND) != 0)
815 sort_flags |= SORTf_DESC;
816 if ((priv & OPpSORT_STABLE) != 0)
817 sort_flags |= SORTf_STABLE;
818 if ((priv & OPpSORT_UNSTABLE) != 0)
819 sort_flags |= SORTf_UNSTABLE;
820
821 if (gimme != G_ARRAY) {
822 SP = MARK;
823 EXTEND(SP,1);
824 RETPUSHUNDEF;
825 }
826
827 ENTER;
828 SAVEVPTR(PL_sortcop);
829 if (flags & OPf_STACKED) {
830 if (flags & OPf_SPECIAL) {
831 OP *nullop = OpSIBLING(cLISTOP->op_first); /* pass pushmark */
832 assert(nullop->op_type == OP_NULL);
833 PL_sortcop = nullop->op_next;
834 }
835 else {
836 GV *autogv = NULL;
837 HV *stash;
838 cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
839 check_cv:
840 if (cv && SvPOK(cv)) {
841 const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
842 if (proto && strEQ(proto, "$$")) {
843 hasargs = TRUE;
844 }
845 }
846 if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
847 is_xsub = 1;
848 }
849 else if (!(cv && CvROOT(cv))) {
850 if (gv) {
851 goto autoload;
852 }
853 else if (!CvANON(cv) && (gv = CvGV(cv))) {
854 if (cv != GvCV(gv)) cv = GvCV(gv);
855 autoload:
856 if (!autogv && (
857 autogv = gv_autoload_pvn(
858 GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
859 GvNAMEUTF8(gv) ? SVf_UTF8 : 0
860 )
861 )) {
862 cv = GvCVu(autogv);
863 goto check_cv;
864 }
865 else {
866 SV *tmpstr = sv_newmortal();
867 gv_efullname3(tmpstr, gv, NULL);
868 DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
869 SVfARG(tmpstr));
870 }
871 }
872 else {
873 DIE(aTHX_ "Undefined subroutine in sort");
874 }
875 }
876
877 if (is_xsub)
878 PL_sortcop = (OP*)cv;
879 else
880 PL_sortcop = CvSTART(cv);
881 }
882 }
883 else {
884 PL_sortcop = NULL;
885 }
886
887 /* optimiser converts "@a = sort @a" to "sort \@a". In this case,
888 * push (@a) onto stack, then assign result back to @a at the end of
889 * this function */
890 if (priv & OPpSORT_INPLACE) {
891 assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
892 (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
893 av = MUTABLE_AV((*SP));
894 if (SvREADONLY(av))
895 Perl_croak_no_modify();
896 max = AvFILL(av) + 1;
897 MEXTEND(SP, max);
898 if (SvMAGICAL(av)) {
899 for (i=0; i < max; i++) {
900 SV **svp = av_fetch(av, i, FALSE);
901 *SP++ = (svp) ? *svp : NULL;
902 }
903 }
904 else {
905 SV **svp = AvARRAY(av);
906 assert(svp || max == 0);
907 for (i = 0; i < max; i++)
908 *SP++ = *svp++;
909 }
910 SP--;
911 p1 = p2 = SP - (max-1);
912 }
913 else {
914 p2 = MARK+1;
915 max = SP - MARK;
916 }
917
918 /* shuffle stack down, removing optional initial cv (p1!=p2), plus
919 * any nulls; also stringify or converting to integer or number as
920 * required any args */
921 copytmps = cBOOL(PL_sortcop);
922 for (i=max; i > 0 ; i--) {
923 if ((*p1 = *p2++)) { /* Weed out nulls. */
924 if (copytmps && SvPADTMP(*p1)) {
925 *p1 = sv_mortalcopy(*p1);
926 }
927 SvTEMP_off(*p1);
928 if (!PL_sortcop) {
929 if (priv & OPpSORT_NUMERIC) {
930 if (priv & OPpSORT_INTEGER) {
931 if (!SvIOK(*p1))
932 (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
933 }
934 else {
935 if (!SvNSIOK(*p1))
936 (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
937 if (all_SIVs && !SvSIOK(*p1))
938 all_SIVs = 0;
939 }
940 }
941 else {
942 if (!SvPOK(*p1))
943 (void)sv_2pv_flags(*p1, 0,
944 SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
945 }
946 if (SvAMAGIC(*p1))
947 overloading = 1;
948 }
949 p1++;
950 }
951 else
952 max--;
953 }
954 if (max > 1) {
955 SV **start;
956 if (PL_sortcop) {
957 PERL_CONTEXT *cx;
958 const bool oldcatch = CATCH_GET;
959 I32 old_savestack_ix = PL_savestack_ix;
960
961 SAVEOP();
962
963 CATCH_SET(TRUE);
964 PUSHSTACKi(PERLSI_SORT);
965 if (!hasargs && !is_xsub) {
966 SAVEGENERICSV(PL_firstgv);
967 SAVEGENERICSV(PL_secondgv);
968 PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
969 gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
970 ));
971 PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
972 gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
973 ));
974 /* make sure the GP isn't removed out from under us for
975 * the SAVESPTR() */
976 save_gp(PL_firstgv, 0);
977 save_gp(PL_secondgv, 0);
978 /* we don't want modifications localized */
979 GvINTRO_off(PL_firstgv);
980 GvINTRO_off(PL_secondgv);
981 SAVEGENERICSV(GvSV(PL_firstgv));
982 SvREFCNT_inc(GvSV(PL_firstgv));
983 SAVEGENERICSV(GvSV(PL_secondgv));
984 SvREFCNT_inc(GvSV(PL_secondgv));
985 }
986
987 gimme = G_SCALAR;
988 cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
989 if (!(flags & OPf_SPECIAL)) {
990 cx->cx_type = CXt_SUB|CXp_MULTICALL;
991 cx_pushsub(cx, cv, NULL, hasargs);
992 if (!is_xsub) {
993 PADLIST * const padlist = CvPADLIST(cv);
994
995 if (++CvDEPTH(cv) >= 2)
996 pad_push(padlist, CvDEPTH(cv));
997 PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));
998
999 if (hasargs) {
1000 /* This is mostly copied from pp_entersub */
1001 AV * const av = MUTABLE_AV(PAD_SVl(0));
1002
1003 cx->blk_sub.savearray = GvAV(PL_defgv);
1004 GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
1005 }
1006
1007 }
1008 }
1009
1010 start = p1 - max;
1011 sortsvp(aTHX_ start, max,
1012 (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
1013 sort_flags);
1014
1015 /* Reset cx, in case the context stack has been reallocated. */
1016 cx = CX_CUR();
1017
1018 PL_stack_sp = PL_stack_base + cx->blk_oldsp;
1019
1020 CX_LEAVE_SCOPE(cx);
1021 if (!(flags & OPf_SPECIAL)) {
1022 assert(CxTYPE(cx) == CXt_SUB);
1023 cx_popsub(cx);
1024 }
1025 else
1026 assert(CxTYPE(cx) == CXt_NULL);
1027 /* there isn't a POPNULL ! */
1028
1029 cx_popblock(cx);
1030 CX_POP(cx);
1031 POPSTACK;
1032 CATCH_SET(oldcatch);
1033 }
1034 else {
1035 MEXTEND(SP, 20); /* Can't afford stack realloc on signal. */
1036 start = ORIGMARK+1;
1037 sortsvp(aTHX_ start, max,
1038 (priv & OPpSORT_NUMERIC)
1039 ? ( ( ( priv & OPpSORT_INTEGER) || all_SIVs)
1040 ? ( overloading ? S_amagic_i_ncmp : S_sv_i_ncmp)
1041 : ( overloading ? S_amagic_ncmp : S_sv_ncmp ) )
1042 : (
1043#ifdef USE_LOCALE_COLLATE
1044 IN_LC_RUNTIME(LC_COLLATE)
1045 ? ( overloading
1046 ? (SVCOMPARE_t)S_amagic_cmp_locale
1047 : (SVCOMPARE_t)sv_cmp_locale_static)
1048 :
1049#endif
1050 ( overloading ? (SVCOMPARE_t)S_amagic_cmp : (SVCOMPARE_t)sv_cmp_static)),
1051 sort_flags);
1052 }
1053 if ((priv & OPpSORT_REVERSE) != 0) {
1054 SV **q = start+max-1;
1055 while (start < q) {
1056 SV * const tmp = *start;
1057 *start++ = *q;
1058 *q-- = tmp;
1059 }
1060 }
1061 }
1062
1063 if (av) {
1064 /* copy back result to the array */
1065 SV** const base = MARK+1;
1066 if (SvMAGICAL(av)) {
1067 for (i = 0; i < max; i++)
1068 base[i] = newSVsv(base[i]);
1069 av_clear(av);
1070 av_extend(av, max);
1071 for (i=0; i < max; i++) {
1072 SV * const sv = base[i];
1073 SV ** const didstore = av_store(av, i, sv);
1074 if (SvSMAGICAL(sv))
1075 mg_set(sv);
1076 if (!didstore)
1077 sv_2mortal(sv);
1078 }
1079 }
1080 else {
1081 /* the elements of av are likely to be the same as the
1082 * (non-refcounted) elements on the stack, just in a different
1083 * order. However, its possible that someone's messed with av
1084 * in the meantime. So bump and unbump the relevant refcounts
1085 * first.
1086 */
1087 for (i = 0; i < max; i++) {
1088 SV *sv = base[i];
1089 assert(sv);
1090 if (SvREFCNT(sv) > 1)
1091 base[i] = newSVsv(sv);
1092 else
1093 SvREFCNT_inc_simple_void_NN(sv);
1094 }
1095 av_clear(av);
1096 if (max > 0) {
1097 av_extend(av, max);
1098 Copy(base, AvARRAY(av), max, SV*);
1099 }
1100 AvFILLp(av) = max - 1;
1101 AvREIFY_off(av);
1102 AvREAL_on(av);
1103 }
1104 }
1105 LEAVE;
1106 PL_stack_sp = ORIGMARK + max;
1107 return nextop;
1108}
1109
1110static I32
1111S_sortcv(pTHX_ SV *const a, SV *const b)
1112{
1113 const I32 oldsaveix = PL_savestack_ix;
1114 I32 result;
1115 PMOP * const pm = PL_curpm;
1116 COP * const cop = PL_curcop;
1117 SV *olda, *oldb;
1118
1119 PERL_ARGS_ASSERT_SORTCV;
1120
1121 olda = GvSV(PL_firstgv);
1122 GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);
1123 SvREFCNT_dec(olda);
1124 oldb = GvSV(PL_secondgv);
1125 GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);
1126 SvREFCNT_dec(oldb);
1127 PL_stack_sp = PL_stack_base;
1128 PL_op = PL_sortcop;
1129 CALLRUNOPS(aTHX);
1130 PL_curcop = cop;
1131 /* entry zero of a stack is always PL_sv_undef, which
1132 * simplifies converting a '()' return into undef in scalar context */
1133 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
1134 result = SvIV(*PL_stack_sp);
1135
1136 LEAVE_SCOPE(oldsaveix);
1137 PL_curpm = pm;
1138 return result;
1139}
1140
1141static I32
1142S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
1143{
1144 const I32 oldsaveix = PL_savestack_ix;
1145 I32 result;
1146 AV * const av = GvAV(PL_defgv);
1147 PMOP * const pm = PL_curpm;
1148 COP * const cop = PL_curcop;
1149
1150 PERL_ARGS_ASSERT_SORTCV_STACKED;
1151
1152 if (AvREAL(av)) {
1153 av_clear(av);
1154 AvREAL_off(av);
1155 AvREIFY_on(av);
1156 }
1157 if (AvMAX(av) < 1) {
1158 SV **ary = AvALLOC(av);
1159 if (AvARRAY(av) != ary) {
1160 AvMAX(av) += AvARRAY(av) - AvALLOC(av);
1161 AvARRAY(av) = ary;
1162 }
1163 if (AvMAX(av) < 1) {
1164 Renew(ary,2,SV*);
1165 AvMAX(av) = 1;
1166 AvARRAY(av) = ary;
1167 AvALLOC(av) = ary;
1168 }
1169 }
1170 AvFILLp(av) = 1;
1171
1172 AvARRAY(av)[0] = a;
1173 AvARRAY(av)[1] = b;
1174 PL_stack_sp = PL_stack_base;
1175 PL_op = PL_sortcop;
1176 CALLRUNOPS(aTHX);
1177 PL_curcop = cop;
1178 /* entry zero of a stack is always PL_sv_undef, which
1179 * simplifies converting a '()' return into undef in scalar context */
1180 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
1181 result = SvIV(*PL_stack_sp);
1182
1183 LEAVE_SCOPE(oldsaveix);
1184 PL_curpm = pm;
1185 return result;
1186}
1187
1188static I32
1189S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
1190{
1191 dSP;
1192 const I32 oldsaveix = PL_savestack_ix;
1193 CV * const cv=MUTABLE_CV(PL_sortcop);
1194 I32 result;
1195 PMOP * const pm = PL_curpm;
1196
1197 PERL_ARGS_ASSERT_SORTCV_XSUB;
1198
1199 SP = PL_stack_base;
1200 PUSHMARK(SP);
1201 EXTEND(SP, 2);
1202 *++SP = a;
1203 *++SP = b;
1204 PUTBACK;
1205 (void)(*CvXSUB(cv))(aTHX_ cv);
1206 /* entry zero of a stack is always PL_sv_undef, which
1207 * simplifies converting a '()' return into undef in scalar context */
1208 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
1209 result = SvIV(*PL_stack_sp);
1210
1211 LEAVE_SCOPE(oldsaveix);
1212 PL_curpm = pm;
1213 return result;
1214}
1215
1216
1217static I32
1218S_sv_ncmp(pTHX_ SV *const a, SV *const b)
1219{
1220 I32 cmp = do_ncmp(a, b);
1221
1222 PERL_ARGS_ASSERT_SV_NCMP;
1223
1224 if (cmp == 2) {
1225 if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
1226 return 0;
1227 }
1228
1229 return cmp;
1230}
1231
1232static I32
1233S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
1234{
1235 const IV iv1 = SvIV(a);
1236 const IV iv2 = SvIV(b);
1237
1238 PERL_ARGS_ASSERT_SV_I_NCMP;
1239
1240 return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
1241}
1242
1243#define tryCALL_AMAGICbin(left,right,meth) \
1244 (SvAMAGIC(left)||SvAMAGIC(right)) \
1245 ? amagic_call(left, right, meth, 0) \
1246 : NULL;
1247
1248#define SORT_NORMAL_RETURN_VALUE(val) (((val) > 0) ? 1 : ((val) ? -1 : 0))
1249
1250static I32
1251S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
1252{
1253 SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);
1254
1255 PERL_ARGS_ASSERT_AMAGIC_NCMP;
1256
1257 if (tmpsv) {
1258 if (SvIOK(tmpsv)) {
1259 const I32 i = SvIVX(tmpsv);
1260 return SORT_NORMAL_RETURN_VALUE(i);
1261 }
1262 else {
1263 const NV d = SvNV(tmpsv);
1264 return SORT_NORMAL_RETURN_VALUE(d);
1265 }
1266 }
1267 return S_sv_ncmp(aTHX_ a, b);
1268}
1269
1270static I32
1271S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
1272{
1273 SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);
1274
1275 PERL_ARGS_ASSERT_AMAGIC_I_NCMP;
1276
1277 if (tmpsv) {
1278 if (SvIOK(tmpsv)) {
1279 const I32 i = SvIVX(tmpsv);
1280 return SORT_NORMAL_RETURN_VALUE(i);
1281 }
1282 else {
1283 const NV d = SvNV(tmpsv);
1284 return SORT_NORMAL_RETURN_VALUE(d);
1285 }
1286 }
1287 return S_sv_i_ncmp(aTHX_ a, b);
1288}
1289
1290static I32
1291S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
1292{
1293 SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);
1294
1295 PERL_ARGS_ASSERT_AMAGIC_CMP;
1296
1297 if (tmpsv) {
1298 if (SvIOK(tmpsv)) {
1299 const I32 i = SvIVX(tmpsv);
1300 return SORT_NORMAL_RETURN_VALUE(i);
1301 }
1302 else {
1303 const NV d = SvNV(tmpsv);
1304 return SORT_NORMAL_RETURN_VALUE(d);
1305 }
1306 }
1307 return sv_cmp(str1, str2);
1308}
1309
1310#ifdef USE_LOCALE_COLLATE
1311
1312static I32
1313S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
1314{
1315 SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);
1316
1317 PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;
1318
1319 if (tmpsv) {
1320 if (SvIOK(tmpsv)) {
1321 const I32 i = SvIVX(tmpsv);
1322 return SORT_NORMAL_RETURN_VALUE(i);
1323 }
1324 else {
1325 const NV d = SvNV(tmpsv);
1326 return SORT_NORMAL_RETURN_VALUE(d);
1327 }
1328 }
1329 return sv_cmp_locale(str1, str2);
1330}
1331
1332#endif
1333
1334/*
1335 * ex: set ts=8 sts=4 sw=4 et:
1336 */