| 1 | /* pp_sort.c |
| 2 | * |
| 3 | * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, |
| 4 | * 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others |
| 5 | * |
| 6 | * You may distribute under the terms of either the GNU General Public |
| 7 | * License or the Artistic License, as specified in the README file. |
| 8 | * |
| 9 | */ |
| 10 | |
| 11 | /* |
| 12 | * ...they shuffled back towards the rear of the line. 'No, not at the |
| 13 | * rear!' the slave-driver shouted. 'Three files up. And stay there... |
| 14 | * |
| 15 | * [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"] |
| 16 | */ |
| 17 | |
| 18 | /* This file contains pp ("push/pop") functions that |
| 19 | * execute the opcodes that make up a perl program. A typical pp function |
| 20 | * expects to find its arguments on the stack, and usually pushes its |
| 21 | * results onto the stack, hence the 'pp' terminology. Each OP structure |
| 22 | * contains a pointer to the relevant pp_foo() function. |
| 23 | * |
| 24 | * This particular file just contains pp_sort(), which is complex |
| 25 | * enough to merit its own file! See the other pp*.c files for the rest of |
| 26 | * the pp_ functions. |
| 27 | */ |
| 28 | |
| 29 | #include "EXTERN.h" |
| 30 | #define PERL_IN_PP_SORT_C |
| 31 | #include "perl.h" |
| 32 | |
| 33 | #if defined(UNDER_CE) |
/* 'small' appears to be a reserved word under WinCE (or some such) */
| 35 | #define small xsmall |
| 36 | #endif |
| 37 | |
| 38 | #define sv_cmp_static Perl_sv_cmp |
| 39 | #define sv_cmp_locale_static Perl_sv_cmp_locale |
| 40 | |
| 41 | #ifndef SMALLSORT |
| 42 | #define SMALLSORT (200) |
| 43 | #endif |
| 44 | |
| 45 | /* Flags for qsortsv and mergesortsv */ |
| 46 | #define SORTf_DESC 1 |
| 47 | #define SORTf_STABLE 2 |
| 48 | #define SORTf_UNSTABLE 8 |
| 49 | |
| 50 | /* |
 * The mergesort implementation is by Peter M. McIlroy <pmcilroy@lucent.com>.
 *
 * The original code was written in conjunction with the BSD Computer
 * Software Research Group at the University of California, Berkeley.
| 55 | * |
| 56 | * See also: "Optimistic Sorting and Information Theoretic Complexity" |
| 57 | * Peter McIlroy |
| 58 | * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms), |
| 59 | * pp 467-474, Austin, Texas, 25-27 January 1993. |
| 60 | * |
 * The integration into Perl is by John P. Linderman <jpl.jpl@gmail.com>.
| 62 | * |
| 63 | * The code can be distributed under the same terms as Perl itself. |
| 64 | * |
| 65 | */ |
| 66 | |
| 67 | |
| 68 | typedef char * aptr; /* pointer for arithmetic on sizes */ |
| 69 | typedef SV * gptr; /* pointers in our lists */ |
| 70 | |
/* Binary merge internal sort, with a few special mods
** for the perl environment it now finds itself in.
| 73 | ** |
| 74 | ** Things that were once options have been hotwired |
| 75 | ** to values suitable for this use. In particular, we'll always |
| 76 | ** initialize looking for natural runs, we'll always produce stable |
| 77 | ** output, and we'll always do Peter McIlroy's binary merge. |
| 78 | */ |
| 79 | |
| 80 | /* Pointer types for arithmetic and storage and convenience casts */ |
| 81 | |
| 82 | #define APTR(P) ((aptr)(P)) |
| 83 | #define GPTP(P) ((gptr *)(P)) |
| 84 | #define GPPP(P) ((gptr **)(P)) |
| 85 | |
| 86 | |
| 87 | /* byte offset from pointer P to (larger) pointer Q */ |
| 88 | #define BYTEOFF(P, Q) (APTR(Q) - APTR(P)) |
| 89 | |
| 90 | #define PSIZE sizeof(gptr) |
| 91 | |
/* If PSIZE is a power of 2, make PSHIFT that power, if that helps */
| 93 | |
| 94 | #ifdef PSHIFT |
| 95 | #define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT)) |
| 96 | #define PNBYTE(N) ((N) << (PSHIFT)) |
| 97 | #define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N))) |
| 98 | #else |
| 99 | /* Leave optimization to compiler */ |
| 100 | #define PNELEM(P, Q) (GPTP(Q) - GPTP(P)) |
| 101 | #define PNBYTE(N) ((N) * (PSIZE)) |
| 102 | #define PINDEX(P, N) (GPTP(P) + (N)) |
| 103 | #endif |
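
/* Whichever definitions are in effect, PINDEX(p, n) addresses element n
 * of the list at p, PNELEM(p, q) counts the elements between p and q,
 * and PNBYTE(n) converts an element count to a byte count.
 */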
| 104 | |
| 105 | /* Pointer into other corresponding to pointer into this */ |
| 106 | #define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P)) |
| 107 | |
| 108 | #define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim) |
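
/* Note: FROMTOUPTO is a do-while, so it always copies at least one
 * element; callers must ensure src < lim on entry. On exit src == lim
 * and dst has advanced by the same number of elements.
 */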
| 109 | |
| 110 | |
| 111 | /* Runs are identified by a pointer in the auxiliary list. |
| 112 | ** The pointer is at the start of the list, |
| 113 | ** and it points to the start of the next list. |
| 114 | ** NEXT is used as an lvalue, too. |
| 115 | */ |
| 116 | |
| 117 | #define NEXT(P) (*GPPP(P)) |
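
/* Illustrative example: if the auxiliary list aux describes runs of
** lengths 2, 2 and 1 in a 5-element list, then NEXT(aux) == aux + 2,
** NEXT(aux + 2) == aux + 4, and NEXT(aux + 4) == aux + 5.
*/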
| 118 | |
| 119 | |
| 120 | /* PTHRESH is the minimum number of pairs with the same sense to justify |
| 121 | ** checking for a run and extending it. Note that PTHRESH counts PAIRS, |
| 122 | ** not just elements, so PTHRESH == 8 means a run of 16. |
| 123 | */ |
| 124 | |
| 125 | #define PTHRESH (8) |
| 126 | |
| 127 | /* RTHRESH is the number of elements in a run that must compare low |
| 128 | ** to the low element from the opposing run before we justify |
| 129 | ** doing a binary rampup instead of single stepping. |
| 130 | ** In random input, N in a row low should only happen with |
| 131 | ** probability 2^(1-N), so we can risk that we are dealing |
| 132 | ** with orderly input without paying much when we aren't. |
| 133 | */ |
| 134 | |
| 135 | #define RTHRESH (6) |
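
/* E.g., with RTHRESH == 6, random input triggers the binary rampup only
** with probability about 2^(1-6) == 1/32 at any given merge step.
*/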
| 136 | |
| 137 | |
| 138 | /* |
| 139 | ** Overview of algorithm and variables. |
| 140 | ** The array of elements at list1 will be organized into runs of length 2, |
| 141 | ** or runs of length >= 2 * PTHRESH. We only try to form long runs when |
| 142 | ** PTHRESH adjacent pairs compare in the same way, suggesting overall order. |
| 143 | ** |
| 144 | ** Unless otherwise specified, pair pointers address the first of two elements. |
| 145 | ** |
| 146 | ** b and b+1 are a pair that compare with sense "sense". |
| 147 | ** b is the "bottom" of adjacent pairs that might form a longer run. |
| 148 | ** |
| 149 | ** p2 parallels b in the list2 array, where runs are defined by |
| 150 | ** a pointer chain. |
| 151 | ** |
| 152 | ** t represents the "top" of the adjacent pairs that might extend |
| 153 | ** the run beginning at b. Usually, t addresses a pair |
| 154 | ** that compares with opposite sense from (b,b+1). |
| 155 | ** However, it may also address a singleton element at the end of list1, |
| 156 | ** or it may be equal to "last", the first element beyond list1. |
| 157 | ** |
| 158 | ** r addresses the Nth pair following b. If this would be beyond t, |
| 159 | ** we back it off to t. Only when r is less than t do we consider the |
| 160 | ** run long enough to consider checking. |
| 161 | ** |
| 162 | ** q addresses a pair such that the pairs at b through q already form a run. |
** Often, q will equal b, indicating we are only sure of the pair itself.
| 164 | ** However, a search on the previous cycle may have revealed a longer run, |
| 165 | ** so q may be greater than b. |
| 166 | ** |
| 167 | ** p is used to work back from a candidate r, trying to reach q, |
| 168 | ** which would mean b through r would be a run. If we discover such a run, |
| 169 | ** we start q at r and try to push it further towards t. |
| 170 | ** If b through r is NOT a run, we detect the wrong order at (p-1,p). |
| 171 | ** In any event, after the check (if any), we have two main cases. |
| 172 | ** |
| 173 | ** 1) Short run. b <= q < p <= r <= t. |
| 174 | ** b through q is a run (perhaps trivial) |
| 175 | ** q through p are uninteresting pairs |
| 176 | ** p through r is a run |
| 177 | ** |
| 178 | ** 2) Long run. b < r <= q < t. |
| 179 | ** b through q is a run (of length >= 2 * PTHRESH) |
| 180 | ** |
| 181 | ** Note that degenerate cases are not only possible, but likely. |
| 182 | ** For example, if the pair following b compares with opposite sense, |
| 183 | ** then b == q < p == r == t. |
| 184 | */ |
| 185 | |
| 186 | |
| 187 | static IV |
| 188 | dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp) |
| 189 | { |
| 190 | I32 sense; |
| 191 | gptr *b, *p, *q, *t, *p2; |
| 192 | gptr *last, *r; |
| 193 | IV runs = 0; |
| 194 | |
| 195 | b = list1; |
| 196 | last = PINDEX(b, nmemb); |
| 197 | sense = (cmp(aTHX_ *b, *(b+1)) > 0); |
| 198 | for (p2 = list2; b < last; ) { |
| 199 | /* We just started, or just reversed sense. |
| 200 | ** Set t at end of pairs with the prevailing sense. |
| 201 | */ |
| 202 | for (p = b+2, t = p; ++p < last; t = ++p) { |
| 203 | if ((cmp(aTHX_ *t, *p) > 0) != sense) break; |
| 204 | } |
| 205 | q = b; |
| 206 | /* Having laid out the playing field, look for long runs */ |
| 207 | do { |
| 208 | p = r = b + (2 * PTHRESH); |
| 209 | if (r >= t) p = r = t; /* too short to care about */ |
| 210 | else { |
| 211 | while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) && |
| 212 | ((p -= 2) > q)) {} |
| 213 | if (p <= q) { |
| 214 | /* b through r is a (long) run. |
| 215 | ** Extend it as far as possible. |
| 216 | */ |
| 217 | p = q = r; |
| 218 | while (((p += 2) < t) && |
| 219 | ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p; |
| 220 | r = p = q + 2; /* no simple pairs, no after-run */ |
| 221 | } |
| 222 | } |
| 223 | if (q > b) { /* run of greater than 2 at b */ |
| 224 | gptr *savep = p; |
| 225 | |
| 226 | p = q += 2; |
| 227 | /* pick up singleton, if possible */ |
| 228 | if ((p == t) && |
| 229 | ((t + 1) == last) && |
| 230 | ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) |
| 231 | savep = r = p = q = last; |
| 232 | p2 = NEXT(p2) = p2 + (p - b); ++runs; |
| 233 | if (sense) |
| 234 | while (b < --p) { |
| 235 | const gptr c = *b; |
| 236 | *b++ = *p; |
| 237 | *p = c; |
| 238 | } |
| 239 | p = savep; |
| 240 | } |
| 241 | while (q < p) { /* simple pairs */ |
| 242 | p2 = NEXT(p2) = p2 + 2; ++runs; |
| 243 | if (sense) { |
| 244 | const gptr c = *q++; |
| 245 | *(q-1) = *q; |
| 246 | *q++ = c; |
| 247 | } else q += 2; |
| 248 | } |
| 249 | if (((b = p) == t) && ((t+1) == last)) { |
| 250 | NEXT(p2) = p2 + 1; ++runs; |
| 251 | b++; |
| 252 | } |
| 253 | q = r; |
| 254 | } while (b < t); |
| 255 | sense = !sense; |
| 256 | } |
| 257 | return runs; |
| 258 | } |
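
/* A worked example (illustrative): given nmemb == 4 and
 * list1 == {4,3,2,1}, the initial sense is "descending", and the
 * descending stretch is far shorter than the 2 * PTHRESH elements needed
 * to justify a long run, so both pairs are handled as simple pairs and
 * reversed in place. dynprep() returns 2, leaves list1 == {3,4,1,2}, and
 * chains the two runs in list2 as NEXT(list2) == list2 + 2 and
 * NEXT(list2 + 2) == list2 + 4.
 */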
| 259 | |
| 260 | |
| 261 | /* The original merge sort, in use since 5.7, was as fast as, or faster than, |
| 262 | * qsort on many platforms, but slower than qsort, conspicuously so, |
| 263 | * on others. The most likely explanation was platform-specific |
| 264 | * differences in cache sizes and relative speeds. |
| 265 | * |
| 266 | * The quicksort divide-and-conquer algorithm guarantees that, as the |
| 267 | * problem is subdivided into smaller and smaller parts, the parts |
| 268 | * fit into smaller (and faster) caches. So it doesn't matter how |
| 269 | * many levels of cache exist, quicksort will "find" them, and, |
| 270 | * as long as smaller is faster, take advantage of them. |
| 271 | * |
| 272 | * By contrast, consider how the original mergesort algorithm worked. |
| 273 | * Suppose we have five runs (each typically of length 2 after dynprep). |
| 274 | * |
 * pass              base                           aux
 *  0             1 2 3 4 5
 *  1                                            12 34 5
 *  2               1234 5
 *  3                                            12345
 *  4                12345
| 281 | * |
| 282 | * Adjacent pairs are merged in "grand sweeps" through the input. |
 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 * runs 3 and 4 have been merged and run 5 has been copied.
| 285 | * The only cache that matters is one large enough to hold *all* the input. |
| 286 | * On some platforms, this may be many times slower than smaller caches. |
| 287 | * |
| 288 | * The following pseudo-code uses the same basic merge algorithm, |
| 289 | * but in a divide-and-conquer way. |
| 290 | * |
| 291 | * # merge $runs runs at offset $offset of list $list1 into $list2. |
| 292 | * # all unmerged runs ($runs == 1) originate in list $base. |
| 293 | * sub mgsort2 { |
| 294 | * my ($offset, $runs, $base, $list1, $list2) = @_; |
| 295 | * |
| 296 | * if ($runs == 1) { |
| 297 | * if ($list1 is $base) copy run to $list2 |
| 298 | * return offset of end of list (or copy) |
| 299 | * } else { |
| 300 | * $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1) |
| 301 | * mgsort2($off2, $runs/2, $base, $list2, $list1) |
| 302 | * merge the adjacent runs at $offset of $list1 into $list2 |
| 303 | * return the offset of the end of the merged runs |
| 304 | * } |
| 305 | * } |
| 306 | * mgsort2(0, $runs, $base, $aux, $base); |
| 307 | * |
| 308 | * For our 5 runs, the tree of calls looks like |
| 309 | * |
 *                5
 *           3         2
 *        2     1    1   1
 *      1   1
 *
 *      1   2   3   4   5
| 316 | * |
| 317 | * and the corresponding activity looks like |
| 318 | * |
| 319 | * copy runs 1 and 2 from base to aux |
| 320 | * merge runs 1 and 2 from aux to base |
| 321 | * (run 3 is where it belongs, no copy needed) |
| 322 | * merge runs 12 and 3 from base to aux |
| 323 | * (runs 4 and 5 are where they belong, no copy needed) |
| 324 | * merge runs 4 and 5 from base to aux |
| 325 | * merge runs 123 and 45 from aux to base |
| 326 | * |
| 327 | * Note that we merge runs 1 and 2 immediately after copying them, |
| 328 | * while they are still likely to be in fast cache. Similarly, |
| 329 | * run 3 is merged with run 12 while it still may be lingering in cache. |
| 330 | * This implementation should therefore enjoy much of the cache-friendly |
| 331 | * behavior that quicksort does. In addition, it does less copying |
| 332 | * than the original mergesort implementation (only runs 1 and 2 are copied) |
| 333 | * and the "balancing" of merges is better (merged runs comprise more nearly |
| 334 | * equal numbers of original runs). |
| 335 | * |
| 336 | * The actual cache-friendly implementation will use a pseudo-stack |
| 337 | * to avoid recursion, and will unroll processing of runs of length 2, |
| 338 | * but it is otherwise similar to the recursive implementation. |
| 339 | */ |
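
/* For concreteness, here is a minimal, self-contained C rendering of the
 * pseudo-code above (an illustrative sketch only, not part of the build).
 * It fixes every initial run at length 1 and sorts ints, whereas the real
 * code below is iterative, works on gptr elements, and tracks
 * variable-length runs through the NEXT() chain.
 *
 *   // Merge `runs` unit-length runs at offset `off` of `from` into `to`;
 *   // all unmerged elements originate in `base`. Returns the offset just
 *   // past the merged result.
 *   static size_t
 *   mgsort2(size_t off, size_t runs, int *base, int *from, int *to)
 *   {
 *       if (runs == 1) {
 *           if (from == base)         // run not yet in `to`: copy it
 *               to[off] = base[off];  // (else it already sits in base == to)
 *           return off + 1;
 *       }
 *       else {
 *           // children deliver their results into our source array
 *           size_t mid = mgsort2(off, runs - runs / 2, base, to, from);
 *           size_t end = mgsort2(mid, runs / 2, base, to, from);
 *           size_t i = off, j = mid, k = off;
 *           while (i < mid && j < end)   // stable two-way merge
 *               to[k++] = (from[i] <= from[j]) ? from[i++] : from[j++];
 *           while (i < mid) to[k++] = from[i++];
 *           while (j < end) to[k++] = from[j++];
 *           return end;
 *       }
 *   }
 *
 * Called as mgsort2(0, n, base, aux, base), the sorted result lands in
 * base, and each merge happens immediately after its inputs were written,
 * while they are still likely to be in cache.
 */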
| 340 | |
| 341 | typedef struct { |
| 342 | IV offset; /* offset of 1st of 2 runs at this level */ |
| 343 | IV runs; /* how many runs must be combined into 1 */ |
| 344 | } off_runs; /* pseudo-stack element */ |
| 345 | |
| 346 | |
| 347 | static I32 |
| 348 | cmp_desc(pTHX_ gptr const a, gptr const b) |
| 349 | { |
| 350 | return -PL_sort_RealCmp(aTHX_ a, b); |
| 351 | } |
| 352 | |
| 353 | /* |
| 354 | =for apidoc sortsv_flags |
| 355 | |
| 356 | In-place sort an array of SV pointers with the given comparison routine, |
| 357 | with various SORTf_* flag options. |
| 358 | |
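For example, a stable descending sort (an illustrative sketch; C<av> is
assumed to be a plain, non-magical array):

    sortsv_flags(AvARRAY(av), AvFILLp(av) + 1, Perl_sv_cmp,
                 SORTf_DESC|SORTf_STABLE);
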
| 359 | =cut |
| 360 | */ |
| 361 | void |
| 362 | Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags) |
| 363 | { |
| 364 | IV i, run, offset; |
| 365 | I32 sense, level; |
| 366 | gptr *f1, *f2, *t, *b, *p; |
| 367 | int iwhich; |
| 368 | gptr *aux; |
| 369 | gptr *p1; |
| 370 | gptr small[SMALLSORT]; |
| 371 | gptr *which[3]; |
| 372 | off_runs stack[60], *stackp; |
| 373 | SVCOMPARE_t savecmp = NULL; |
| 374 | |
| 375 | PERL_ARGS_ASSERT_SORTSV_FLAGS; |
| 376 | if (nmemb <= 1) return; /* sorted trivially */ |
| 377 | |
| 378 | if ((flags & SORTf_DESC) != 0) { |
| 379 | savecmp = PL_sort_RealCmp; /* Save current comparison routine, if any */ |
| 380 | PL_sort_RealCmp = cmp; /* Put comparison routine where cmp_desc can find it */ |
| 381 | cmp = cmp_desc; |
| 382 | } |
| 383 | |
| 384 | if (nmemb <= SMALLSORT) aux = small; /* use stack for aux array */ |
| 385 | else { Newx(aux,nmemb,gptr); } /* allocate auxiliary array */ |
| 386 | level = 0; |
| 387 | stackp = stack; |
| 388 | stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp); |
| 389 | stackp->offset = offset = 0; |
| 390 | which[0] = which[2] = base; |
| 391 | which[1] = aux; |
| 392 | for (;;) { |
/* On levels where both runs have been constructed (stackp->runs == 0),
| 394 | * merge them, and note the offset of their end, in case the offset |
| 395 | * is needed at the next level up. Hop up a level, and, |
| 396 | * as long as stackp->runs is 0, keep merging. |
| 397 | */ |
| 398 | IV runs = stackp->runs; |
| 399 | if (runs == 0) { |
| 400 | gptr *list1, *list2; |
| 401 | iwhich = level & 1; |
| 402 | list1 = which[iwhich]; /* area where runs are now */ |
| 403 | list2 = which[++iwhich]; /* area for merged runs */ |
| 404 | do { |
| 405 | gptr *l1, *l2, *tp2; |
| 406 | offset = stackp->offset; |
| 407 | f1 = p1 = list1 + offset; /* start of first run */ |
| 408 | p = tp2 = list2 + offset; /* where merged run will go */ |
| 409 | t = NEXT(p); /* where first run ends */ |
| 410 | f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */ |
t = NEXT(t); /* where second run ends */
| 412 | l2 = POTHER(t, list2, list1); /* ... on the other side */ |
| 413 | offset = PNELEM(list2, t); |
| 414 | while (f1 < l1 && f2 < l2) { |
| 415 | /* If head 1 is larger than head 2, find ALL the elements |
| 416 | ** in list 2 strictly less than head1, write them all, |
| 417 | ** then head 1. Then compare the new heads, and repeat, |
| 418 | ** until one or both lists are exhausted. |
| 419 | ** |
| 420 | ** In all comparisons (after establishing |
| 421 | ** which head to merge) the item to merge |
| 422 | ** (at pointer q) is the first operand of |
| 423 | ** the comparison. When we want to know |
| 424 | ** if "q is strictly less than the other", |
| 425 | ** we can't just do |
| 426 | ** cmp(q, other) < 0 |
| 427 | ** because stability demands that we treat equality |
| 428 | ** as high when q comes from l2, and as low when |
| 429 | ** q was from l1. So we ask the question by doing |
| 430 | ** cmp(q, other) <= sense |
| 431 | ** and make sense == 0 when equality should look low, |
| 432 | ** and -1 when equality should look high. |
| 433 | */ |
| 434 | |
| 435 | gptr *q; |
| 436 | if (cmp(aTHX_ *f1, *f2) <= 0) { |
| 437 | q = f2; b = f1; t = l1; |
| 438 | sense = -1; |
| 439 | } else { |
| 440 | q = f1; b = f2; t = l2; |
| 441 | sense = 0; |
| 442 | } |
| 443 | |
| 444 | |
| 445 | /* ramp up |
| 446 | ** |
| 447 | ** Leave t at something strictly |
| 448 | ** greater than q (or at the end of the list), |
| 449 | ** and b at something strictly less than q. |
| 450 | */ |
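
/* E.g.: the loop below single-steps RTHRESH times, then strides
** 2, 4, 8, ... elements per probe, doubling until it overshoots.
*/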
| 451 | for (i = 1, run = 0 ;;) { |
| 452 | if ((p = PINDEX(b, i)) >= t) { |
| 453 | /* off the end */ |
| 454 | if (((p = PINDEX(t, -1)) > b) && |
| 455 | (cmp(aTHX_ *q, *p) <= sense)) |
| 456 | t = p; |
| 457 | else b = p; |
| 458 | break; |
| 459 | } else if (cmp(aTHX_ *q, *p) <= sense) { |
| 460 | t = p; |
| 461 | break; |
| 462 | } else b = p; |
| 463 | if (++run >= RTHRESH) i += i; |
| 464 | } |
| 465 | |
| 466 | |
| 467 | /* q is known to follow b and must be inserted before t. |
| 468 | ** Increment b, so the range of possibilities is [b,t). |
| 469 | ** Round binary split down, to favor early appearance. |
| 470 | ** Adjust b and t until q belongs just before t. |
| 471 | */ |
| 472 | |
| 473 | b++; |
| 474 | while (b < t) { |
| 475 | p = PINDEX(b, (PNELEM(b, t) - 1) / 2); |
| 476 | if (cmp(aTHX_ *q, *p) <= sense) { |
| 477 | t = p; |
| 478 | } else b = p + 1; |
| 479 | } |
| 480 | |
| 481 | |
| 482 | /* Copy all the strictly low elements */ |
| 483 | |
| 484 | if (q == f1) { |
| 485 | FROMTOUPTO(f2, tp2, t); |
| 486 | *tp2++ = *f1++; |
| 487 | } else { |
| 488 | FROMTOUPTO(f1, tp2, t); |
| 489 | *tp2++ = *f2++; |
| 490 | } |
| 491 | } |
| 492 | |
| 493 | |
| 494 | /* Run out remaining list */ |
| 495 | if (f1 == l1) { |
| 496 | if (f2 < l2) FROMTOUPTO(f2, tp2, l2); |
| 497 | } else FROMTOUPTO(f1, tp2, l1); |
| 498 | p1 = NEXT(p1) = POTHER(tp2, list2, list1); |
| 499 | |
| 500 | if (--level == 0) goto done; |
| 501 | --stackp; |
| 502 | t = list1; list1 = list2; list2 = t; /* swap lists */ |
| 503 | } while ((runs = stackp->runs) == 0); |
| 504 | } |
| 505 | |
| 506 | |
| 507 | stackp->runs = 0; /* current run will finish level */ |
| 508 | /* While there are more than 2 runs remaining, |
| 509 | * turn them into exactly 2 runs (at the "other" level), |
| 510 | * each made up of approximately half the runs. |
| 511 | * Stack the second half for later processing, |
| 512 | * and set about producing the first half now. |
| 513 | */ |
| 514 | while (runs > 2) { |
| 515 | ++level; |
| 516 | ++stackp; |
| 517 | stackp->offset = offset; |
| 518 | runs -= stackp->runs = runs / 2; |
| 519 | } |
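/* E.g., starting with 5 runs: the first pass stacks 2 and keeps 3,
 * the second stacks 1 and keeps 2, reproducing the 5 -> (3,2) and
 * 3 -> (2,1) splits of the call tree sketched above. */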
| 520 | /* We must construct a single run from 1 or 2 runs. |
| 521 | * All the original runs are in which[0] == base. |
| 522 | * The run we construct must end up in which[level&1]. |
| 523 | */ |
| 524 | iwhich = level & 1; |
| 525 | if (runs == 1) { |
| 526 | /* Constructing a single run from a single run. |
| 527 | * If it's where it belongs already, there's nothing to do. |
| 528 | * Otherwise, copy it to where it belongs. |
| 529 | * A run of 1 is either a singleton at level 0, |
| 530 | * or the second half of a split 3. In neither event |
| 531 | * is it necessary to set offset. It will be set by the merge |
| 532 | * that immediately follows. |
| 533 | */ |
| 534 | if (iwhich) { /* Belongs in aux, currently in base */ |
| 535 | f1 = b = PINDEX(base, offset); /* where list starts */ |
| 536 | f2 = PINDEX(aux, offset); /* where list goes */ |
| 537 | t = NEXT(f2); /* where list will end */ |
| 538 | offset = PNELEM(aux, t); /* offset thereof */ |
| 539 | t = PINDEX(base, offset); /* where it currently ends */ |
| 540 | FROMTOUPTO(f1, f2, t); /* copy */ |
| 541 | NEXT(b) = t; /* set up parallel pointer */ |
| 542 | } else if (level == 0) goto done; /* single run at level 0 */ |
| 543 | } else { |
| 544 | /* Constructing a single run from two runs. |
| 545 | * The merge code at the top will do that. |
| 546 | * We need only make sure the two runs are in the "other" array, |
| 547 | * so they'll end up in the correct array after the merge. |
| 548 | */ |
| 549 | ++level; |
| 550 | ++stackp; |
| 551 | stackp->offset = offset; |
| 552 | stackp->runs = 0; /* take care of both runs, trigger merge */ |
| 553 | if (!iwhich) { /* Merged runs belong in aux, copy 1st */ |
| 554 | f1 = b = PINDEX(base, offset); /* where first run starts */ |
| 555 | f2 = PINDEX(aux, offset); /* where it will be copied */ |
| 556 | t = NEXT(f2); /* where first run will end */ |
| 557 | offset = PNELEM(aux, t); /* offset thereof */ |
| 558 | p = PINDEX(base, offset); /* end of first run */ |
| 559 | t = NEXT(t); /* where second run will end */ |
| 560 | t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */ |
| 561 | FROMTOUPTO(f1, f2, t); /* copy both runs */ |
NEXT(b) = p; /* parallel pointer for 1st */
| 563 | NEXT(p) = t; /* ... and for second */ |
| 564 | } |
| 565 | } |
| 566 | } |
| 567 | done: |
| 568 | if (aux != small) Safefree(aux); /* free iff allocated */ |
| 569 | if (savecmp != NULL) { |
| 570 | PL_sort_RealCmp = savecmp; /* Restore current comparison routine, if any */ |
| 571 | } |
| 572 | return; |
| 573 | } |
| 574 | |
| 575 | /* |
| 576 | * The quicksort implementation was derived from source code contributed |
| 577 | * by Tom Horsley. |
| 578 | * |
| 579 | * NOTE: this code was derived from Tom Horsley's qsort replacement |
| 580 | * and should not be confused with the original code. |
| 581 | */ |
| 582 | |
| 583 | /* Copyright (C) Tom Horsley, 1997. All rights reserved. |
| 584 | |
| 585 | Permission granted to distribute under the same terms as perl which are |
| 586 | (briefly): |
| 587 | |
| 588 | This program is free software; you can redistribute it and/or modify |
| 589 | it under the terms of either: |
| 590 | |
| 591 | a) the GNU General Public License as published by the Free |
| 592 | Software Foundation; either version 1, or (at your option) any |
| 593 | later version, or |
| 594 | |
| 595 | b) the "Artistic License" which comes with this Kit. |
| 596 | |
| 597 | Details on the perl license can be found in the perl source code which |
| 598 | may be located via the www.perl.com web page. |
| 599 | |
This is the most wonderfulest possible qsort I can come up with (and
still be mostly portable). My (limited) tests indicate it consistently
does about 20% fewer calls to compare than does the qsort in the Visual
C++ library; other vendors may vary.
| 604 | |
| 605 | Some of the ideas in here can be found in "Algorithms" by Sedgewick, |
| 606 | others I invented myself (or more likely re-invented since they seemed |
| 607 | pretty obvious once I watched the algorithm operate for a while). |
| 608 | |
| 609 | Most of this code was written while watching the Marlins sweep the Giants |
| 610 | in the 1997 National League Playoffs - no Braves fans allowed to use this |
| 611 | code (just kidding :-). |
| 612 | |
| 613 | I realize that if I wanted to be true to the perl tradition, the only |
| 614 | comment in this file would be something like: |
| 615 | |
| 616 | ...they shuffled back towards the rear of the line. 'No, not at the |
| 617 | rear!' the slave-driver shouted. 'Three files up. And stay there... |
| 618 | |
| 619 | However, I really needed to violate that tradition just so I could keep |
| 620 | track of what happens myself, not to mention some poor fool trying to |
| 621 | understand this years from now :-). |
| 622 | */ |
| 623 | |
| 624 | /* ********************************************************** Configuration */ |
| 625 | |
| 626 | #ifndef QSORT_ORDER_GUESS |
| 627 | #define QSORT_ORDER_GUESS 2 /* Select doubling version of the netBSD trick */ |
| 628 | #endif |
| 629 | |
/* QSORT_MAX_STACK is the largest number of partitions that can be stacked up for
future processing - a good max upper bound is log base 2 of memory size
(32 on 32-bit machines, 64 on 64-bit machines, etc.). In reality it can
safely be smaller than that since the program is taking up some space and
most operating systems only let you grab some subset of contiguous
memory (not to mention that you are normally sorting data larger than
1 byte element size :-).
| 637 | */ |
| 638 | #ifndef QSORT_MAX_STACK |
| 639 | #define QSORT_MAX_STACK 32 |
| 640 | #endif |
| 641 | |
/* QSORT_BREAK_EVEN is the size of the largest partition we should insertion sort.
Anything bigger and we use qsort. If you make this too small, the qsort
will probably break (or become less efficient), because it doesn't expect
the middle element of a partition to be the same as the right or left
(you have been warned).
| 647 | */ |
| 648 | #ifndef QSORT_BREAK_EVEN |
| 649 | #define QSORT_BREAK_EVEN 6 |
| 650 | #endif |
| 651 | |
| 652 | /* QSORT_PLAY_SAFE is the size of the largest partition we're willing |
to go quadratic on. We inoculate larger partitions against
| 654 | quadratic behavior by shuffling them before sorting. This is not |
| 655 | an absolute guarantee of non-quadratic behavior, but it would take |
| 656 | staggeringly bad luck to pick extreme elements as the pivot |
| 657 | from randomized data. |
| 658 | */ |
| 659 | #ifndef QSORT_PLAY_SAFE |
| 660 | #define QSORT_PLAY_SAFE 255 |
| 661 | #endif |
| 662 | |
| 663 | /* ************************************************************* Data Types */ |
| 664 | |
| 665 | /* hold left and right index values of a partition waiting to be sorted (the |
| 666 | partition includes both left and right - right is NOT one past the end or |
| 667 | anything like that). |
| 668 | */ |
| 669 | struct partition_stack_entry { |
| 670 | int left; |
| 671 | int right; |
| 672 | #ifdef QSORT_ORDER_GUESS |
| 673 | int qsort_break_even; |
| 674 | #endif |
| 675 | }; |
| 676 | |
| 677 | /* ******************************************************* Shorthand Macros */ |
| 678 | |
| 679 | /* Note that these macros will be used from inside the qsort function where |
| 680 | we happen to know that the variable 'elt_size' contains the size of an |
| 681 | array element and the variable 'temp' points to enough space to hold a |
| 682 | temp element and the variable 'array' points to the array being sorted |
| 683 | and 'compare' is the pointer to the compare routine. |
| 684 | |
Also note that there are many highly architecture-specific ways
these might be sped up, but this is simply the most generally portable
code I could think of.
| 688 | */ |
| 689 | |
/* Return < 0, == 0, or > 0 as the value of elt1 is < elt2, == elt2, or > elt2
| 691 | */ |
| 692 | #define qsort_cmp(elt1, elt2) \ |
| 693 | ((*compare)(aTHX_ array[elt1], array[elt2])) |
| 694 | |
| 695 | #ifdef QSORT_ORDER_GUESS |
| 696 | #define QSORT_NOTICE_SWAP swapped++; |
| 697 | #else |
| 698 | #define QSORT_NOTICE_SWAP |
| 699 | #endif |
| 700 | |
| 701 | /* swaps contents of array elements elt1, elt2. |
| 702 | */ |
| 703 | #define qsort_swap(elt1, elt2) \ |
| 704 | STMT_START { \ |
| 705 | QSORT_NOTICE_SWAP \ |
| 706 | temp = array[elt1]; \ |
| 707 | array[elt1] = array[elt2]; \ |
| 708 | array[elt2] = temp; \ |
| 709 | } STMT_END |
| 710 | |
| 711 | /* rotate contents of elt1, elt2, elt3 such that elt1 gets elt2, elt2 gets |
| 712 | elt3 and elt3 gets elt1. |
| 713 | */ |
| 714 | #define qsort_rotate(elt1, elt2, elt3) \ |
| 715 | STMT_START { \ |
| 716 | QSORT_NOTICE_SWAP \ |
| 717 | temp = array[elt1]; \ |
| 718 | array[elt1] = array[elt2]; \ |
| 719 | array[elt2] = array[elt3]; \ |
| 720 | array[elt3] = temp; \ |
| 721 | } STMT_END |
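
/* E.g., with array == {A,B,C}, qsort_rotate(0, 1, 2) leaves {B,C,A}. */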
| 722 | |
| 723 | /* ************************************************************ Debug stuff */ |
| 724 | |
| 725 | #ifdef QSORT_DEBUG |
| 726 | |
static void
break_here(void)
| 729 | { |
| 730 | return; /* good place to set a breakpoint */ |
| 731 | } |
| 732 | |
| 733 | #define qsort_assert(t) (void)( (t) || (break_here(), 0) ) |
| 734 | |
| 735 | static void |
| 736 | doqsort_all_asserts( |
| 737 | void * array, |
| 738 | size_t num_elts, |
| 739 | size_t elt_size, |
| 740 | int (*compare)(const void * elt1, const void * elt2), |
| 741 | int pc_left, int pc_right, int u_left, int u_right) |
| 742 | { |
| 743 | int i; |
| 744 | |
| 745 | qsort_assert(pc_left <= pc_right); |
| 746 | qsort_assert(u_right < pc_left); |
| 747 | qsort_assert(pc_right < u_left); |
| 748 | for (i = u_right + 1; i < pc_left; ++i) { |
| 749 | qsort_assert(qsort_cmp(i, pc_left) < 0); |
| 750 | } |
| 751 | for (i = pc_left; i < pc_right; ++i) { |
| 752 | qsort_assert(qsort_cmp(i, pc_right) == 0); |
| 753 | } |
| 754 | for (i = pc_right + 1; i < u_left; ++i) { |
| 755 | qsort_assert(qsort_cmp(pc_right, i) < 0); |
| 756 | } |
| 757 | } |
| 758 | |
| 759 | #define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) \ |
| 760 | doqsort_all_asserts(array, num_elts, elt_size, compare, \ |
| 761 | PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) |
| 762 | |
| 763 | #else |
| 764 | |
| 765 | #define qsort_assert(t) ((void)0) |
| 766 | |
| 767 | #define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) ((void)0) |
| 768 | |
| 769 | #endif |
| 770 | |
| 771 | /* |
| 772 | =head1 Array Manipulation Functions |
| 773 | |
| 774 | =for apidoc sortsv |
| 775 | |
| 776 | In-place sort an array of SV pointers with the given comparison routine. |
| 777 | |
| 778 | Currently this always uses mergesort. See C<L</sortsv_flags>> for a more |
| 779 | flexible routine. |
| 780 | |
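For example (an illustrative sketch; C<av> is assumed to be a plain,
non-magical array):

    sortsv(AvARRAY(av), AvFILLp(av) + 1, Perl_sv_cmp);
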
| 781 | =cut |
| 782 | */ |
| 783 | |
| 784 | void |
| 785 | Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp) |
| 786 | { |
| 787 | PERL_ARGS_ASSERT_SORTSV; |
| 788 | |
| 789 | sortsv_flags(array, nmemb, cmp, 0); |
| 790 | } |
| 791 | |
/* SvNSIOK(sv): sv has a valid NV, or a valid signed IV */
#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
/* SvSIOK(sv): sv has a valid signed (non-UV) IV */
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
/* SvNSIV(sv): sv's numeric value, as NV or signed IV, computed if needed */
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
| 795 | |
| 796 | PP(pp_sort) |
| 797 | { |
| 798 | dSP; dMARK; dORIGMARK; |
| 799 | SV **p1 = ORIGMARK+1, **p2; |
| 800 | SSize_t max, i; |
| 801 | AV* av = NULL; |
| 802 | GV *gv; |
| 803 | CV *cv = NULL; |
| 804 | U8 gimme = GIMME_V; |
| 805 | OP* const nextop = PL_op->op_next; |
| 806 | I32 overloading = 0; |
| 807 | bool hasargs = FALSE; |
| 808 | bool copytmps; |
| 809 | I32 is_xsub = 0; |
| 810 | const U8 priv = PL_op->op_private; |
| 811 | const U8 flags = PL_op->op_flags; |
| 812 | U32 sort_flags = 0; |
| 813 | void (*sortsvp)(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp, U32 flags) |
| 814 | = Perl_sortsv_flags; |
| 815 | I32 all_SIVs = 1; |
| 816 | |
| 817 | if ((priv & OPpSORT_DESCEND) != 0) |
| 818 | sort_flags |= SORTf_DESC; |
| 819 | if ((priv & OPpSORT_STABLE) != 0) |
| 820 | sort_flags |= SORTf_STABLE; |
| 821 | if ((priv & OPpSORT_UNSTABLE) != 0) |
| 822 | sort_flags |= SORTf_UNSTABLE; |
| 823 | |
| 824 | if (gimme != G_ARRAY) { |
| 825 | SP = MARK; |
| 826 | EXTEND(SP,1); |
| 827 | RETPUSHUNDEF; |
| 828 | } |
| 829 | |
| 830 | ENTER; |
| 831 | SAVEVPTR(PL_sortcop); |
| 832 | if (flags & OPf_STACKED) { |
| 833 | if (flags & OPf_SPECIAL) { |
| 834 | OP *nullop = OpSIBLING(cLISTOP->op_first); /* pass pushmark */ |
| 835 | assert(nullop->op_type == OP_NULL); |
| 836 | PL_sortcop = nullop->op_next; |
| 837 | } |
| 838 | else { |
| 839 | GV *autogv = NULL; |
| 840 | HV *stash; |
| 841 | cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD); |
| 842 | check_cv: |
| 843 | if (cv && SvPOK(cv)) { |
| 844 | const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv)); |
| 845 | if (proto && strEQ(proto, "$$")) { |
| 846 | hasargs = TRUE; |
| 847 | } |
| 848 | } |
| 849 | if (cv && CvISXSUB(cv) && CvXSUB(cv)) { |
| 850 | is_xsub = 1; |
| 851 | } |
| 852 | else if (!(cv && CvROOT(cv))) { |
| 853 | if (gv) { |
| 854 | goto autoload; |
| 855 | } |
| 856 | else if (!CvANON(cv) && (gv = CvGV(cv))) { |
| 857 | if (cv != GvCV(gv)) cv = GvCV(gv); |
| 858 | autoload: |
| 859 | if (!autogv && ( |
| 860 | autogv = gv_autoload_pvn( |
| 861 | GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv), |
| 862 | GvNAMEUTF8(gv) ? SVf_UTF8 : 0 |
| 863 | ) |
| 864 | )) { |
| 865 | cv = GvCVu(autogv); |
| 866 | goto check_cv; |
| 867 | } |
| 868 | else { |
| 869 | SV *tmpstr = sv_newmortal(); |
| 870 | gv_efullname3(tmpstr, gv, NULL); |
| 871 | DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called", |
| 872 | SVfARG(tmpstr)); |
| 873 | } |
| 874 | } |
| 875 | else { |
| 876 | DIE(aTHX_ "Undefined subroutine in sort"); |
| 877 | } |
| 878 | } |
| 879 | |
| 880 | if (is_xsub) |
| 881 | PL_sortcop = (OP*)cv; |
| 882 | else |
| 883 | PL_sortcop = CvSTART(cv); |
| 884 | } |
| 885 | } |
| 886 | else { |
| 887 | PL_sortcop = NULL; |
| 888 | } |
| 889 | |
| 890 | /* optimiser converts "@a = sort @a" to "sort \@a". In this case, |
| 891 | * push (@a) onto stack, then assign result back to @a at the end of |
| 892 | * this function */ |
| 893 | if (priv & OPpSORT_INPLACE) { |
| 894 | assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV); |
| 895 | (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */ |
| 896 | av = MUTABLE_AV((*SP)); |
| 897 | if (SvREADONLY(av)) |
| 898 | Perl_croak_no_modify(); |
| 899 | max = AvFILL(av) + 1; |
| 900 | MEXTEND(SP, max); |
| 901 | if (SvMAGICAL(av)) { |
| 902 | for (i=0; i < max; i++) { |
| 903 | SV **svp = av_fetch(av, i, FALSE); |
| 904 | *SP++ = (svp) ? *svp : NULL; |
| 905 | } |
| 906 | } |
| 907 | else { |
| 908 | SV **svp = AvARRAY(av); |
| 909 | assert(svp || max == 0); |
| 910 | for (i = 0; i < max; i++) |
| 911 | *SP++ = *svp++; |
| 912 | } |
| 913 | SP--; |
| 914 | p1 = p2 = SP - (max-1); |
| 915 | } |
| 916 | else { |
| 917 | p2 = MARK+1; |
| 918 | max = SP - MARK; |
| 919 | } |
| 920 | |
/* shuffle stack down, removing optional initial cv (p1!=p2), plus
 * any nulls; also stringify any args, or convert them to integer or
 * number, as required */
| 924 | copytmps = cBOOL(PL_sortcop); |
| 925 | for (i=max; i > 0 ; i--) { |
| 926 | if ((*p1 = *p2++)) { /* Weed out nulls. */ |
| 927 | if (copytmps && SvPADTMP(*p1)) { |
| 928 | *p1 = sv_mortalcopy(*p1); |
| 929 | } |
| 930 | SvTEMP_off(*p1); |
| 931 | if (!PL_sortcop) { |
| 932 | if (priv & OPpSORT_NUMERIC) { |
| 933 | if (priv & OPpSORT_INTEGER) { |
| 934 | if (!SvIOK(*p1)) |
| 935 | (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD); |
| 936 | } |
| 937 | else { |
| 938 | if (!SvNSIOK(*p1)) |
| 939 | (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD); |
| 940 | if (all_SIVs && !SvSIOK(*p1)) |
| 941 | all_SIVs = 0; |
| 942 | } |
| 943 | } |
| 944 | else { |
| 945 | if (!SvPOK(*p1)) |
| 946 | (void)sv_2pv_flags(*p1, 0, |
| 947 | SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD); |
| 948 | } |
| 949 | if (SvAMAGIC(*p1)) |
| 950 | overloading = 1; |
| 951 | } |
| 952 | p1++; |
| 953 | } |
| 954 | else |
| 955 | max--; |
| 956 | } |
| 957 | if (max > 1) { |
| 958 | SV **start; |
| 959 | if (PL_sortcop) { |
| 960 | PERL_CONTEXT *cx; |
| 961 | const bool oldcatch = CATCH_GET; |
| 962 | I32 old_savestack_ix = PL_savestack_ix; |
| 963 | |
| 964 | SAVEOP(); |
| 965 | |
| 966 | CATCH_SET(TRUE); |
| 967 | PUSHSTACKi(PERLSI_SORT); |
| 968 | if (!hasargs && !is_xsub) { |
| 969 | SAVEGENERICSV(PL_firstgv); |
| 970 | SAVEGENERICSV(PL_secondgv); |
| 971 | PL_firstgv = MUTABLE_GV(SvREFCNT_inc( |
| 972 | gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV) |
| 973 | )); |
| 974 | PL_secondgv = MUTABLE_GV(SvREFCNT_inc( |
| 975 | gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV) |
| 976 | )); |
| 977 | /* make sure the GP isn't removed out from under us for |
| 978 | * the SAVESPTR() */ |
| 979 | save_gp(PL_firstgv, 0); |
| 980 | save_gp(PL_secondgv, 0); |
| 981 | /* we don't want modifications localized */ |
| 982 | GvINTRO_off(PL_firstgv); |
| 983 | GvINTRO_off(PL_secondgv); |
| 984 | SAVEGENERICSV(GvSV(PL_firstgv)); |
| 985 | SvREFCNT_inc(GvSV(PL_firstgv)); |
| 986 | SAVEGENERICSV(GvSV(PL_secondgv)); |
| 987 | SvREFCNT_inc(GvSV(PL_secondgv)); |
| 988 | } |
| 989 | |
| 990 | gimme = G_SCALAR; |
| 991 | cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix); |
| 992 | if (!(flags & OPf_SPECIAL)) { |
| 993 | cx->cx_type = CXt_SUB|CXp_MULTICALL; |
| 994 | cx_pushsub(cx, cv, NULL, hasargs); |
| 995 | if (!is_xsub) { |
| 996 | PADLIST * const padlist = CvPADLIST(cv); |
| 997 | |
| 998 | if (++CvDEPTH(cv) >= 2) |
| 999 | pad_push(padlist, CvDEPTH(cv)); |
| 1000 | PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv)); |
| 1001 | |
| 1002 | if (hasargs) { |
| 1003 | /* This is mostly copied from pp_entersub */ |
| 1004 | AV * const av = MUTABLE_AV(PAD_SVl(0)); |
| 1005 | |
| 1006 | cx->blk_sub.savearray = GvAV(PL_defgv); |
| 1007 | GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av)); |
| 1008 | } |
| 1009 | |
| 1010 | } |
| 1011 | } |
| 1012 | |
| 1013 | start = p1 - max; |
| 1014 | sortsvp(aTHX_ start, max, |
| 1015 | (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv), |
| 1016 | sort_flags); |
| 1017 | |
| 1018 | /* Reset cx, in case the context stack has been reallocated. */ |
| 1019 | cx = CX_CUR(); |
| 1020 | |
| 1021 | PL_stack_sp = PL_stack_base + cx->blk_oldsp; |
| 1022 | |
| 1023 | CX_LEAVE_SCOPE(cx); |
| 1024 | if (!(flags & OPf_SPECIAL)) { |
| 1025 | assert(CxTYPE(cx) == CXt_SUB); |
| 1026 | cx_popsub(cx); |
| 1027 | } |
| 1028 | else |
| 1029 | assert(CxTYPE(cx) == CXt_NULL); |
| 1030 | /* there isn't a POPNULL ! */ |
| 1031 | |
| 1032 | cx_popblock(cx); |
| 1033 | CX_POP(cx); |
| 1034 | POPSTACK; |
| 1035 | CATCH_SET(oldcatch); |
| 1036 | } |
| 1037 | else { |
| 1038 | MEXTEND(SP, 20); /* Can't afford stack realloc on signal. */ |
| 1039 | start = ORIGMARK+1; |
| 1040 | sortsvp(aTHX_ start, max, |
| 1041 | (priv & OPpSORT_NUMERIC) |
| 1042 | ? ( ( ( priv & OPpSORT_INTEGER) || all_SIVs) |
| 1043 | ? ( overloading ? S_amagic_i_ncmp : S_sv_i_ncmp) |
| 1044 | : ( overloading ? S_amagic_ncmp : S_sv_ncmp ) ) |
| 1045 | : ( |
| 1046 | #ifdef USE_LOCALE_COLLATE |
| 1047 | IN_LC_RUNTIME(LC_COLLATE) |
| 1048 | ? ( overloading |
| 1049 | ? (SVCOMPARE_t)S_amagic_cmp_locale |
| 1050 | : (SVCOMPARE_t)sv_cmp_locale_static) |
| 1051 | : |
| 1052 | #endif |
| 1053 | ( overloading ? (SVCOMPARE_t)S_amagic_cmp : (SVCOMPARE_t)sv_cmp_static)), |
| 1054 | sort_flags); |
| 1055 | } |
| 1056 | if ((priv & OPpSORT_REVERSE) != 0) { |
| 1057 | SV **q = start+max-1; |
| 1058 | while (start < q) { |
| 1059 | SV * const tmp = *start; |
| 1060 | *start++ = *q; |
| 1061 | *q-- = tmp; |
| 1062 | } |
| 1063 | } |
| 1064 | } |
| 1065 | |
| 1066 | if (av) { |
| 1067 | /* copy back result to the array */ |
| 1068 | SV** const base = MARK+1; |
| 1069 | if (SvMAGICAL(av)) { |
| 1070 | for (i = 0; i < max; i++) |
| 1071 | base[i] = newSVsv(base[i]); |
| 1072 | av_clear(av); |
| 1073 | av_extend(av, max); |
| 1074 | for (i=0; i < max; i++) { |
| 1075 | SV * const sv = base[i]; |
| 1076 | SV ** const didstore = av_store(av, i, sv); |
| 1077 | if (SvSMAGICAL(sv)) |
| 1078 | mg_set(sv); |
| 1079 | if (!didstore) |
| 1080 | sv_2mortal(sv); |
| 1081 | } |
| 1082 | } |
| 1083 | else { |
| 1084 | /* the elements of av are likely to be the same as the |
| 1085 | * (non-refcounted) elements on the stack, just in a different |
* order. However, it's possible that someone's messed with av
| 1087 | * in the meantime. So bump and unbump the relevant refcounts |
| 1088 | * first. |
| 1089 | */ |
| 1090 | for (i = 0; i < max; i++) { |
| 1091 | SV *sv = base[i]; |
| 1092 | assert(sv); |
| 1093 | if (SvREFCNT(sv) > 1) |
| 1094 | base[i] = newSVsv(sv); |
| 1095 | else |
| 1096 | SvREFCNT_inc_simple_void_NN(sv); |
| 1097 | } |
| 1098 | av_clear(av); |
| 1099 | if (max > 0) { |
| 1100 | av_extend(av, max); |
| 1101 | Copy(base, AvARRAY(av), max, SV*); |
| 1102 | } |
| 1103 | AvFILLp(av) = max - 1; |
| 1104 | AvREIFY_off(av); |
| 1105 | AvREAL_on(av); |
| 1106 | } |
| 1107 | } |
| 1108 | LEAVE; |
| 1109 | PL_stack_sp = ORIGMARK + max; |
| 1110 | return nextop; |
| 1111 | } |
| 1112 | |
| 1113 | static I32 |
| 1114 | S_sortcv(pTHX_ SV *const a, SV *const b) |
| 1115 | { |
| 1116 | const I32 oldsaveix = PL_savestack_ix; |
| 1117 | I32 result; |
| 1118 | PMOP * const pm = PL_curpm; |
| 1119 | COP * const cop = PL_curcop; |
| 1120 | SV *olda, *oldb; |
| 1121 | |
| 1122 | PERL_ARGS_ASSERT_SORTCV; |
| 1123 | |
| 1124 | olda = GvSV(PL_firstgv); |
| 1125 | GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a); |
| 1126 | SvREFCNT_dec(olda); |
| 1127 | oldb = GvSV(PL_secondgv); |
| 1128 | GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b); |
| 1129 | SvREFCNT_dec(oldb); |
| 1130 | PL_stack_sp = PL_stack_base; |
| 1131 | PL_op = PL_sortcop; |
| 1132 | CALLRUNOPS(aTHX); |
| 1133 | PL_curcop = cop; |
| 1134 | /* entry zero of a stack is always PL_sv_undef, which |
| 1135 | * simplifies converting a '()' return into undef in scalar context */ |
| 1136 | assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef); |
| 1137 | result = SvIV(*PL_stack_sp); |
| 1138 | |
| 1139 | LEAVE_SCOPE(oldsaveix); |
| 1140 | PL_curpm = pm; |
| 1141 | return result; |
| 1142 | } |
| 1143 | |
| 1144 | static I32 |
| 1145 | S_sortcv_stacked(pTHX_ SV *const a, SV *const b) |
| 1146 | { |
| 1147 | const I32 oldsaveix = PL_savestack_ix; |
| 1148 | I32 result; |
| 1149 | AV * const av = GvAV(PL_defgv); |
| 1150 | PMOP * const pm = PL_curpm; |
| 1151 | COP * const cop = PL_curcop; |
| 1152 | |
| 1153 | PERL_ARGS_ASSERT_SORTCV_STACKED; |
| 1154 | |
| 1155 | if (AvREAL(av)) { |
| 1156 | av_clear(av); |
| 1157 | AvREAL_off(av); |
| 1158 | AvREIFY_on(av); |
| 1159 | } |
/* ensure @_ has room for two elements: first reclaim any space
 * shifted off the front, then extend the allocation if needed */
if (AvMAX(av) < 1) {
| 1161 | SV **ary = AvALLOC(av); |
| 1162 | if (AvARRAY(av) != ary) { |
| 1163 | AvMAX(av) += AvARRAY(av) - AvALLOC(av); |
| 1164 | AvARRAY(av) = ary; |
| 1165 | } |
| 1166 | if (AvMAX(av) < 1) { |
| 1167 | Renew(ary,2,SV*); |
| 1168 | AvMAX(av) = 1; |
| 1169 | AvARRAY(av) = ary; |
| 1170 | AvALLOC(av) = ary; |
| 1171 | } |
| 1172 | } |
| 1173 | AvFILLp(av) = 1; |
| 1174 | |
| 1175 | AvARRAY(av)[0] = a; |
| 1176 | AvARRAY(av)[1] = b; |
| 1177 | PL_stack_sp = PL_stack_base; |
| 1178 | PL_op = PL_sortcop; |
| 1179 | CALLRUNOPS(aTHX); |
| 1180 | PL_curcop = cop; |
| 1181 | /* entry zero of a stack is always PL_sv_undef, which |
| 1182 | * simplifies converting a '()' return into undef in scalar context */ |
| 1183 | assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef); |
| 1184 | result = SvIV(*PL_stack_sp); |
| 1185 | |
| 1186 | LEAVE_SCOPE(oldsaveix); |
| 1187 | PL_curpm = pm; |
| 1188 | return result; |
| 1189 | } |
| 1190 | |
| 1191 | static I32 |
| 1192 | S_sortcv_xsub(pTHX_ SV *const a, SV *const b) |
| 1193 | { |
| 1194 | dSP; |
| 1195 | const I32 oldsaveix = PL_savestack_ix; |
| 1196 | CV * const cv=MUTABLE_CV(PL_sortcop); |
| 1197 | I32 result; |
| 1198 | PMOP * const pm = PL_curpm; |
| 1199 | |
| 1200 | PERL_ARGS_ASSERT_SORTCV_XSUB; |
| 1201 | |
| 1202 | SP = PL_stack_base; |
| 1203 | PUSHMARK(SP); |
| 1204 | EXTEND(SP, 2); |
| 1205 | *++SP = a; |
| 1206 | *++SP = b; |
| 1207 | PUTBACK; |
| 1208 | (void)(*CvXSUB(cv))(aTHX_ cv); |
| 1209 | /* entry zero of a stack is always PL_sv_undef, which |
| 1210 | * simplifies converting a '()' return into undef in scalar context */ |
| 1211 | assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef); |
| 1212 | result = SvIV(*PL_stack_sp); |
| 1213 | |
| 1214 | LEAVE_SCOPE(oldsaveix); |
| 1215 | PL_curpm = pm; |
| 1216 | return result; |
| 1217 | } |
| 1218 | |
| 1219 | |
| 1220 | static I32 |
| 1221 | S_sv_ncmp(pTHX_ SV *const a, SV *const b) |
| 1222 | { |
| 1223 | I32 cmp = do_ncmp(a, b); |
| 1224 | |
| 1225 | PERL_ARGS_ASSERT_SV_NCMP; |
| 1226 | |
if (cmp == 2) { /* do_ncmp() returns 2 for unordered (NaN) operands */
| 1228 | if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL); |
| 1229 | return 0; |
| 1230 | } |
| 1231 | |
| 1232 | return cmp; |
| 1233 | } |
| 1234 | |
| 1235 | static I32 |
| 1236 | S_sv_i_ncmp(pTHX_ SV *const a, SV *const b) |
| 1237 | { |
| 1238 | const IV iv1 = SvIV(a); |
| 1239 | const IV iv2 = SvIV(b); |
| 1240 | |
| 1241 | PERL_ARGS_ASSERT_SV_I_NCMP; |
| 1242 | |
| 1243 | return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0; |
| 1244 | } |
| 1245 | |
#define tryCALL_AMAGICbin(left,right,meth) \
    ((SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL)
| 1250 | |
| 1251 | #define SORT_NORMAL_RETURN_VALUE(val) (((val) > 0) ? 1 : ((val) ? -1 : 0)) |
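/* e.g. SORT_NORMAL_RETURN_VALUE(42) == 1, (0) == 0, (-0.5) == -1 */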
| 1252 | |
| 1253 | static I32 |
| 1254 | S_amagic_ncmp(pTHX_ SV *const a, SV *const b) |
| 1255 | { |
| 1256 | SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg); |
| 1257 | |
| 1258 | PERL_ARGS_ASSERT_AMAGIC_NCMP; |
| 1259 | |
| 1260 | if (tmpsv) { |
| 1261 | if (SvIOK(tmpsv)) { |
| 1262 | const I32 i = SvIVX(tmpsv); |
| 1263 | return SORT_NORMAL_RETURN_VALUE(i); |
| 1264 | } |
| 1265 | else { |
| 1266 | const NV d = SvNV(tmpsv); |
| 1267 | return SORT_NORMAL_RETURN_VALUE(d); |
| 1268 | } |
| 1269 | } |
| 1270 | return S_sv_ncmp(aTHX_ a, b); |
| 1271 | } |
| 1272 | |
| 1273 | static I32 |
| 1274 | S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b) |
| 1275 | { |
| 1276 | SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg); |
| 1277 | |
| 1278 | PERL_ARGS_ASSERT_AMAGIC_I_NCMP; |
| 1279 | |
| 1280 | if (tmpsv) { |
| 1281 | if (SvIOK(tmpsv)) { |
| 1282 | const I32 i = SvIVX(tmpsv); |
| 1283 | return SORT_NORMAL_RETURN_VALUE(i); |
| 1284 | } |
| 1285 | else { |
| 1286 | const NV d = SvNV(tmpsv); |
| 1287 | return SORT_NORMAL_RETURN_VALUE(d); |
| 1288 | } |
| 1289 | } |
| 1290 | return S_sv_i_ncmp(aTHX_ a, b); |
| 1291 | } |
| 1292 | |
| 1293 | static I32 |
| 1294 | S_amagic_cmp(pTHX_ SV *const str1, SV *const str2) |
| 1295 | { |
| 1296 | SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg); |
| 1297 | |
| 1298 | PERL_ARGS_ASSERT_AMAGIC_CMP; |
| 1299 | |
| 1300 | if (tmpsv) { |
| 1301 | if (SvIOK(tmpsv)) { |
| 1302 | const I32 i = SvIVX(tmpsv); |
| 1303 | return SORT_NORMAL_RETURN_VALUE(i); |
| 1304 | } |
| 1305 | else { |
| 1306 | const NV d = SvNV(tmpsv); |
| 1307 | return SORT_NORMAL_RETURN_VALUE(d); |
| 1308 | } |
| 1309 | } |
| 1310 | return sv_cmp(str1, str2); |
| 1311 | } |
| 1312 | |
| 1313 | #ifdef USE_LOCALE_COLLATE |
| 1314 | |
| 1315 | static I32 |
| 1316 | S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2) |
| 1317 | { |
| 1318 | SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg); |
| 1319 | |
| 1320 | PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE; |
| 1321 | |
| 1322 | if (tmpsv) { |
| 1323 | if (SvIOK(tmpsv)) { |
| 1324 | const I32 i = SvIVX(tmpsv); |
| 1325 | return SORT_NORMAL_RETURN_VALUE(i); |
| 1326 | } |
| 1327 | else { |
| 1328 | const NV d = SvNV(tmpsv); |
| 1329 | return SORT_NORMAL_RETURN_VALUE(d); |
| 1330 | } |
| 1331 | } |
| 1332 | return sv_cmp_locale(str1, str2); |
| 1333 | } |
| 1334 | |
| 1335 | #endif |
| 1336 | |
| 1337 | /* |
| 1338 | * ex: set ts=8 sts=4 sw=4 et: |
| 1339 | */ |