X-Git-Url: https://perl5.git.perl.org/perl5.git/blobdiff_plain/84d4ea48280f6b54fdc70fe4c8b9494e3331071e..e19eb3c1189f80471d9e8bc1bac9bafb84e853ef:/pp_sort.c

diff --git a/pp_sort.c b/pp_sort.c
index d4e4d8b..8fe6bcd 100644
--- a/pp_sort.c
+++ b/pp_sort.c
@@ -1,6 +1,7 @@
 /* pp_sort.c
  *
- *    Copyright (c) 1991-2001, Larry Wall
+ *    Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ *    2000, 2001, 2002, 2003, by Larry Wall and others
  *
  *    You may distribute under the terms of either the GNU General Public
  *    License or the Artistic License, as specified in the README file.
@@ -16,6 +17,11 @@
 #define PERL_IN_PP_SORT_C
 #include "perl.h"

+#if defined(UNDER_CE)
+/* looks like 'small' is a reserved word for WINCE (or somesuch) */
+#define small xsmall
+#endif
+
 static I32 sortcv(pTHX_ SV *a, SV *b);
 static I32 sortcv_stacked(pTHX_ SV *a, SV *b);
 static I32 sortcv_xsub(pTHX_ SV *a, SV *b);
@@ -29,10 +35,13 @@ static I32 amagic_cmp_locale(pTHX_ SV *a, SV *b);
 #define sv_cmp_static Perl_sv_cmp
 #define sv_cmp_locale_static Perl_sv_cmp_locale

-#define SORTHINTS(hintsvp) \
-    ((PL_hintgv && \
-      (hintsvp = hv_fetch(GvHV(PL_hintgv), "SORT", 4, FALSE))) ? \
-     (I32)SvIV(*hintsvp) : 0)
+#define SORTHINTS(hintsv) \
+    (((hintsv) = GvSV(gv_fetchpv("sort::hints", GV_ADDMULTI, SVt_IV))), \
+     (SvIOK(hintsv) ? ((I32)SvIV(hintsv)) : 0))
+
+#ifndef SMALLSORT
+#define SMALLSORT (200)
+#endif

 /*
  * The mergesort implementation is by Peter M. Mcilroy.
@@ -48,15 +57,6 @@ static I32 amagic_cmp_locale(pTHX_ SV *a, SV *b);
  *
  */

-#ifdef TESTHARNESS
-#include <stdlib.h>
-typedef void SV;
-#define pTHX_
-#define STATIC
-#define New(ID,VAR,N,TYPE) VAR=(TYPE *)malloc((N)*sizeof(TYPE))
-#define Safefree(VAR) free(VAR)
-typedef int (*SVCOMPARE_t) (pTHX_ SV*, SV*);
-#endif /* TESTHARNESS */

 typedef char * aptr;            /* pointer for arithmetic on sizes */
 typedef SV * gptr;              /* pointers in our lists */
@@ -177,13 +177,14 @@ typedef SV * gptr;              /* pointers in our lists */
 */

-static void
+static IV
 dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, SVCOMPARE_t cmp)
 {
-    int sense;
+    I32 sense;
     register gptr *b, *p, *q, *t, *p2;
     register gptr c, *last, *r;
     gptr *savep;
+    IV runs = 0;

     b = list1;
     last = PINDEX(b, nmemb);
@@ -221,7 +222,7 @@ dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, SVCOMPARE_t cmp)
                            ((t + 1) == last) &&
                            ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                     savep = r = p = q = last;
-                p2 = NEXT(p2) = p2 + (p - b);
+                p2 = NEXT(p2) = p2 + (p - b); ++runs;
                 if (sense)
                     while (b < --p) {
                         c = *b;
                         *b++ = *p;
@@ -230,7 +231,7 @@ dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, SVCOMPARE_t cmp)
             p = savep;
         }
         while (q < p) {         /* simple pairs */
-            p2 = NEXT(p2) = p2 + 2;
+            p2 = NEXT(p2) = p2 + 2; ++runs;
             if (sense) {
                 c = *q++;
                 *(q-1) = *q;
                 *q++ = c;
             } else q += 2;
         }
@@ -238,164 +239,298 @@ dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, SVCOMPARE_t cmp)
         if (((b = p) == t) && ((t+1) == last)) {
-            NEXT(p2) = p2 + 1;
+            NEXT(p2) = p2 + 1; ++runs;
             b++;
         }
         q = r;
         } while (b < t);
         sense = !sense;
     }
-    return;
+    return runs;
 }

-/* Overview of bmerge variables:
-**
-** list1 and list2 address the main and auxiliary arrays.
-** They swap identities after each merge pass.
-** Base points to the original list1, so we can tell if
-** the pointers ended up where they belonged (or must be copied).
-**
-** When we are merging two lists, f1 and f2 are the next elements
-** on the respective lists.  l1 and l2 mark the end of the lists.
-** tp2 is the current location in the merged list.
-**
-** p1 records where f1 started.
-** After the merge, a new descriptor is built there.
-**
-** p2 is a ``parallel'' pointer in (what starts as) descriptor space.
-** It is used to identify and delimit the runs.
-**
-** In the heat of determining where q, the greater of the f1/f2 elements,
-** belongs in the other list, b, t and p, represent bottom, top and probe
-** locations, respectively, in the other list.
-** They make convenient temporary pointers in other places.
-*/
+/* The original merge sort, in use since 5.7, was as fast as, or faster than,
+ * qsort on many platforms, but slower than qsort, conspicuously so,
+ * on others.  The most likely explanation was platform-specific
+ * differences in cache sizes and relative speeds.
+ *
+ * The quicksort divide-and-conquer algorithm guarantees that, as the
+ * problem is subdivided into smaller and smaller parts, the parts
+ * fit into smaller (and faster) caches.  So it doesn't matter how
+ * many levels of cache exist, quicksort will "find" them, and,
+ * as long as smaller is faster, take advantage of them.
+ *
+ * By contrast, consider how the original mergesort algorithm worked.
+ * Suppose we have five runs (each typically of length 2 after dynprep).
+ *
+ * pass               base                        aux
+ *  0              1 2 3 4 5
+ *  1                                           12 34 5
+ *  2              1234 5
+ *  3                                           12345
+ *  4              12345
+ *
+ * Adjacent pairs are merged in "grand sweeps" through the input.
+ * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
+ * runs 3 and 4 are merged and the runs from run 5 have been copied.
+ * The only cache that matters is one large enough to hold *all* the input.
+ * On some platforms, this may be many times slower than smaller caches.
+ *
+ * The following pseudo-code uses the same basic merge algorithm,
+ * but in a divide-and-conquer way.
+ *
+ * # merge $runs runs at offset $offset of list $list1 into $list2.
+ * # all unmerged runs ($runs == 1) originate in list $base.
+ * sub mgsort2 {
+ *     my ($offset, $runs, $base, $list1, $list2) = @_;
+ *
+ *     if ($runs == 1) {
+ *         if ($list1 is $base) copy run to $list2
+ *         return offset of end of list (or copy)
+ *     } else {
+ *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
+ *         mgsort2($off2, $runs/2, $base, $list2, $list1)
+ *         merge the adjacent runs at $offset of $list1 into $list2
+ *         return the offset of the end of the merged runs
+ *     }
+ * }
+ * mgsort2(0, $runs, $base, $aux, $base);
+ *
+ * For our 5 runs, the tree of calls looks like
+ *
+ *           5
+ *      3        2
+ *   2     1   1   1
+ *  1 1
+ *
+ *  1 2    3   4   5
+ *
+ * and the corresponding activity looks like
+ *
+ *     copy runs 1 and 2 from base to aux
+ *     merge runs 1 and 2 from aux to base
+ *     (run 3 is where it belongs, no copy needed)
+ *     merge runs 12 and 3 from base to aux
+ *     (runs 4 and 5 are where they belong, no copy needed)
+ *     merge runs 4 and 5 from base to aux
+ *     merge runs 123 and 45 from aux to base
+ *
+ * Note that we merge runs 1 and 2 immediately after copying them,
+ * while they are still likely to be in fast cache.  Similarly,
+ * run 3 is merged with run 12 while it still may be lingering in cache.
+ * This implementation should therefore enjoy much of the cache-friendly
+ * behavior that quicksort does.  In addition, it does less copying
+ * than the original mergesort implementation (only runs 1 and 2 are copied)
+ * and the "balancing" of merges is better (merged runs comprise more nearly
+ * equal numbers of original runs).
+ *
+ * The actual cache-friendly implementation will use a pseudo-stack
+ * to avoid recursion, and will unroll processing of runs of length 2,
+ * but it is otherwise similar to the recursive implementation.
+ */
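The recursion in the comment above is compact enough to try out on its own. The following standalone C sketch is illustrative only, not part of the patch: merge2, run_end, and the int element type are invented, simplified stand-ins, and a plain two-finger merge replaces the galloping merge the real code uses. It mirrors the mgsort2 pseudo-code line for line.

    #include <stdio.h>
    #include <string.h>

    /* run_end[i] is the offset just past leaf run i; the real code keeps
     * equivalent information in the NEXT() pointers built by dynprep. */
    static size_t *run_end;

    /* Stable two-finger merge of src[off..mid) and src[mid..end) into dst;
     * on ties the element from the first run wins. */
    static void merge2(int *dst, const int *src, size_t off, size_t mid, size_t end)
    {
        size_t i = off, j = mid, k = off;
        while (i < mid && j < end)
            dst[k++] = (src[j] < src[i]) ? src[j++] : src[i++];
        while (i < mid) dst[k++] = src[i++];
        while (j < end) dst[k++] = src[j++];
    }

    /* Combine nruns leaf runs (the first is leaf number `first`, starting at
     * offset `off`) from list1 into list2; unmerged runs live in base.
     * Returns the offset just past the combined run. */
    static size_t mgsort2(size_t off, size_t first, size_t nruns,
                          int *base, int *list1, int *list2)
    {
        if (nruns == 1) {
            size_t end = run_end[first];
            if (list1 == base)          /* run is still in base: copy it over */
                memcpy(list2 + off, base + off, (end - off) * sizeof(int));
            return end;
        } else {
            size_t half = nruns - nruns / 2;
            /* build both halves in list1 (note the swapped arguments), ... */
            size_t mid = mgsort2(off, first, half, base, list2, list1);
            size_t end = mgsort2(mid, first + half, nruns / 2,
                                 base, list2, list1);
            merge2(list2, list1, off, mid, end); /* ... then merge them here */
            return end;
        }
    }

    int main(void)
    {
        int base[10] = {3, 7, 1, 9, 4, 8, 2, 6, 5, 5}; /* five sorted pairs */
        size_t ends[5] = {2, 4, 6, 8, 10};             /* leaf run boundaries */
        int aux[10];
        size_t i;

        run_end = ends;
        mgsort2(0, 0, 5, base, aux, base);  /* merge 5 runs; result in base */
        for (i = 0; i < 10; i++)
            printf("%d ", base[i]);
        putchar('\n');                      /* prints 1 2 3 4 5 5 6 7 8 9 */
        return 0;
    }

Tracing this on the five runs reproduces exactly the activity listed in the comment: runs 1 and 2 are copied to aux and merged back into base, run 12 meets run 3 in aux, runs 4 and 5 meet in aux, and the final merge of 123 and 45 lands in base.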
+
+typedef struct {
+    IV  offset;         /* offset of 1st of 2 runs at this level */
+    IV  runs;           /* how many runs must be combined into 1 */
+} off_runs;             /* pseudo-stack element */

 STATIC void
-S_mergesortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp)
+S_mergesortsv(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp)
 {
-    int i, run;
-    int sense;
+    IV i, run, runs, offset;
+    I32 sense, level;
+    int iwhich;
     register gptr *f1, *f2, *t, *b, *p, *tp2, *l1, *l2, *q;
-    gptr *aux, *list2, *p2, *last;
-    gptr *base = list1;
+    gptr *aux, *list1, *list2;
     gptr *p1;
+    gptr small[SMALLSORT];
+    gptr *which[3];
+    off_runs stack[60], *stackp;

-    if (nmemb <= 1) return;                     /* sorted trivially */
-    New(799,list2,nmemb,gptr);                  /* allocate auxiliary array */
-    aux = list2;
-    dynprep(aTHX_ list1, list2, nmemb, cmp);
-    last = PINDEX(list2, nmemb);
-    while (NEXT(list2) != last) {
-        /* More than one run remains.  Do some merging to reduce runs. */
-        l2 = p1 = list1;
-        for (tp2 = p2 = list2; p2 != last;) {
-            /* The new first run begins where the old second list ended.
-            ** Use the p2 ``parallel'' pointer to identify the end of the run.
-            */
-            f1 = l2;
-            t = NEXT(p2);
-            f2 = l1 = POTHER(t, list2, list1);
-            if (t != last) t = NEXT(t);
-            l2 = POTHER(t, list2, list1);
-            p2 = t;
-            while (f1 < l1 && f2 < l2) {
-                /* If head 1 is larger than head 2, find ALL the elements
-                ** in list 2 strictly less than head1, write them all,
-                ** then head 1.  Then compare the new heads, and repeat,
-                ** until one or both lists are exhausted.
-                **
-                ** In all comparisons (after establishing
-                ** which head to merge) the item to merge
-                ** (at pointer q) is the first operand of
-                ** the comparison.  When we want to know
-                ** if ``q is strictly less than the other'',
-                ** we can't just do
-                **    cmp(q, other) < 0
-                ** because stability demands that we treat equality
-                ** as high when q comes from l2, and as low when
-                ** q was from l1.  So we ask the question by doing
-                **    cmp(q, other) <= sense
-                ** and make sense == 0 when equality should look low,
-                ** and -1 when equality should look high.
-                */
-
-
-                if (cmp(aTHX_ *f1, *f2) <= 0) {
-                    q = f2; b = f1; t = l1;
-                    sense = -1;
-                } else {
-                    q = f1; b = f2; t = l2;
-                    sense = 0;
-                }
-
-                /* ramp up
-                **
-                ** Leave t at something strictly
-                ** greater than q (or at the end of the list),
-                ** and b at something strictly less than q.
-                */
-                for (i = 1, run = 0 ;;) {
-                    if ((p = PINDEX(b, i)) >= t) {
-                        /* off the end */
-                        if (((p = PINDEX(t, -1)) > b) &&
-                            (cmp(aTHX_ *q, *p) <= sense))
-                             t = p;
-                        else b = p;
-                        break;
-                    } else if (cmp(aTHX_ *q, *p) <= sense) {
-                        t = p;
-                        break;
-                    } else b = p;
-                    if (++run >= RTHRESH) i += i;
-                }
-                /* q is known to follow b and must be inserted before t.
-                ** Increment b, so the range of possibilities is [b,t).
-                ** Round binary split down, to favor early appearance.
-                ** Adjust b and t until q belongs just before t.
-                */
-                b++;
-                while (b < t) {
-                    p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
-                    if (cmp(aTHX_ *q, *p) <= sense) {
-                        t = p;
-                    } else b = p + 1;
-                }
-
-                /* Copy all the strictly low elements */
-                if (q == f1) {
-                    FROMTOUPTO(f2, tp2, t);
-                    *tp2++ = *f1++;
-                } else {
-                    FROMTOUPTO(f1, tp2, t);
-                    *tp2++ = *f2++;
-                }
-            }
-
-            /* Run out remaining list */
-            if (f1 == l1) {
-                if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
-            } else FROMTOUPTO(f1, tp2, l1);
-            p1 = NEXT(p1) = POTHER(tp2, list2, list1);
-        }
-        t = list1;
-        list1 = list2;
-        list2 = t;
-        last = PINDEX(list2, nmemb);
-    }
-    if (base == list2) {
-        last = PINDEX(list1, nmemb);
-        FROMTOUPTO(list1, list2, last);
-    }
-    Safefree(aux);
+    if (nmemb <= 1) return;                     /* sorted trivially */
+    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
+    else { New(799,aux,nmemb,gptr); }           /* allocate auxiliary array */
+    level = 0;
+    stackp = stack;
+    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
+    stackp->offset = offset = 0;
+    which[0] = which[2] = base;
+    which[1] = aux;
+    for (;;) {
+        /* On levels where both runs have been constructed (stackp->runs == 0),
+         * merge them, and note the offset of their end, in case the offset
+         * is needed at the next level up.  Hop up a level, and,
+         * as long as stackp->runs is 0, keep merging.
+         */
+        if ((runs = stackp->runs) == 0) {
+            iwhich = level & 1;
+            list1 = which[iwhich];              /* area where runs are now */
+            list2 = which[++iwhich];            /* area for merged runs */
+            do {
+                offset = stackp->offset;
+                f1 = p1 = list1 + offset;       /* start of first run */
+                p = tp2 = list2 + offset;       /* where merged run will go */
+                t = NEXT(p);                    /* where first run ends */
+                f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
+                t = NEXT(t);                    /* where second run ends */
+                l2 = POTHER(t, list2, list1);   /* ... on the other side */
+                offset = PNELEM(list2, t);
+                while (f1 < l1 && f2 < l2) {
+                    /* If head 1 is larger than head 2, find ALL the elements
+                    ** in list 2 strictly less than head1, write them all,
+                    ** then head 1.  Then compare the new heads, and repeat,
+                    ** until one or both lists are exhausted.
+                    **
+                    ** In all comparisons (after establishing
+                    ** which head to merge) the item to merge
+                    ** (at pointer q) is the first operand of
+                    ** the comparison.  When we want to know
+                    ** if ``q is strictly less than the other'',
+                    ** we can't just do
+                    **    cmp(q, other) < 0
+                    ** because stability demands that we treat equality
+                    ** as high when q comes from l2, and as low when
+                    ** q was from l1.  So we ask the question by doing
+                    **    cmp(q, other) <= sense
+                    ** and make sense == 0 when equality should look low,
+                    ** and -1 when equality should look high.
+                    */
+                    if (cmp(aTHX_ *f1, *f2) <= 0) {
+                        q = f2; b = f1; t = l1;
+                        sense = -1;
+                    } else {
+                        q = f1; b = f2; t = l2;
+                        sense = 0;
+                    }
+
+                    /* ramp up
+                    **
+                    ** Leave t at something strictly
+                    ** greater than q (or at the end of the list),
+                    ** and b at something strictly less than q.
+                    */
+                    for (i = 1, run = 0 ;;) {
+                        if ((p = PINDEX(b, i)) >= t) {
+                            /* off the end */
+                            if (((p = PINDEX(t, -1)) > b) &&
+                                (cmp(aTHX_ *q, *p) <= sense))
+                                 t = p;
+                            else b = p;
+                            break;
+                        } else if (cmp(aTHX_ *q, *p) <= sense) {
+                            t = p;
+                            break;
+                        } else b = p;
+                        if (++run >= RTHRESH) i += i;
+                    }
+
+                    /* q is known to follow b and must be inserted before t.
+                    ** Increment b, so the range of possibilities is [b,t).
+                    ** Round binary split down, to favor early appearance.
+                    ** Adjust b and t until q belongs just before t.
+                    */
+                    b++;
+                    while (b < t) {
+                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
+                        if (cmp(aTHX_ *q, *p) <= sense) {
+                            t = p;
+                        } else b = p + 1;
+                    }
+
+                    /* Copy all the strictly low elements */
+
+                    if (q == f1) {
+                        FROMTOUPTO(f2, tp2, t);
+                        *tp2++ = *f1++;
+                    } else {
+                        FROMTOUPTO(f1, tp2, t);
+                        *tp2++ = *f2++;
+                    }
+                }
+
+                /* Run out remaining list */
+                if (f1 == l1) {
+                    if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
+                } else FROMTOUPTO(f1, tp2, l1);
+                p1 = NEXT(p1) = POTHER(tp2, list2, list1);
+
+                if (--level == 0) goto done;
+                --stackp;
+                t = list1; list1 = list2; list2 = t;    /* swap lists */
+            } while ((runs = stackp->runs) == 0);
+        }
+
+        stackp->runs = 0;       /* current run will finish level */
+        /* While there are more than 2 runs remaining,
+         * turn them into exactly 2 runs (at the "other" level),
+         * each made up of approximately half the runs.
+         * Stack the second half for later processing,
+         * and set about producing the first half now.
+         */
+        while (runs > 2) {
+            ++level;
+            ++stackp;
+            stackp->offset = offset;
+            runs -= stackp->runs = runs / 2;
+        }
+        /* We must construct a single run from 1 or 2 runs.
+         * All the original runs are in which[0] == base.
+         * The run we construct must end up in which[level&1].
+         */
+        iwhich = level & 1;
+        if (runs == 1) {
+            /* Constructing a single run from a single run.
+             * If it's where it belongs already, there's nothing to do.
+             * Otherwise, copy it to where it belongs.
+             * A run of 1 is either a singleton at level 0,
+             * or the second half of a split 3.  In neither event
+             * is it necessary to set offset.  It will be set by the merge
+             * that immediately follows.
+             */
+            if (iwhich) {       /* Belongs in aux, currently in base */
+                f1 = b = PINDEX(base, offset);  /* where list starts */
+                f2 = PINDEX(aux, offset);       /* where list goes */
+                t = NEXT(f2);                   /* where list will end */
+                offset = PNELEM(aux, t);        /* offset thereof */
+                t = PINDEX(base, offset);       /* where it currently ends */
+                FROMTOUPTO(f1, f2, t);          /* copy */
+                NEXT(b) = t;                    /* set up parallel pointer */
+            } else if (level == 0) goto done;   /* single run at level 0 */
+        } else {
+            /* Constructing a single run from two runs.
+             * The merge code at the top will do that.
+             * We need only make sure the two runs are in the "other" array,
+             * so they'll end up in the correct array after the merge.
+             */
+            ++level;
+            ++stackp;
+            stackp->offset = offset;
+            stackp->runs = 0;   /* take care of both runs, trigger merge */
+            if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
+                f1 = b = PINDEX(base, offset);  /* where first run starts */
+                f2 = PINDEX(aux, offset);       /* where it will be copied */
+                t = NEXT(f2);                   /* where first run will end */
+                offset = PNELEM(aux, t);        /* offset thereof */
+                p = PINDEX(base, offset);       /* end of first run */
+                t = NEXT(t);                    /* where second run will end */
+                t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
+                FROMTOUPTO(f1, f2, t);          /* copy both runs */
+                NEXT(b) = p;                    /* parallel pointer for 1st */
+                NEXT(p) = t;                    /* ... and for second */
+            }
+        }
+    }
+done:
+    if (aux != small) Safefree(aux);    /* free iff allocated */
     return;
 }

@@ -476,6 +611,17 @@ S_mergesortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp)
 #define QSORT_BREAK_EVEN 6
 #endif

+/* QSORT_PLAY_SAFE is the size of the largest partition we're willing
+   to go quadratic on.  We inoculate larger partitions against
+   quadratic behavior by shuffling them before sorting.  This is not
+   an absolute guarantee of non-quadratic behavior, but it would take
+   staggeringly bad luck to pick extreme elements as the pivot
+   from randomized data.
+*/
+#ifndef QSORT_PLAY_SAFE
+#define QSORT_PLAY_SAFE 255
+#endif
+
 /* ************************************************************* Data Types */

 /* hold left and right index values of a partition waiting to be sorted (the
@@ -607,6 +753,18 @@ S_qsortsvu(pTHX_ SV ** array, size_t num_elts, SVCOMPARE_t compare)
       return;
    }

+   /* Inoculate large partitions against quadratic behavior */
+   if (num_elts > QSORT_PLAY_SAFE) {
+      register size_t n, j;
+      register SV **q;
+      for (n = num_elts, q = array; n > 1; ) {
+         j = (size_t)(n-- * Drand01());
+         temp = q[j];
+         q[j] = q[n];
+         q[n] = temp;
+      }
+   }
+
    /* Setup the initial partition definition and fall into the sorting loop */

    part_left = 0;
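The guard added above is a textbook Fisher-Yates shuffle driven by Drand01(). As a standalone illustration (hypothetical code, not part of the patch; `inoculate` is an invented name and rand()/RAND_MAX stands in for perl's Drand01(), both yielding a value in [0, 1)), the same loop can be exercised against the classic worst case for a naive quicksort, already-sorted input:

    #include <stdio.h>
    #include <stdlib.h>

    /* Same loop shape as the guard in S_qsortsvu above. */
    static void inoculate(int *q, size_t num_elts)
    {
        size_t n, j;
        int temp;
        for (n = num_elts; n > 1; ) {
            j = (size_t)(n-- * (rand() / ((double)RAND_MAX + 1.0)));
            temp = q[j];        /* swap the last unplaced slot q[n] */
            q[j] = q[n];        /* with a uniformly chosen q[j], j <= n */
            q[n] = temp;
        }
    }

    int main(void)
    {
        int a[8] = {1, 2, 3, 4, 5, 6, 7, 8};  /* sorted input: a bad case
                                                 for a naive quicksort */
        size_t i;

        srand(42);                            /* any seed will do */
        inoculate(a, 8);
        for (i = 0; i < 8; i++)
            printf("%d ", a[i]);              /* some permutation of 1..8 */
        putchar('\n');
        return 0;
    }

The shuffle is a single O(n) pass, cheap insurance against the O(n^2) behavior it is designed to prevent.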
@@ -1078,10 +1236,6 @@ S_qsortsvu(pTHX_ SV ** array, size_t num_elts, SVCOMPARE_t compare)
    /* Believe it or not, the array is sorted at this point! */
 }

-#ifndef SMALLSORT
-#define SMALLSORT (200)
-#endif
-
 /* Stabilize what is, presumably, an otherwise unstable sort method.
  * We do that by allocating (or having on hand) an array of pointers
  * that is the same size as the original array of elements to be sorted.
@@ -1133,7 +1287,6 @@ S_qsortsvu(pTHX_ SV ** array, size_t num_elts, SVCOMPARE_t compare)
  * dictated by the indirect array.
  */

-static SVCOMPARE_t RealCmp;

 static I32
 cmpindir(pTHX_ gptr a, gptr b)
@@ -1142,7 +1295,7 @@ cmpindir(pTHX_ gptr a, gptr b)
     gptr *ap = (gptr *)a;
     gptr *bp = (gptr *)b;

-    if ((sense = RealCmp(aTHX_ *ap, *bp)) == 0)
+    if ((sense = PL_sort_RealCmp(aTHX_ *ap, *bp)) == 0)
         sense = (ap > bp) ? 1 : ((ap < bp) ? -1 : 0);
     return sense;
 }

@@ -1150,30 +1303,28 @@ cmpindir(pTHX_ gptr a, gptr b)
 STATIC void
 S_qsortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp)
 {
-    SV **hintsvp;
+    SV *hintsv;

-    if (SORTHINTS(hintsvp) & HINT_SORT_FAST)
-        S_qsortsvu(aTHX_ list1, nmemb, cmp);
-    else {
+    if (SORTHINTS(hintsv) & HINT_SORT_STABLE) {
         register gptr **pp, *q;
         register size_t n, j, i;
         gptr *small[SMALLSORT], **indir, tmp;
         SVCOMPARE_t savecmp;
         if (nmemb <= 1) return;     /* sorted trivially */
-
+
         /* Small arrays can use the stack, big ones must be allocated */
         if (nmemb <= SMALLSORT) indir = small;
         else { New(1799, indir, nmemb, gptr *); }
-
+
         /* Copy pointers to original array elements into indirect array */
         for (n = nmemb, pp = indir, q = list1; n--; ) *pp++ = q++;
-
-        savecmp = RealCmp;      /* Save current comparison routine, if any */
-        RealCmp = cmp;          /* Put comparison routine where cmpindir can find it */
-
+
+        savecmp = PL_sort_RealCmp;  /* Save current comparison routine, if any */
+        PL_sort_RealCmp = cmp;      /* Put comparison routine where cmpindir can find it */
+
         /* sort, with indirection */
         S_qsortsvu(aTHX_ (gptr *)indir, nmemb, cmpindir);
-
+
         pp = indir;
         q = list1;
         for (n = nmemb; n--; ) {
@@ -1214,39 +1365,48 @@ S_qsortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp)
         /* free iff allocated */
         if (indir != small) { Safefree(indir); }
         /* restore prevailing comparison routine */
-        RealCmp = savecmp;
+        PL_sort_RealCmp = savecmp;
+    } else {
+        S_qsortsvu(aTHX_ list1, nmemb, cmp);
     }
 }

-
-/*
+
+/*
+=head1 Array Manipulation Functions
+
 =for apidoc sortsv

 Sort an array. Here is an example:

-   sortsv(AvARRAY(av), av_len(av)+1, Perl_sv_cmp_locale);
+    sortsv(AvARRAY(av), av_len(av)+1, Perl_sv_cmp_locale);
+
+See lib/sort.pm for details about controlling the sorting algorithm.

 =cut
 */
-
+
 void
 Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
 {
     void (*sortsvp)(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
       = S_mergesortsv;
-    SV **hintsvp;
+    SV *hintsv;
     I32 hints;
-
-    if ((hints = SORTHINTS(hintsvp))) {
-        if (hints & HINT_SORT_QUICKSORT)
-            sortsvp = S_qsortsv;
-        else {
-            if (hints & HINT_SORT_MERGESORT)
-                sortsvp = S_mergesortsv;
-            else
-                sortsvp = S_mergesortsv;
-        }
+
+    /* Sun's Compiler (cc: WorkShop Compilers 4.2 30 Oct 1996 C 4.2) used
+       to miscompile this function under optimization -O.  If you get test
+       errors related to picking the correct sort() function, try recompiling
+       this file without optimization. -- A.D. 4/2002.
+    */
+    hints = SORTHINTS(hintsv);
+    if (hints & HINT_SORT_QUICKSORT) {
+        sortsvp = S_qsortsv;
+    }
+    else {
+        /* The default as of 5.8.0 is mergesort */
+        sortsvp = S_mergesortsv;
     }
-
+
     sortsvp(aTHX_ array, nmemb, cmp);
 }
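To make the apidoc example above concrete, here is a minimal, hypothetical XS-level sketch, not part of the patch, that sorts an array's elements in place with Perl's default string comparison. It assumes a live interpreter and a plain, untied, non-magical AV:

    /* sort_av_inplace is an invented helper name. */
    static void
    sort_av_inplace(pTHX_ AV *av)
    {
        I32 len = av_len(av) + 1;   /* av_len() returns the highest index */

        if (len > 1)
            sortsv(AvARRAY(av), (size_t)len, Perl_sv_cmp);
    }

Which algorithm actually runs underneath is then chosen by Perl_sortsv from the sort::hints value, that is, by whatever the lib/sort.pm pragma mentioned above has set.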
+ */ + hints = SORTHINTS(hintsv); + if (hints & HINT_SORT_QUICKSORT) { + sortsvp = S_qsortsv; + } + else { + /* The default as of 5.8.0 is mergesort */ + sortsvp = S_mergesortsv; } - + sortsvp(aTHX_ array, nmemb, cmp); } @@ -1296,8 +1456,8 @@ PP(pp_sort) else if (gv) { SV *tmpstr = sv_newmortal(); gv_efullname3(tmpstr, gv, Nullch); - DIE(aTHX_ "Undefined sort subroutine \"%s\" called", - SvPVX(tmpstr)); + DIE(aTHX_ "Undefined sort subroutine \"%"SVf"\" called", + tmpstr); } else { DIE(aTHX_ "Undefined subroutine in sort"); @@ -1311,8 +1471,7 @@ PP(pp_sort) SAVEVPTR(CvROOT(cv)->op_ppaddr); CvROOT(cv)->op_ppaddr = PL_ppaddr[OP_NULL]; - SAVEVPTR(PL_curpad); - PL_curpad = AvARRAY((AV*)AvARRAY(CvPADLIST(cv))[1]); + PAD_SET_CUR(CvPADLIST(cv), 1); } } } @@ -1356,10 +1515,6 @@ PP(pp_sort) PL_secondgv = gv_fetchpv("b", TRUE, SVt_PV); PL_sortstash = stash; } -#ifdef USE_5005THREADS - sv_lock((SV *)PL_firstgv); - sv_lock((SV *)PL_secondgv); -#endif SAVESPTR(GvSV(PL_firstgv)); SAVESPTR(GvSV(PL_secondgv)); } @@ -1376,13 +1531,11 @@ PP(pp_sort) if (hasargs && !is_xsub) { /* This is mostly copied from pp_entersub */ - AV *av = (AV*)PL_curpad[0]; + AV *av = (AV*)PAD_SVl(0); -#ifndef USE_5005THREADS cx->blk_sub.savearray = GvAV(PL_defgv); GvAV(PL_defgv) = (AV*)SvREFCNT_inc(av); -#endif /* USE_5005THREADS */ - cx->blk_sub.oldcurpad = PL_curpad; + CX_CURPAD_SAVE(cx->blk_sub); cx->blk_sub.argarray = av; } sortsv((myorigmark+1), max, @@ -1454,11 +1607,7 @@ sortcv_stacked(pTHX_ SV *a, SV *b) I32 result; AV *av; -#ifdef USE_5005THREADS - av = (AV*)PL_curpad[0]; -#else av = GvAV(PL_defgv); -#endif if (AvMAX(av) < 1) { SV** ary = AvALLOC(av); @@ -1553,7 +1702,7 @@ amagic_ncmp(pTHX_ register SV *a, register SV *b) tryCALL_AMAGICbin(a,b,ncmp,&tmpsv); if (tmpsv) { NV d; - + if (SvIOK(tmpsv)) { I32 i = SvIVX(tmpsv); if (i > 0) @@ -1575,7 +1724,7 @@ amagic_i_ncmp(pTHX_ register SV *a, register SV *b) tryCALL_AMAGICbin(a,b,ncmp,&tmpsv); if (tmpsv) { NV d; - + if (SvIOK(tmpsv)) { I32 i = SvIVX(tmpsv); if (i > 0) @@ -1597,7 +1746,7 @@ amagic_cmp(pTHX_ register SV *str1, register SV *str2) tryCALL_AMAGICbin(str1,str2,scmp,&tmpsv); if (tmpsv) { NV d; - + if (SvIOK(tmpsv)) { I32 i = SvIVX(tmpsv); if (i > 0) @@ -1619,7 +1768,7 @@ amagic_cmp_locale(pTHX_ register SV *str1, register SV *str2) tryCALL_AMAGICbin(str1,str2,scmp,&tmpsv); if (tmpsv) { NV d; - + if (SvIOK(tmpsv)) { I32 i = SvIVX(tmpsv); if (i > 0) @@ -1633,5 +1782,3 @@ amagic_cmp_locale(pTHX_ register SV *str1, register SV *str2) } return sv_cmp_locale(str1, str2); } - -