| 1 | /* op.h |
| 2 | * |
| 3 | * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, |
| 4 | * 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others |
| 5 | * |
| 6 | * You may distribute under the terms of either the GNU General Public |
| 7 | * License or the Artistic License, as specified in the README file. |
| 8 | * |
| 9 | */ |
| 10 | |
| 11 | /* |
| 12 | * The fields of BASEOP are: |
| 13 | * op_next Pointer to next ppcode to execute after this one. |
| 14 | * (Top level pre-grafted op points to first op, |
| 15 | * but this is replaced when op is grafted in, when |
| 16 | * this op will point to the real next op, and the new |
| 17 | * parent takes over role of remembering starting op.) |
| 18 | * op_ppaddr Pointer to current ppcode's function. |
| 19 | * op_type The type of the operation. |
| 20 | * op_opt Whether or not the op has been optimised by the |
| 21 | * peephole optimiser. |
| 22 | * op_slabbed allocated via opslab |
| 23 | * op_static tell op_free() to skip PerlMemShared_free(), when |
| 24 | * !op_slabbed. |
| 25 | * op_savefree on savestack via SAVEFREEOP |
| 26 | * op_folded Result/remainder of a constant fold operation. |
 *	op_moresib	this op is not the last sibling
| 28 | * op_spare One spare bit |
| 29 | * op_flags Flags common to all operations. See OPf_* below. |
| 30 | * op_private Flags peculiar to a particular operation (BUT, |
| 31 | * by default, set to the number of children until |
| 32 | * the operation is privatized by a check routine, |
| 33 | * which may or may not check number of children). |
| 34 | */ |
| 35 | #include "op_reg_common.h" |
| 36 | |
/* Op type codes fit in 16 bits; the generated list lives in opcode.h */
#define OPCODE U16

/* Integer type backing the op_type bit-field in BASEOP below */
typedef PERL_BITFIELD16 Optype;
| 40 | |
/* BASEOP expands to the fields shared by every op structure; each
 * struct below begins with it.  The individual fields are documented
 * in the comment block at the top of this file.  Defining
 * BASEOP_DEFINITION beforehand substitutes an alternative layout. */
#ifdef BASEOP_DEFINITION
#define BASEOP BASEOP_DEFINITION
#else
#define BASEOP \
    OP* op_next; \
    OP* op_sibparent; \
    OP* (*op_ppaddr)(pTHX); \
    PADOFFSET op_targ; \
    PERL_BITFIELD16 op_type:9; \
    PERL_BITFIELD16 op_opt:1; \
    PERL_BITFIELD16 op_slabbed:1; \
    PERL_BITFIELD16 op_savefree:1; \
    PERL_BITFIELD16 op_static:1; \
    PERL_BITFIELD16 op_folded:1; \
    PERL_BITFIELD16 op_moresib:1; \
    PERL_BITFIELD16 op_spare:1; \
    U8 op_flags; \
    U8 op_private;
#endif
| 60 | |
| 61 | /* If op_type:9 is changed to :10, also change cx_pusheval() |
| 62 | Also, if the type of op_type is ever changed (e.g. to PERL_BITFIELD32) |
| 63 | then all the other bit-fields before/after it should change their |
| 64 | types too to let VC pack them into the same 4 byte integer.*/ |
| 65 | |
/* for efficiency, requires OPf_WANT_VOID == G_VOID etc */
/* Extract the context ("gimme") from an op's OPf_WANT bits, falling
 * back to dfl when those bits are 0 (context not known statically). */
#define OP_GIMME(op,dfl) \
	(((op)->op_flags & OPf_WANT) ? ((op)->op_flags & OPf_WANT) : dfl)

/* Map G_* context flags back to OPf_WANT bits (relies on the value
 * equivalence noted above). */
#define OP_GIMME_REVERSE(flags)	((flags) & G_WANT)
| 71 | |
| 72 | /* |
| 73 | =head1 "Gimme" Values |
| 74 | |
| 75 | =for apidoc Amn|U32|GIMME_V |
| 76 | The XSUB-writer's equivalent to Perl's C<wantarray>. Returns C<G_VOID>, |
| 77 | C<G_SCALAR> or C<G_ARRAY> for void, scalar or list context, |
| 78 | respectively. See L<perlcall> for a usage example. |
| 79 | |
| 80 | =for apidoc Amn|U32|GIMME |
| 81 | A backward-compatible version of C<GIMME_V> which can only return |
| 82 | C<G_SCALAR> or C<G_ARRAY>; in a void context, it returns C<G_SCALAR>. |
| 83 | Deprecated. Use C<GIMME_V> instead. |
| 84 | |
| 85 | =cut |
| 86 | */ |
| 87 | |
/* See the apidoc entry above: runtime context, like Perl's wantarray */
#define GIMME_V		Perl_gimme_V(aTHX)
| 89 | |
| 90 | /* Public flags */ |
| 91 | |
| 92 | #define OPf_WANT 3 /* Mask for "want" bits: */ |
| 93 | #define OPf_WANT_VOID 1 /* Want nothing */ |
| 94 | #define OPf_WANT_SCALAR 2 /* Want single value */ |
| 95 | #define OPf_WANT_LIST 3 /* Want list of any length */ |
| 96 | #define OPf_KIDS 4 /* There is a firstborn child. */ |
| 97 | #define OPf_PARENS 8 /* This operator was parenthesized. */ |
| 98 | /* (Or block needs explicit scope entry.) */ |
| 99 | #define OPf_REF 16 /* Certified reference. */ |
| 100 | /* (Return container, not containee). */ |
| 101 | #define OPf_MOD 32 /* Will modify (lvalue). */ |
| 102 | |
| 103 | #define OPf_STACKED 64 /* Some arg is arriving on the stack. */ |
| 104 | /* Indicates mutator-variant of op for those |
| 105 | * ops which support them, e.g. $x += 1 |
| 106 | */ |
| 107 | |
| 108 | #define OPf_SPECIAL 128 /* Do something weird for this op: */ |
| 109 | /* On local LVAL, don't init local value. */ |
| 110 | /* On OP_SORT, subroutine is inlined. */ |
| 111 | /* On OP_NOT, inversion was implicit. */ |
| 112 | /* On OP_LEAVE, don't restore curpm, e.g. |
| 113 | * /(...)/ while ...>; */ |
| 114 | /* On truncate, we truncate filehandle */ |
| 115 | /* On control verbs, we saw no label */ |
| 116 | /* On flipflop, we saw ... instead of .. */ |
| 117 | /* On UNOPs, saw bare parens, e.g. eof(). */ |
| 118 | /* On OP_CHDIR, handle (or bare parens) */ |
| 119 | /* On OP_NULL, saw a "do". */ |
| 120 | /* On OP_EXISTS, treat av as av, not avhv. */ |
| 121 | /* On OP_(ENTER|LEAVE)EVAL, don't clear $@ */ |
| 122 | /* On regcomp, "use re 'eval'" was in scope */ |
| 123 | /* On RV2[ACGHS]V, don't create GV--in |
| 124 | defined()*/ |
| 125 | /* On OP_DBSTATE, indicates breakpoint |
| 126 | * (runtime property) */ |
| 127 | /* On OP_REQUIRE, was seen as CORE::require */ |
| 128 | /* On OP_(ENTER|LEAVE)WHEN, there's |
| 129 | no condition */ |
| 130 | /* On OP_SMARTMATCH, an implicit smartmatch */ |
| 131 | /* On OP_ANONHASH and OP_ANONLIST, create a |
| 132 | reference to the new anon hash or array */ |
| 133 | /* On OP_HELEM, OP_MULTIDEREF and OP_HSLICE, |
| 134 | localization will be followed by assignment, |
| 135 | so do not wipe the target if it is special |
| 136 | (e.g. a glob or a magic SV) */ |
| 137 | /* On OP_MATCH, OP_SUBST & OP_TRANS, the |
| 138 | operand of a logical or conditional |
| 139 | that was optimised away, so it should |
| 140 | not be bound via =~ */ |
| 141 | /* On OP_CONST, from a constant CV */ |
| 142 | /* On OP_GLOB, two meanings: |
| 143 | - Before ck_glob, called as CORE::glob |
| 144 | - After ck_glob, use Perl glob function |
| 145 | */ |
| 146 | /* On OP_PADRANGE, push @_ */ |
| 147 | /* On OP_DUMP, has no label */ |
| 148 | /* On OP_UNSTACK, in a C-style for loop */ |
| 149 | /* On OP_READLINE, it's for <<>>, not <> */ |
| 150 | /* There is no room in op_flags for this one, so it has its own bit- |
| 151 | field member (op_folded) instead. The flag is only used to tell |
| 152 | op_convert_list to set op_folded. */ |
| 153 | #define OPf_FOLDED (1<<16) |
| 154 | |
| 155 | /* old names; don't use in new code, but don't break them, either */ |
| 156 | #define OPf_LIST OPf_WANT_LIST |
| 157 | #define OPf_KNOW OPf_WANT |
| 158 | |
| 159 | #if !defined(PERL_CORE) && !defined(PERL_EXT) |
| 160 | # define GIMME \ |
| 161 | (PL_op->op_flags & OPf_WANT \ |
| 162 | ? ((PL_op->op_flags & OPf_WANT) == OPf_WANT_LIST \ |
| 163 | ? G_ARRAY \ |
| 164 | : G_SCALAR) \ |
| 165 | : dowantarray()) |
| 166 | #endif |
| 167 | |
| 168 | |
| 169 | /* NOTE: OPp* flags are now auto-generated and defined in opcode.h, |
| 170 | * from data in regen/op_private */ |
| 171 | |
| 172 | |
| 173 | #define OPpTRANS_ALL (OPpTRANS_USE_SVOP|OPpTRANS_CAN_FORCE_UTF8|OPpTRANS_IDENTICAL|OPpTRANS_SQUASH|OPpTRANS_COMPLEMENT|OPpTRANS_GROWS|OPpTRANS_DELETE) |
| 174 | #define OPpTRANS_FROM_UTF OPpTRANS_USE_SVOP |
| 175 | #define OPpTRANS_TO_UTF OPpTRANS_CAN_FORCE_UTF8 |
| 176 | |
| 177 | |
| 178 | /* Mask for OP_ENTERSUB flags, the absence of which must be propagated |
| 179 | in dynamic context */ |
| 180 | #define OPpENTERSUB_LVAL_MASK (OPpLVAL_INTRO|OPpENTERSUB_INARGS) |
| 181 | |
| 182 | |
/* things that can be elements of op_aux */
/* One slot of a UNOP_AUX op's auxiliary array (see struct unop_aux
 * below); which member is meaningful depends on the particular op. */
typedef union {
    PADOFFSET pad_offset;
    SV *sv;
    IV iv;
    UV uv;
    char *pv;
    SSize_t ssize;
} UNOP_AUX_item;
| 192 | |
/* Fetch the SV held by a UNOP_AUX_item: under ithreads items store a
 * pad offset, otherwise the SV pointer directly.
 * Note: no trailing semicolon in the expansion -- callers supply their
 * own, so the macro remains usable in expression and if/else contexts
 * (the previous definitions ended with a stray ';'). */
#ifdef USE_ITHREADS
#  define UNOP_AUX_item_sv(item) PAD_SVl((item)->pad_offset)
#else
#  define UNOP_AUX_item_sv(item) ((item)->sv)
#endif
| 198 | |
| 199 | |
| 200 | |
| 201 | |
/* The minimal op: just the common BASEOP fields, no children */
struct op {
    BASEOP
};
| 205 | |
/* Unary op: a single child, op_first */
struct unop {
    BASEOP
    OP * op_first;
};
| 210 | |
/* Unary op carrying an extra array of UNOP_AUX_item slots in op_aux */
struct unop_aux {
    BASEOP
    OP *op_first;
    UNOP_AUX_item *op_aux;
};
| 216 | |
/* Binary op: two children, op_first and op_last */
struct binop {
    BASEOP
    OP * op_first;
    OP * op_last;
};
| 222 | |
/* Logical/conditional op: op_first child plus op_other, the op for
 * the alternate path (presumably the short-circuit/branch target --
 * NOTE(review): confirm exact semantics against op.c) */
struct logop {
    BASEOP
    OP * op_first;
    OP * op_other;
};
| 228 | |
/* List op: first and last of a chain of child ops */
struct listop {
    BASEOP
    OP * op_first;
    OP * op_last;
};
| 234 | |
/* Method op: op_u holds either an optree computing the method name or
 * a static method-name SV (see the alignment note inside); the rclass
 * member identifies the redirect class for $o->A::meth() calls. */
struct methop {
    BASEOP
    union {
        /* op_u.op_first *must* be aligned the same as the op_first
         * field of the other op types, and op_u.op_meth_sv *must*
         * be aligned with op_sv */
        OP* op_first;   /* optree for method name */
        SV* op_meth_sv; /* static method name */
    } op_u;
#ifdef USE_ITHREADS
    PADOFFSET op_rclass_targ; /* pad index for redirect class */
#else
    SV* op_rclass_sv;    /* static redirect class $o->A::meth() */
#endif
};
| 250 | |
/* Pattern-match op (m//, s///, qr//, split).  The compiled regexp is
 * stored in op_pmregexp, or under ithreads indirectly via an index
 * into PL_regex_pad (see PM_GETRE/PM_SETRE below). */
struct pmop {
    BASEOP
    OP * op_first;
    OP * op_last;
#ifdef USE_ITHREADS
    PADOFFSET op_pmoffset;
#else
    REGEXP * op_pmregexp; /* compiled expression */
#endif
    U32 op_pmflags;
    union {
        OP * op_pmreplroot;       /* For OP_SUBST */
        PADOFFSET op_pmtargetoff; /* For OP_SPLIT lex ary or thr GV */
        GV * op_pmtargetgv;       /* For OP_SPLIT non-threaded GV */
    } op_pmreplrootu;
    union {
        OP * op_pmreplstart; /* Only used in OP_SUBST */
#ifdef USE_ITHREADS
        PADOFFSET op_pmstashoff; /* Only used in OP_MATCH, with PMf_ONCE set */
#else
        HV * op_pmstash;
#endif
    } op_pmstashstartu;
    OP * op_code_list; /* list of (?{}) code blocks */
};
| 276 | |
/* PM_GETRE/PM_SETRE: read/write the compiled regexp of a PMOP.  Under
 * ithreads the regexp lives in the per-interpreter PL_regex_pad,
 * indexed by op_pmoffset, rather than in the op itself. */
#ifdef USE_ITHREADS
#define PM_GETRE(o) (SvTYPE(PL_regex_pad[(o)->op_pmoffset]) == SVt_REGEXP \
                     ? (REGEXP*)(PL_regex_pad[(o)->op_pmoffset]) : NULL)
/* The assignment is just to enforce type safety (or at least get a warning).
 */
/* With first class regexps not via a reference one needs to assign
   &PL_sv_undef under ithreads. (This would probably work unthreaded, but NULL
   is cheaper. I guess we could allow NULL, but the check above would get
   more complex, and we'd have an AV with (SV*)NULL in it, which feels bad */
/* BEWARE - something that calls this macro passes (r) which has a side
   effect.  */
#define PM_SETRE(o,r) STMT_START {                    \
        REGEXP *const _pm_setre = (r);                \
        assert(_pm_setre);                            \
        PL_regex_pad[(o)->op_pmoffset] = MUTABLE_SV(_pm_setre); \
    } STMT_END
#else
#define PM_GETRE(o)     ((o)->op_pmregexp)
#define PM_SETRE(o,r)   ((o)->op_pmregexp = (r))
#endif
| 297 | |
| 298 | /* Currently these PMf flags occupy a single 32-bit word. Not all bits are |
| 299 | * currently used. The lower bits are shared with their corresponding RXf flag |
| 300 | * bits, up to but not including _RXf_PMf_SHIFT_NEXT. The unused bits |
| 301 | * immediately follow; finally the used Pmf-only (unshared) bits, so that the |
| 302 | * highest bit in the word is used. This gathers all the unused bits as a pool |
| 303 | * in the middle, like so: 11111111111111110000001111111111 |
| 304 | * where the '1's represent used bits, and the '0's unused. This design allows |
| 305 | * us to allocate off one end of the pool if we need to add a shared bit, and |
| 306 | * off the other end if we need a non-shared bit, without disturbing the other |
| 307 | * bits. This maximizes the likelihood of being able to change things without |
| 308 | * breaking binary compatibility. |
| 309 | * |
| 310 | * To add shared bits, do so in op_reg_common.h. This should change |
| 311 | * _RXf_PMf_SHIFT_NEXT so that things won't compile. Then come to regexp.h and |
| 312 | * op.h and adjust the constant adders in the definitions of PMf_BASE_SHIFT and |
| 313 | * Pmf_BASE_SHIFT down by the number of shared bits you added. That's it. |
| 314 | * Things should be binary compatible. But if either of these gets to having |
| 315 | * to subtract rather than add, leave at 0 and adjust all the entries below |
 * that are in terms of this accordingly.  But if the first one of those is
| 317 | * already PMf_BASE_SHIFT+0, there are no bits left, and a redesign is in |
| 318 | * order. |
| 319 | * |
| 320 | * To remove unshared bits, just delete its entry. If you're where breaking |
| 321 | * binary compatibility is ok to do, you might want to adjust things to move |
| 322 | * the newly opened space so that it gets absorbed into the common pool. |
| 323 | * |
| 324 | * To add unshared bits, first use up any gaps in the middle. Otherwise, |
| 325 | * allocate off the low end until you get to PMf_BASE_SHIFT+0. If that isn't |
| 326 | * enough, move PMf_BASE_SHIFT down (if possible) and add the new bit at the |
| 327 | * other end instead; this preserves binary compatibility. */ |
| 328 | #define PMf_BASE_SHIFT (_RXf_PMf_SHIFT_NEXT+2) |
| 329 | |
| 330 | /* Set by the parser if it discovers an error, so the regex shouldn't be |
| 331 | * compiled */ |
| 332 | #define PMf_HAS_ERROR (1U<<(PMf_BASE_SHIFT+4)) |
| 333 | |
| 334 | /* 'use re "taint"' in scope: taint $1 etc. if target tainted */ |
| 335 | #define PMf_RETAINT (1U<<(PMf_BASE_SHIFT+5)) |
| 336 | |
| 337 | /* match successfully only once per reset, with related flag RXf_USED in |
| 338 | * re->extflags holding state. This is used only for ?? matches, and only on |
| 339 | * OP_MATCH and OP_QR */ |
| 340 | #define PMf_ONCE (1U<<(PMf_BASE_SHIFT+6)) |
| 341 | |
| 342 | /* PMf_ONCE, i.e. ?pat?, has matched successfully. Not used under threading. */ |
| 343 | #define PMf_USED (1U<<(PMf_BASE_SHIFT+7)) |
| 344 | |
| 345 | /* subst replacement is constant */ |
| 346 | #define PMf_CONST (1U<<(PMf_BASE_SHIFT+8)) |
| 347 | |
| 348 | /* keep 1st runtime pattern forever */ |
| 349 | #define PMf_KEEP (1U<<(PMf_BASE_SHIFT+9)) |
| 350 | |
| 351 | #define PMf_GLOBAL (1U<<(PMf_BASE_SHIFT+10)) /* pattern had a g modifier */ |
| 352 | |
| 353 | /* don't reset pos() if //g fails */ |
| 354 | #define PMf_CONTINUE (1U<<(PMf_BASE_SHIFT+11)) |
| 355 | |
| 356 | /* evaluating replacement as expr */ |
| 357 | #define PMf_EVAL (1U<<(PMf_BASE_SHIFT+12)) |
| 358 | |
| 359 | /* Return substituted string instead of modifying it. */ |
| 360 | #define PMf_NONDESTRUCT (1U<<(PMf_BASE_SHIFT+13)) |
| 361 | |
| 362 | /* the pattern has a CV attached (currently only under qr/...(?{}).../) */ |
| 363 | #define PMf_HAS_CV (1U<<(PMf_BASE_SHIFT+14)) |
| 364 | |
| 365 | /* op_code_list is private; don't free it etc. It may well point to |
| 366 | * code within another sub, with different pad etc */ |
| 367 | #define PMf_CODELIST_PRIVATE (1U<<(PMf_BASE_SHIFT+15)) |
| 368 | |
| 369 | /* the PMOP is a QR (we should be able to detect that from the op type, |
| 370 | * but the regex compilation API passes just the pm flags, not the op |
 * itself) */
| 372 | #define PMf_IS_QR (1U<<(PMf_BASE_SHIFT+16)) |
| 373 | #define PMf_USE_RE_EVAL (1U<<(PMf_BASE_SHIFT+17)) /* use re'eval' in scope */ |
| 374 | |
| 375 | /* See comments at the beginning of these defines about adding bits. The |
| 376 | * highest bit position should be used, so that if PMf_BASE_SHIFT gets |
| 377 | * increased, the #error below will be triggered so that you will be reminded |
| 378 | * to adjust things at the other end to keep the bit positions unchanged */ |
| 379 | #if PMf_BASE_SHIFT+17 > 31 |
| 380 | # error Too many PMf_ bits used. See above and regnodes.h for any spare in middle |
| 381 | #endif |
| 382 | |
/* PmopSTASH*: access the stash associated with a PMf_ONCE pattern.
 * Under ithreads the stash is referenced via an index into
 * PL_stashpad instead of a direct HV pointer. */
#ifdef USE_ITHREADS

#  define PmopSTASH(o) ((o)->op_pmflags & PMf_ONCE \
                        ? PL_stashpad[(o)->op_pmstashstartu.op_pmstashoff] \
                        : NULL)
#  define PmopSTASH_set(o,hv) \
        (assert_((o)->op_pmflags & PMf_ONCE) \
         (o)->op_pmstashstartu.op_pmstashoff = \
            (hv) ? alloccopstash(hv) : 0)
#else
#  define PmopSTASH(o) \
    (((o)->op_pmflags & PMf_ONCE) ? (o)->op_pmstashstartu.op_pmstash : NULL)
#  if defined (DEBUGGING) && defined(__GNUC__) && !defined(PERL_GCC_BRACE_GROUPS_FORBIDDEN)
#    define PmopSTASH_set(o,hv) ({                       \
        assert((o)->op_pmflags & PMf_ONCE);              \
        ((o)->op_pmstashstartu.op_pmstash = (hv));       \
    })
#  else
#    define PmopSTASH_set(o,hv) ((o)->op_pmstashstartu.op_pmstash = (hv))
#  endif
#endif
#define PmopSTASHPV(o)  (PmopSTASH(o) ? HvNAME_get(PmopSTASH(o)) : NULL)
/* op_pmstashstartu.op_pmstash is not refcounted */
#define PmopSTASHPV_set(o,pv)   PmopSTASH_set((o), gv_stashpv(pv,GV_ADD))
| 407 | |
/* Op with an attached SV (op_sv); see cSVOPx_sv below for access */
struct svop {
    BASEOP
    SV * op_sv;
};
| 412 | |
/* Op referring to a pad entry by offset; used e.g. for GVs under
 * ithreads (see cGVOPx_gv below) */
struct padop {
    BASEOP
    PADOFFSET op_padix;
};
| 417 | |
/* Op with an attached string buffer (op_pv) */
struct pvop {
    BASEOP
    char * op_pv;
};
| 422 | |
/* Loop op: listop-style children plus, as the field names suggest,
 * the ops to resume at for redo/next/last */
struct loop {
    BASEOP
    OP * op_first;
    OP * op_last;
    OP * op_redoop;
    OP * op_nextop;
    OP * op_lastop;
};
| 431 | |
| 432 | #define cUNOPx(o) ((UNOP*)(o)) |
| 433 | #define cUNOP_AUXx(o) ((UNOP_AUX*)(o)) |
| 434 | #define cBINOPx(o) ((BINOP*)(o)) |
| 435 | #define cLISTOPx(o) ((LISTOP*)(o)) |
| 436 | #define cLOGOPx(o) ((LOGOP*)(o)) |
| 437 | #define cPMOPx(o) ((PMOP*)(o)) |
| 438 | #define cSVOPx(o) ((SVOP*)(o)) |
| 439 | #define cPADOPx(o) ((PADOP*)(o)) |
| 440 | #define cPVOPx(o) ((PVOP*)(o)) |
| 441 | #define cCOPx(o) ((COP*)(o)) |
| 442 | #define cLOOPx(o) ((LOOP*)(o)) |
| 443 | #define cMETHOPx(o) ((METHOP*)(o)) |
| 444 | |
| 445 | #define cUNOP cUNOPx(PL_op) |
| 446 | #define cUNOP_AUX cUNOP_AUXx(PL_op) |
| 447 | #define cBINOP cBINOPx(PL_op) |
| 448 | #define cLISTOP cLISTOPx(PL_op) |
| 449 | #define cLOGOP cLOGOPx(PL_op) |
| 450 | #define cPMOP cPMOPx(PL_op) |
| 451 | #define cSVOP cSVOPx(PL_op) |
| 452 | #define cPADOP cPADOPx(PL_op) |
| 453 | #define cPVOP cPVOPx(PL_op) |
| 454 | #define cCOP cCOPx(PL_op) |
| 455 | #define cLOOP cLOOPx(PL_op) |
| 456 | |
| 457 | #define cUNOPo cUNOPx(o) |
| 458 | #define cUNOP_AUXo cUNOP_AUXx(o) |
| 459 | #define cBINOPo cBINOPx(o) |
| 460 | #define cLISTOPo cLISTOPx(o) |
| 461 | #define cLOGOPo cLOGOPx(o) |
| 462 | #define cPMOPo cPMOPx(o) |
| 463 | #define cSVOPo cSVOPx(o) |
| 464 | #define cPADOPo cPADOPx(o) |
| 465 | #define cPVOPo cPVOPx(o) |
| 466 | #define cCOPo cCOPx(o) |
| 467 | #define cLOOPo cLOOPx(o) |
| 468 | |
| 469 | #define kUNOP cUNOPx(kid) |
| 470 | #define kUNOP_AUX cUNOP_AUXx(kid) |
| 471 | #define kBINOP cBINOPx(kid) |
| 472 | #define kLISTOP cLISTOPx(kid) |
| 473 | #define kLOGOP cLOGOPx(kid) |
| 474 | #define kPMOP cPMOPx(kid) |
| 475 | #define kSVOP cSVOPx(kid) |
| 476 | #define kPADOP cPADOPx(kid) |
| 477 | #define kPVOP cPVOPx(kid) |
| 478 | #define kCOP cCOPx(kid) |
| 479 | #define kLOOP cLOOPx(kid) |
| 480 | |
| 481 | |
/* Classification of which of the op struct variants above a given op
 * uses (one value per struct kind, plus NULL) */
typedef enum {
    OPclass_NULL,     /* 0 */
    OPclass_BASEOP,   /* 1 */
    OPclass_UNOP,     /* 2 */
    OPclass_BINOP,    /* 3 */
    OPclass_LOGOP,    /* 4 */
    OPclass_LISTOP,   /* 5 */
    OPclass_PMOP,     /* 6 */
    OPclass_SVOP,     /* 7 */
    OPclass_PADOP,    /* 8 */
    OPclass_PVOP,     /* 9 */
    OPclass_LOOP,     /* 10 */
    OPclass_COP,      /* 11 */
    OPclass_METHOP,   /* 12 */
    OPclass_UNOP_AUX  /* 13 */
} OPclass;
| 498 | |
| 499 | |
| 500 | #ifdef USE_ITHREADS |
| 501 | # define cGVOPx_gv(o) ((GV*)PAD_SVl(cPADOPx(o)->op_padix)) |
| 502 | # ifndef PERL_CORE |
| 503 | # define IS_PADGV(v) (v && isGV(v)) |
| 504 | # define IS_PADCONST(v) \ |
| 505 | (v && (SvREADONLY(v) || (SvIsCOW(v) && !SvLEN(v)))) |
| 506 | # endif |
| 507 | # define cSVOPx_sv(v) (cSVOPx(v)->op_sv \ |
| 508 | ? cSVOPx(v)->op_sv : PAD_SVl((v)->op_targ)) |
| 509 | # define cSVOPx_svp(v) (cSVOPx(v)->op_sv \ |
| 510 | ? &cSVOPx(v)->op_sv : &PAD_SVl((v)->op_targ)) |
| 511 | # define cMETHOPx_rclass(v) PAD_SVl(cMETHOPx(v)->op_rclass_targ) |
| 512 | #else |
| 513 | # define cGVOPx_gv(o) ((GV*)cSVOPx(o)->op_sv) |
| 514 | # ifndef PERL_CORE |
| 515 | # define IS_PADGV(v) FALSE |
| 516 | # define IS_PADCONST(v) FALSE |
| 517 | # endif |
| 518 | # define cSVOPx_sv(v) (cSVOPx(v)->op_sv) |
| 519 | # define cSVOPx_svp(v) (&cSVOPx(v)->op_sv) |
| 520 | # define cMETHOPx_rclass(v) (cMETHOPx(v)->op_rclass_sv) |
| 521 | #endif |
| 522 | |
| 523 | #define cMETHOPx_meth(v) cSVOPx_sv(v) |
| 524 | |
| 525 | #define cGVOP_gv cGVOPx_gv(PL_op) |
| 526 | #define cGVOPo_gv cGVOPx_gv(o) |
| 527 | #define kGVOP_gv cGVOPx_gv(kid) |
| 528 | #define cSVOP_sv cSVOPx_sv(PL_op) |
| 529 | #define cSVOPo_sv cSVOPx_sv(o) |
| 530 | #define kSVOP_sv cSVOPx_sv(kid) |
| 531 | |
| 532 | #ifndef PERL_CORE |
| 533 | # define Nullop ((OP*)NULL) |
| 534 | #endif |
| 535 | |
| 536 | /* Lowest byte of PL_opargs */ |
| 537 | #define OA_MARK 1 |
| 538 | #define OA_FOLDCONST 2 |
| 539 | #define OA_RETSCALAR 4 |
| 540 | #define OA_TARGET 8 |
| 541 | #define OA_TARGLEX 16 |
| 542 | #define OA_OTHERINT 32 |
| 543 | #define OA_DANGEROUS 64 |
| 544 | #define OA_DEFGV 128 |
| 545 | |
| 546 | /* The next 4 bits (8..11) encode op class information */ |
| 547 | #define OCSHIFT 8 |
| 548 | |
| 549 | #define OA_CLASS_MASK (15 << OCSHIFT) |
| 550 | |
| 551 | #define OA_BASEOP (0 << OCSHIFT) |
| 552 | #define OA_UNOP (1 << OCSHIFT) |
| 553 | #define OA_BINOP (2 << OCSHIFT) |
| 554 | #define OA_LOGOP (3 << OCSHIFT) |
| 555 | #define OA_LISTOP (4 << OCSHIFT) |
| 556 | #define OA_PMOP (5 << OCSHIFT) |
| 557 | #define OA_SVOP (6 << OCSHIFT) |
| 558 | #define OA_PADOP (7 << OCSHIFT) |
| 559 | #define OA_PVOP_OR_SVOP (8 << OCSHIFT) |
| 560 | #define OA_LOOP (9 << OCSHIFT) |
| 561 | #define OA_COP (10 << OCSHIFT) |
| 562 | #define OA_BASEOP_OR_UNOP (11 << OCSHIFT) |
| 563 | #define OA_FILESTATOP (12 << OCSHIFT) |
| 564 | #define OA_LOOPEXOP (13 << OCSHIFT) |
| 565 | #define OA_METHOP (14 << OCSHIFT) |
| 566 | #define OA_UNOP_AUX (15 << OCSHIFT) |
| 567 | |
| 568 | /* Each remaining nybble of PL_opargs (i.e. bits 12..15, 16..19 etc) |
| 569 | * encode the type for each arg */ |
| 570 | #define OASHIFT 12 |
| 571 | |
| 572 | #define OA_SCALAR 1 |
| 573 | #define OA_LIST 2 |
| 574 | #define OA_AVREF 3 |
| 575 | #define OA_HVREF 4 |
| 576 | #define OA_CVREF 5 |
| 577 | #define OA_FILEREF 6 |
| 578 | #define OA_SCALARREF 7 |
| 579 | #define OA_OPTIONAL 8 |
| 580 | |
| 581 | /* Op_REFCNT is a reference count at the head of each op tree: needed |
| 582 | * since the tree is shared between threads, and between cloned closure |
| 583 | * copies in the same thread. OP_REFCNT_LOCK/UNLOCK is used when modifying |
| 584 | * this count. |
| 585 | * The same mutex is used to protect the refcounts of the reg_trie_data |
| 586 | * and reg_ac_data structures, which are shared between duplicated |
| 587 | * regexes. |
| 588 | */ |
| 589 | |
| 590 | #ifdef USE_ITHREADS |
| 591 | # define OP_REFCNT_INIT MUTEX_INIT(&PL_op_mutex) |
| 592 | # ifdef PERL_CORE |
| 593 | # define OP_REFCNT_LOCK MUTEX_LOCK(&PL_op_mutex) |
| 594 | # define OP_REFCNT_UNLOCK MUTEX_UNLOCK(&PL_op_mutex) |
| 595 | # else |
| 596 | # define OP_REFCNT_LOCK op_refcnt_lock() |
| 597 | # define OP_REFCNT_UNLOCK op_refcnt_unlock() |
| 598 | # endif |
| 599 | # define OP_REFCNT_TERM MUTEX_DESTROY(&PL_op_mutex) |
| 600 | #else |
| 601 | # define OP_REFCNT_INIT NOOP |
| 602 | # define OP_REFCNT_LOCK NOOP |
| 603 | # define OP_REFCNT_UNLOCK NOOP |
| 604 | # define OP_REFCNT_TERM NOOP |
| 605 | #endif |
| 606 | |
/* The root op's reference count is stored in its op_targ field; see
 * the Op_REFCNT comment above for why root ops are refcounted. */
#define OpREFCNT_set(o,n)       ((o)->op_targ = (n))
#ifdef PERL_DEBUG_READONLY_OPS
#  define OpREFCNT_inc(o)       Perl_op_refcnt_inc(aTHX_ o)
#  define OpREFCNT_dec(o)       Perl_op_refcnt_dec(aTHX_ o)
#else
#  define OpREFCNT_inc(o)       ((o) ? (++(o)->op_targ, (o)) : NULL)
#  define OpREFCNT_dec(o)       (--(o)->op_targ)
#endif
| 615 | |
| 616 | /* flags used by Perl_load_module() */ |
| 617 | #define PERL_LOADMOD_DENY 0x1 /* no Module */ |
| 618 | #define PERL_LOADMOD_NOIMPORT 0x2 /* use Module () */ |
| 619 | #define PERL_LOADMOD_IMPORT_OPS 0x4 /* import arguments |
| 620 | are passed as a sin- |
| 621 | gle op tree, not a |
| 622 | list of SVs */ |
| 623 | |
| 624 | #if defined(PERL_IN_PERLY_C) || defined(PERL_IN_OP_C) || defined(PERL_IN_TOKE_C) |
| 625 | #define ref(o, type) doref(o, type, TRUE) |
| 626 | #endif |
| 627 | |
| 628 | |
/* translation table attached to OP_TRANS/OP_TRANSR ops */

typedef struct {
    Size_t size; /* number of entries in map[], not including final slot */
    short map[1]; /* Unwarranted chumminess: over-allocated past the
                   * declared single element (pre-C99 flexible-array
                   * idiom) */
} OPtrans_map;
| 635 | |
| 636 | |
| 637 | /* |
| 638 | =head1 Optree Manipulation Functions |
| 639 | |
| 640 | =for apidoc Am|OP*|LINKLIST|OP *o |
| 641 | Given the root of an optree, link the tree in execution order using the |
| 642 | C<op_next> pointers and return the first op executed. If this has |
| 643 | already been done, it will not be redone, and C<< o->op_next >> will be |
| 644 | returned. If C<< o->op_next >> is not already set, C<o> should be at |
| 645 | least an C<UNOP>. |
| 646 | |
| 647 | =cut |
| 648 | */ |
| 649 | |
| 650 | #define LINKLIST(o) ((o)->op_next ? (o)->op_next : op_linklist((OP*)o)) |
| 651 | |
| 652 | /* no longer used anywhere in core */ |
| 653 | #ifndef PERL_CORE |
| 654 | #define cv_ckproto(cv, gv, p) \ |
| 655 | cv_ckproto_len_flags((cv), (gv), (p), (p) ? strlen(p) : 0, 0) |
| 656 | #endif |
| 657 | |
| 658 | #ifdef PERL_CORE |
| 659 | # define my(o) my_attrs((o), NULL) |
| 660 | #endif |
| 661 | |
| 662 | #ifdef USE_REENTRANT_API |
| 663 | #include "reentr.h" |
| 664 | #endif |
| 665 | |
| 666 | #define NewOp(m,var,c,type) \ |
| 667 | (var = (type *) Perl_Slab_Alloc(aTHX_ c*sizeof(type))) |
| 668 | #define NewOpSz(m,var,size) \ |
| 669 | (var = (OP *) Perl_Slab_Alloc(aTHX_ size)) |
| 670 | #define FreeOp(p) Perl_Slab_Free(aTHX_ p) |
| 671 | |
| 672 | /* |
| 673 | * The per-CV op slabs consist of a header (the opslab struct) and a bunch |
| 674 | * of space for allocating op slots, each of which consists of two pointers |
| 675 | * followed by an op. The first pointer points to the next op slot. The |
| 676 | * second points to the slab. At the end of the slab is a null pointer, |
| 677 | * so that slot->opslot_next - slot can be used to determine the size |
| 678 | * of the op. |
| 679 | * |
| 680 | * Each CV can have multiple slabs; opslab_next points to the next slab, to |
| 681 | * form a chain. All bookkeeping is done on the first slab, which is where |
| 682 | * all the op slots point. |
| 683 | * |
| 684 | * Freed ops are marked as freed and attached to the freed chain |
| 685 | * via op_next pointers. |
| 686 | * |
| 687 | * When there is more than one slab, the second slab in the slab chain is |
| 688 | * assumed to be the one with free space available. It is used when allo- |
| 689 | * cating an op if there are no freed ops available or big enough. |
| 690 | */ |
| 691 | |
| 692 | #ifdef PERL_CORE |
/* One allocation slot inside an opslab; see the slab description
 * comment above */
struct opslot {
    U16 opslot_size;   /* size of this slot (in pointers) */
    U16 opslot_offset; /* offset from start of slab (in ptr units) */
    OP opslot_op;      /* the op itself */
};
| 698 | |
/* Header of a per-CV op slab; bookkeeping fields marked "head only"
 * are maintained only on the first slab of a chain (see the comment
 * block above) */
struct opslab {
    OPSLAB * opslab_next;      /* next slab */
    OPSLAB * opslab_head;      /* first slab in chain */
    OP * opslab_freed;         /* chain of freed ops (head only)*/
    size_t opslab_refcnt;      /* number of ops (head slab only) */
    U16 opslab_size;           /* size of slab in pointers,
                                  including header */
    U16 opslab_free_space;     /* space available in this slab
                                  for allocating new ops (in ptr
                                  units) */
# ifdef PERL_DEBUG_READONLY_OPS
    bool opslab_readonly;
# endif
    OPSLOT opslab_slots;       /* slots begin here */
};
| 714 | |
| 715 | # define OPSLOT_HEADER STRUCT_OFFSET(OPSLOT, opslot_op) |
| 716 | # define OPSLOT_HEADER_P (OPSLOT_HEADER/sizeof(I32 *)) |
| 717 | # define OpSLOT(o) (assert_(o->op_slabbed) \ |
| 718 | (OPSLOT *)(((char *)o)-OPSLOT_HEADER)) |
| 719 | |
| 720 | /* the first (head) opslab of the chain in which this op is allocated */ |
| 721 | # define OpSLAB(o) \ |
| 722 | (((OPSLAB*)( (I32**)OpSLOT(o) - OpSLOT(o)->opslot_offset))->opslab_head) |
| 723 | |
| 724 | # define OpslabREFCNT_dec(slab) \ |
| 725 | (((slab)->opslab_refcnt == 1) \ |
| 726 | ? opslab_free_nopad(slab) \ |
| 727 | : (void)--(slab)->opslab_refcnt) |
| 728 | /* Variant that does not null out the pads */ |
| 729 | # define OpslabREFCNT_dec_padok(slab) \ |
| 730 | (((slab)->opslab_refcnt == 1) \ |
| 731 | ? opslab_free(slab) \ |
| 732 | : (void)--(slab)->opslab_refcnt) |
| 733 | #endif |
| 734 | |
/* Callbacks fired at compile-time block boundaries; registered in
 * PL_blockhooks and dispatched via CALL_BLOCK_HOOKS.  bhk_flags
 * (BHKf_*) records which entries are valid. */
struct block_hooks {
    U32     bhk_flags;
    void    (*bhk_start)    (pTHX_ int full);
    void    (*bhk_pre_end)  (pTHX_ OP **seq);
    void    (*bhk_post_end) (pTHX_ OP **seq);
    void    (*bhk_eval)     (pTHX_ OP *const saveop);
};
| 742 | |
| 743 | /* |
| 744 | =head1 Compile-time scope hooks |
| 745 | |
| 746 | =for apidoc mx|U32|BhkFLAGS|BHK *hk |
| 747 | Return the BHK's flags. |
| 748 | |
| 749 | =for apidoc mxu|void *|BhkENTRY|BHK *hk|which |
| 750 | Return an entry from the BHK structure. C<which> is a preprocessor token |
| 751 | indicating which entry to return. If the appropriate flag is not set |
| 752 | this will return C<NULL>. The type of the return value depends on which |
| 753 | entry you ask for. |
| 754 | |
| 755 | =for apidoc Amxu|void|BhkENTRY_set|BHK *hk|which|void *ptr |
| 756 | Set an entry in the BHK structure, and set the flags to indicate it is |
| 757 | valid. C<which> is a preprocessing token indicating which entry to set. |
| 758 | The type of C<ptr> depends on the entry. |
| 759 | |
| 760 | =for apidoc Amxu|void|BhkDISABLE|BHK *hk|which |
| 761 | Temporarily disable an entry in this BHK structure, by clearing the |
| 762 | appropriate flag. C<which> is a preprocessor token indicating which |
| 763 | entry to disable. |
| 764 | |
| 765 | =for apidoc Amxu|void|BhkENABLE|BHK *hk|which |
| 766 | Re-enable an entry in this BHK structure, by setting the appropriate |
| 767 | flag. C<which> is a preprocessor token indicating which entry to enable. |
| 768 | This will assert (under -DDEBUGGING) if the entry doesn't contain a valid |
| 769 | pointer. |
| 770 | |
| 771 | =for apidoc mxu|void|CALL_BLOCK_HOOKS|which|arg |
| 772 | Call all the registered block hooks for type C<which>. C<which> is a |
| 773 | preprocessing token; the type of C<arg> depends on C<which>. |
| 774 | |
| 775 | =cut |
| 776 | */ |
| 777 | |
| 778 | #define BhkFLAGS(hk) ((hk)->bhk_flags) |
| 779 | |
| 780 | #define BHKf_bhk_start 0x01 |
| 781 | #define BHKf_bhk_pre_end 0x02 |
| 782 | #define BHKf_bhk_post_end 0x04 |
| 783 | #define BHKf_bhk_eval 0x08 |
| 784 | |
| 785 | #define BhkENTRY(hk, which) \ |
| 786 | ((BhkFLAGS(hk) & BHKf_ ## which) ? ((hk)->which) : NULL) |
| 787 | |
/* Re-enable entry 'which' by setting its validity flag; under -DDEBUGGING
 * also asserts that the entry actually holds a non-NULL pointer. */
#define BhkENABLE(hk, which) \
    STMT_START { \
        BhkFLAGS(hk) |= BHKf_ ## which; \
        assert(BhkENTRY(hk, which)); \
    } STMT_END

/* Temporarily disable entry 'which' by clearing its validity flag; the
 * stored pointer itself is left in place so BhkENABLE can restore it. */
#define BhkDISABLE(hk, which) \
    STMT_START { \
        BhkFLAGS(hk) &= ~(BHKf_ ## which); \
    } STMT_END

/* Store 'ptr' into entry 'which' and mark it valid (which, via BhkENABLE,
 * asserts under -DDEBUGGING that the stored pointer is non-NULL). */
#define BhkENTRY_set(hk, which, ptr) \
    STMT_START { \
        (hk)->which = ptr; \
        BhkENABLE(hk, which); \
    } STMT_END
| 804 | |
/* Invoke every registered block hook of type 'which' (a bhk_* member token),
 * passing it 'arg'.  PL_blockhooks is an AV holding BHK pointers boxed as
 * IVs/UVs; the loop runs from the highest index down to 0, so hooks fire in
 * reverse registration order.  Hooks whose entry is unset (BhkENTRY NULL)
 * are skipped. */
#define CALL_BLOCK_HOOKS(which, arg) \
    STMT_START { \
        if (PL_blockhooks) { \
            SSize_t i; \
            for (i = av_tindex(PL_blockhooks); i >= 0; i--) { \
                SV *sv = AvARRAY(PL_blockhooks)[i]; \
                BHK *hk; \
                \
                assert(SvIOK(sv)); \
                if (SvUOK(sv)) \
                    hk = INT2PTR(BHK *, SvUVX(sv)); \
                else \
                    hk = INT2PTR(BHK *, SvIVX(sv)); \
                \
                if (BhkENTRY(hk, which)) \
                    BhkENTRY(hk, which)(aTHX_ arg); \
            } \
        } \
    } STMT_END
| 824 | |
/* flags for rv2cv_op_cv */
/* See the apidoc entry for rv2cv_op_cv for the precise meaning of each
 * flag; briefly, they control whether the GV is marked for late sub
 * definition and whether a GV/stub is returned instead of the CV --
 * NOTE(review): semantics live in Perl_rv2cv_op_cv in op.c; confirm there. */

#define RV2CVOPCV_MARK_EARLY 0x00000001
#define RV2CVOPCV_RETURN_NAME_GV 0x00000002
#define RV2CVOPCV_RETURN_STUB 0x00000004
#ifdef PERL_CORE /* behaviour of this flag is subject to change: */
# define RV2CVOPCV_MAYBE_NAME_GV 0x00000008
#endif
#define RV2CVOPCV_FLAG_MASK 0x0000000f /* all of the above */

/* Convenience wrapper: op_lvalue() is op_lvalue_flags() with no flags. */
#define op_lvalue(op,t) Perl_op_lvalue_flags(aTHX_ op,t,0)

/* flags for op_lvalue_flags */

/* Presumably suppresses croaking when the op cannot be used as an lvalue --
 * NOTE(review): inferred from the name; confirm in Perl_op_lvalue_flags. */
#define OP_LVALUE_NO_CROAK 1
| 840 | |
| 841 | /* |
| 842 | =head1 Custom Operators |
| 843 | |
| 844 | =for apidoc Am|U32|XopFLAGS|XOP *xop |
| 845 | Return the XOP's flags. |
| 846 | |
| 847 | =for apidoc Am||XopENTRY|XOP *xop|which |
| 848 | Return a member of the XOP structure. C<which> is a cpp token |
| 849 | indicating which entry to return. If the member is not set |
| 850 | this will return a default value. The return type depends |
| 851 | on C<which>. This macro evaluates its arguments more than |
once. If you are using C<Perl_custom_op_xop> to retrieve a
C<XOP *> from a C<OP *>, use the more efficient L</XopENTRYCUSTOM> instead.
| 854 | |
=for apidoc Am||XopENTRYCUSTOM|const OP *o|which
Exactly like C<XopENTRY(Perl_custom_op_xop(aTHX_ o), which)> but more
efficient. The C<which> parameter is identical to L</XopENTRY>.
| 858 | |
| 859 | =for apidoc Am|void|XopENTRY_set|XOP *xop|which|value |
| 860 | Set a member of the XOP structure. C<which> is a cpp token |
| 861 | indicating which entry to set. See L<perlguts/"Custom Operators"> |
| 862 | for details about the available members and how |
| 863 | they are used. This macro evaluates its argument |
| 864 | more than once. |
| 865 | |
| 866 | =for apidoc Am|void|XopDISABLE|XOP *xop|which |
| 867 | Temporarily disable a member of the XOP, by clearing the appropriate flag. |
| 868 | |
| 869 | =for apidoc Am|void|XopENABLE|XOP *xop|which |
| 870 | Reenable a member of the XOP which has been disabled. |
| 871 | |
| 872 | =cut |
| 873 | */ |
| 874 | |
/* Registration record for a custom operator (OP_CUSTOM).  All entries
 * except xop_flags are optional: the XOPf_* bits in xop_flags record which
 * members have been set, and the XOPd_* macros below supply defaults for
 * unset ones (see XopENTRY). */
struct custom_op {
    U32 xop_flags; /* XOPf_* bits: which of the members below are valid */
    const char *xop_name; /* short op name (default: PL_op_name[OP_CUSTOM]) */
    const char *xop_desc; /* description (default: PL_op_desc[OP_CUSTOM]) */
    U32 xop_class; /* OA_* class of the op structure used (default OA_BASEOP) */
    void (*xop_peep)(pTHX_ OP *o, OP *oldop); /* Perl_cpeep_t-shaped callback;
                                                 default is a null pointer */
};
| 882 | |
/* Return value of Perl_custom_op_get_field.  A union is used (rather than
 * returning void * and casting) so that the U32 members don't need
 * truncation on 64-bit platforms in the caller, and for easier macro
 * writing (see XopENTRYCUSTOM, which selects the member named by its
 * 'which' argument). */
typedef union {
    const char *xop_name; /* mirrors struct custom_op member of same name */
    const char *xop_desc;
    U32 xop_class;
    void (*xop_peep)(pTHX_ OP *o, OP *oldop);
    XOP *xop_ptr; /* the whole XOP itself; see XOPe_xop_ptr below */
} XOPRETANY;
| 893 | |
/* Access the flags word of an XOP (custom op registration). */
#define XopFLAGS(xop) ((xop)->xop_flags)

/* Validity bits: each suffix matches a struct custom_op member name so the
 * XOPf_ ## which token paste in XopENTRY/XopENTRY_set resolves correctly. */
#define XOPf_xop_name 0x01
#define XOPf_xop_desc 0x02
#define XOPf_xop_class 0x04
#define XOPf_xop_peep 0x08

/* used by Perl_custom_op_get_field for option checking */
/* Values deliberately coincide with the XOPf_* bits above (except
 * XOPe_xop_ptr, which requests the XOP * itself rather than a member). */
typedef enum {
    XOPe_xop_ptr = 0, /* just get the XOP *, don't look inside it */
    XOPe_xop_name = XOPf_xop_name,
    XOPe_xop_desc = XOPf_xop_desc,
    XOPe_xop_class = XOPf_xop_class,
    XOPe_xop_peep = XOPf_xop_peep
} xop_flags_enum;

/* XOPd_*: per-member defaults returned by XopENTRY when the member's
 * XOPf_* validity bit is not set. */
#define XOPd_xop_name PL_op_name[OP_CUSTOM]
#define XOPd_xop_desc PL_op_desc[OP_CUSTOM]
#define XOPd_xop_class OA_BASEOP
#define XOPd_xop_peep ((Perl_cpeep_t)0)
| 914 | |
/* Store 'to' into member 'which' (a bare token, e.g. xop_name) and mark it
 * valid.  Evaluates 'xop' more than once. */
#define XopENTRY_set(xop, which, to) \
    STMT_START { \
        (xop)->which = (to); \
        (xop)->xop_flags |= XOPf_ ## which; \
    } STMT_END

/* Fetch member 'which', falling back to its XOPd_* default when the member
 * is unset.  Evaluates its arguments more than once. */
#define XopENTRY(xop, which) \
    ((XopFLAGS(xop) & XOPf_ ## which) ? (xop)->which : XOPd_ ## which)

/* As XopENTRY, but looks the XOP up from an OP * in a single call to
 * Perl_custom_op_get_field (which also applies the XOPd_* defaults). */
#define XopENTRYCUSTOM(o, which) \
    (Perl_custom_op_get_field(aTHX_ o, XOPe_ ## which).which)

/* Clear/set the validity bit for member 'which'.  XopENABLE additionally
 * asserts (under -DDEBUGGING) that the member holds a usable value. */
#define XopDISABLE(xop, which) ((xop)->xop_flags &= ~XOPf_ ## which)
#define XopENABLE(xop, which) \
    STMT_START { \
        (xop)->xop_flags |= XOPf_ ## which; \
        assert(XopENTRY(xop, which)); \
    } STMT_END

/* Retrieve the registered XOP * for a custom op.  'x' is expected to carry
 * any aTHX_ prefix itself (callers pass "aTHX_ o"). */
#define Perl_custom_op_xop(x) \
    (Perl_custom_op_get_field(x, XOPe_xop_ptr).xop_ptr)
| 936 | |
| 937 | /* |
| 938 | =head1 Optree Manipulation Functions |
| 939 | |
| 940 | =for apidoc Am|const char *|OP_NAME|OP *o |
| 941 | Return the name of the provided OP. For core ops this looks up the name |
| 942 | from the op_type; for custom ops from the op_ppaddr. |
| 943 | |
| 944 | =for apidoc Am|const char *|OP_DESC|OP *o |
| 945 | Return a short description of the provided OP. |
| 946 | |
| 947 | =for apidoc Am|U32|OP_CLASS|OP *o |
| 948 | Return the class of the provided OP: that is, which of the *OP |
| 949 | structures it uses. For core ops this currently gets the information out |
| 950 | of C<PL_opargs>, which does not always accurately reflect the type used; |
| 951 | in v5.26 onwards, see also the function C<L</op_class>> which can do a better |
| 952 | job of determining the used type. |
| 953 | |
| 954 | For custom ops the type is returned from the registration, and it is up |
| 955 | to the registree to ensure it is accurate. The value returned will be |
| 956 | one of the C<OA_>* constants from F<op.h>. |
| 957 | |
| 958 | =for apidoc Am|bool|OP_TYPE_IS|OP *o|Optype type |
| 959 | Returns true if the given OP is not a C<NULL> pointer |
| 960 | and if it is of the given type. |
| 961 | |
The negation of this macro, C<OP_TYPE_ISNT> is also available
as well as C<OP_TYPE_IS_NN> and C<OP_TYPE_ISNT_NN> which elide
the C<NULL> pointer check.
| 965 | |
| 966 | =for apidoc Am|bool|OP_TYPE_IS_OR_WAS|OP *o|Optype type |
| 967 | Returns true if the given OP is not a NULL pointer and |
| 968 | if it is of the given type or used to be before being |
| 969 | replaced by an OP of type OP_NULL. |
| 970 | |
| 971 | The negation of this macro, C<OP_TYPE_ISNT_AND_WASNT> |
| 972 | is also available as well as C<OP_TYPE_IS_OR_WAS_NN> |
| 973 | and C<OP_TYPE_ISNT_AND_WASNT_NN> which elide |
| 974 | the C<NULL> pointer check. |
| 975 | |
| 976 | =for apidoc Am|bool|OpHAS_SIBLING|OP *o |
| 977 | Returns true if C<o> has a sibling |
| 978 | |
| 979 | =for apidoc Am|OP*|OpSIBLING|OP *o |
| 980 | Returns the sibling of C<o>, or C<NULL> if there is no sibling |
| 981 | |
| 982 | =for apidoc Am|void|OpMORESIB_set|OP *o|OP *sib |
| 983 | Sets the sibling of C<o> to the non-zero value C<sib>. See also C<L</OpLASTSIB_set>> |
| 984 | and C<L</OpMAYBESIB_set>>. For a higher-level interface, see |
| 985 | C<L</op_sibling_splice>>. |
| 986 | |
| 987 | =for apidoc Am|void|OpLASTSIB_set|OP *o|OP *parent |
Marks C<o> as having no further siblings and marks
C<o> as having the specified parent. See also C<L</OpMORESIB_set>> and
C<L</OpMAYBESIB_set>>. For a higher-level interface, see
| 991 | C<L</op_sibling_splice>>. |
| 992 | |
| 993 | =for apidoc Am|void|OpMAYBESIB_set|OP *o|OP *sib|OP *parent |
| 994 | Conditionally does C<OpMORESIB_set> or C<OpLASTSIB_set> depending on whether |
| 995 | C<sib> is non-null. For a higher-level interface, see C<L</op_sibling_splice>>. |
| 996 | |
| 997 | =cut |
| 998 | */ |
| 999 | |
/* Name/description/class lookups: core ops index the PL_op_name /
 * PL_op_desc / PL_opargs tables by op_type; OP_CUSTOM ops consult the
 * registered XOP via XopENTRYCUSTOM (which supplies defaults when the
 * registration omitted a member). */
#define OP_NAME(o) ((o)->op_type == OP_CUSTOM \
                    ? XopENTRYCUSTOM(o, xop_name) \
                    : PL_op_name[(o)->op_type])
#define OP_DESC(o) ((o)->op_type == OP_CUSTOM \
                    ? XopENTRYCUSTOM(o, xop_desc) \
                    : PL_op_desc[(o)->op_type])
#define OP_CLASS(o) ((o)->op_type == OP_CUSTOM \
                     ? XopENTRYCUSTOM(o, xop_class) \
                     : (PL_opargs[(o)->op_type] & OA_CLASS_MASK))

/* Type tests.  The _NN ("not null") variants skip the NULL-pointer check
 * and must only be used when o is known non-NULL. */
#define OP_TYPE_IS(o, type) ((o) && (o)->op_type == (type))
#define OP_TYPE_IS_NN(o, type) ((o)->op_type == (type))
#define OP_TYPE_ISNT(o, type) ((o) && (o)->op_type != (type))
#define OP_TYPE_ISNT_NN(o, type) ((o)->op_type != (type))

/* "IS_OR_WAS": when an op has been replaced by an OP_NULL, op_targ
 * preserves the original op_type, so match against that instead. */
#define OP_TYPE_IS_OR_WAS_NN(o, type) \
    ( ((o)->op_type == OP_NULL \
       ? (o)->op_targ \
       : (o)->op_type) \
      == (type) )

#define OP_TYPE_IS_OR_WAS(o, type) \
    ( (o) && OP_TYPE_IS_OR_WAS_NN(o, type) )

#define OP_TYPE_ISNT_AND_WASNT_NN(o, type) \
    ( ((o)->op_type == OP_NULL \
       ? (o)->op_targ \
       : (o)->op_type) \
      != (type) )

#define OP_TYPE_ISNT_AND_WASNT(o, type) \
    ( (o) && OP_TYPE_ISNT_AND_WASNT_NN(o, type) )

/* should match anything that uses ck_ftst in regen/opcodes */
#define OP_IS_STAT(op) (OP_IS_FILETEST(op) || (op) == OP_LSTAT || (op) == OP_STAT)

/* Sibling access: op_sibparent holds the next sibling when op_moresib is
 * set, otherwise the parent (see OpLASTSIB_set).  NOTE(review): in
 * OpSIBLING the "0 +" binds to the ternary's condition by precedence;
 * op_moresib is a single bit so the test is unchanged -- presumably the
 * intent is to keep the macro from being usable as an lvalue; confirm. */
#define OpHAS_SIBLING(o) (cBOOL((o)->op_moresib))
#define OpSIBLING(o) (0 + (o)->op_moresib ? (o)->op_sibparent : NULL)
#define OpMORESIB_set(o, sib) ((o)->op_moresib = 1, (o)->op_sibparent = (sib))
#define OpLASTSIB_set(o, parent) \
    ((o)->op_moresib = 0, (o)->op_sibparent = (parent))
/* Set either a sibling or, when sib is NULL, a parent terminator. */
#define OpMAYBESIB_set(o, sib, parent) \
    ((o)->op_sibparent = ((o)->op_moresib = cBOOL(sib)) ? (sib) : (parent))

#if !defined(PERL_CORE) && !defined(PERL_EXT)
/* for backwards compatibility only */
# define OP_SIBLING(o) OpSIBLING(o)
#endif

/* Convenience wrappers around Perl_newATTRSUB_x; the trailing FALSE is its
 * final boolean parameter, and newSUB passes no attribute list (NULL). */
#define newATTRSUB(f, o, p, a, b) Perl_newATTRSUB_x(aTHX_ f, o, p, a, b, FALSE)
#define newSUB(f, o, p, b) newATTRSUB((f), (o), (p), NULL, (b))
| 1051 | |
| 1052 | /* |
| 1053 | =head1 Hook manipulation |
| 1054 | */ |
| 1055 | |
/* Mutex guarding check-hook manipulation under threaded builds; on
 * unthreaded builds these compile to no-ops.  NOTE(review): exactly what
 * PL_check_mutex protects (presumably the PL_check array of op checkers)
 * is determined by its users in op.c -- confirm there. */
#ifdef USE_ITHREADS
# define OP_CHECK_MUTEX_INIT MUTEX_INIT(&PL_check_mutex)
# define OP_CHECK_MUTEX_LOCK MUTEX_LOCK(&PL_check_mutex)
# define OP_CHECK_MUTEX_UNLOCK MUTEX_UNLOCK(&PL_check_mutex)
# define OP_CHECK_MUTEX_TERM MUTEX_DESTROY(&PL_check_mutex)
#else
# define OP_CHECK_MUTEX_INIT NOOP
# define OP_CHECK_MUTEX_LOCK NOOP
# define OP_CHECK_MUTEX_UNLOCK NOOP
# define OP_CHECK_MUTEX_TERM NOOP
#endif
| 1067 | |
| 1068 | |
/* Stuff for OP_MULTIDEREF/pp_multideref.
 *
 * Each action occupies MDEREF_SHIFT (7) bits of an action word: a 4-bit
 * action code (MDEREF_ACTION_MASK), a 2-bit index type
 * (MDEREF_INDEX_MASK) and the MDEREF_FLAG_last bit.  Several actions are
 * thus packed into one word; MDEREF_reload (0) means "fetch another word
 * of actions". */

/* actions */

/* Load another word of actions/flag bits. Must be 0 */
#define MDEREF_reload 0

/* Array actions: how to obtain the AV, then do the aelem.  Values 1-6;
 * bit 3 clear distinguishes these from the HV actions below. */
#define MDEREF_AV_pop_rv2av_aelem 1
#define MDEREF_AV_gvsv_vivify_rv2av_aelem 2
#define MDEREF_AV_padsv_vivify_rv2av_aelem 3
#define MDEREF_AV_vivify_rv2av_aelem 4
#define MDEREF_AV_padav_aelem 5
#define MDEREF_AV_gvav_aelem 6

/* Hash actions: parallel to the AV set, with bit 3 (0x8) set. */
#define MDEREF_HV_pop_rv2hv_helem 8
#define MDEREF_HV_gvsv_vivify_rv2hv_helem 9
#define MDEREF_HV_padsv_vivify_rv2hv_helem 10
#define MDEREF_HV_vivify_rv2hv_helem 11
#define MDEREF_HV_padhv_helem 12
#define MDEREF_HV_gvhv_helem 13

#define MDEREF_ACTION_MASK 0xf

/* key / index type */

#define MDEREF_INDEX_none 0x00 /* run external ops to generate index */
#define MDEREF_INDEX_const 0x10 /* index is const PV/UV */
#define MDEREF_INDEX_padsv 0x20 /* index is lexical var */
#define MDEREF_INDEX_gvsv 0x30 /* index is GV */

#define MDEREF_INDEX_MASK 0x30

/* bit flags */

#define MDEREF_FLAG_last 0x40 /* the last [ah]elem; PL_op flags apply */

#define MDEREF_MASK 0x7F
#define MDEREF_SHIFT 7

/* Message shared by doop.c/pp.c for ops that reject above-0xFF code
 * points; takes the op name as its %s argument. */
#if defined(PERL_IN_DOOP_C) || defined(PERL_IN_PP_C)
# define FATAL_ABOVE_FF_MSG \
"Use of strings with code points over 0xFF as arguments to " \
"%s operator is not allowed"
#endif
/* Sentinel UV values used by the tr/// implementation; chosen as the
 * highest UVs, which cannot be legal code points. */
#if defined(PERL_IN_OP_C) || defined(PERL_IN_DOOP_C) || defined(PERL_IN_PERL_C)
# define TR_UNMAPPED (UV)-1
# define TR_DELETE (UV)-2
# define TR_R_EMPTY (UV)-3 /* rhs (replacement) is empty */
# define TR_OOB (UV)-4 /* Something that isn't one of the others */
# define TR_SPECIAL_HANDLING TR_DELETE /* Can occupy same value */
# define TR_UNLISTED TR_UNMAPPED /* A synonym whose name is clearer
at times */
#endif
#if defined(PERL_IN_OP_C) || defined(PERL_IN_TOKE_C)
/* Byte that can never appear in legal UTF-8, reused as an in-band range
 * marker by the tokenizer/op code. */
#define RANGE_INDICATOR ILLEGAL_UTF8_BYTE
#endif
| 1125 | |
| 1126 | /* stuff for OP_ARGCHECK */ |
| 1127 | |
/* Aux data attached to an OP_ARGCHECK op, describing a sub signature's
 * arity so the argument list can be checked at runtime. */
struct op_argcheck_aux {
    UV params; /* number of positional parameters */
    UV opt_params; /* number of optional positional parameters */
    char slurpy; /* presence of slurpy: may be '\0', '@' or '%' */
};
| 1133 | |
| 1134 | |
| 1135 | /* |
| 1136 | * ex: set ts=8 sts=4 sw=4 et: |
| 1137 | */ |