+=cut
+ */
+/* UTF8SKIP(s): the total number of bytes occupied by the UTF-8 encoded
+ * character whose first (perhaps only) byte is pointed to by 's'.  This is a
+ * simple table lookup keyed on that first byte. */
+#define UTF8SKIP(s) PL_utf8skip[*(const U8*)(s)]
+
+/* Synonym for UTF8SKIP(); both spellings are part of the public interface */
+#define UTF8_SKIP(s) UTF8SKIP(s)
+
+/* Most code that says 'UNI_' really means the native value for code points up
+ * through 255; this older name is kept as a synonym of UVCHR_IS_INVARIANT */
+#define UNI_IS_INVARIANT(cp) UVCHR_IS_INVARIANT(cp)
+
+/* Is the byte 'c' the same character when encoded in UTF-8 as when not?  This
+ * works on both UTF-8 encoded strings and non-encoded ones, as it returns
+ * TRUE in each for the exact same set of bit patterns.  It is valid on a
+ * subset of what UVCHR_IS_INVARIANT is valid on, so can just use that; the
+ * compiler should optimize out anything extraneous given the implementation
+ * of the latter.  The '| 0' leaves an integer argument unchanged, but makes
+ * sure this isn't mistakenly called with a pointer argument: a pointer can't
+ * be OR'ed with an integer, so such a call fails to compile. */
+#define UTF8_IS_INVARIANT(c) UVCHR_IS_INVARIANT((c) | 0)
+
+/* Like the above, but its name implies a non-UTF8 input, which as the
+ * comments above show, doesn't matter as to its implementation; the same
+ * underlying test serves both */
+#define NATIVE_BYTE_IS_INVARIANT(c) UVCHR_IS_INVARIANT(c)
+
+/* The macros in the next 4 sets are used to generate the two utf8 or
+ * utfebcdic bytes from an ordinal that is known to fit into exactly two (not
+ * one) bytes; it must be no larger than 0x3FF to work across both encodings.
+ * (The range checks below use '~MAX_UTF8_TWO_BYTE', which admits the maximum
+ * itself, so "no larger than", not "less than".) */
+
+/* These two are helper macros for the other three sets, and should not be used
+ * directly anywhere else.  'translate_function' is either NATIVE_TO_LATIN1
+ * (which works for code points up through 0xFF) or NATIVE_TO_UNI which works
+ * for any code point.  HI yields the start byte (the high bits of the
+ * translated value OR'ed with the 2-byte start marker); LO yields the
+ * continuation byte (the low bits OR'ed with the continuation marker).  The
+ * assertions reject inputs that would fit in a single byte. */
+#define __BASE_TWO_BYTE_HI(c, translate_function) \
+ (__ASSERT_(! UVCHR_IS_INVARIANT(c)) \
+ I8_TO_NATIVE_UTF8((translate_function(c) >> UTF_ACCUMULATION_SHIFT) \
+ | UTF_START_MARK(2)))
+#define __BASE_TWO_BYTE_LO(c, translate_function) \
+ (__ASSERT_(! UVCHR_IS_INVARIANT(c)) \
+ I8_TO_NATIVE_UTF8((translate_function(c) & UTF_CONTINUATION_MASK) \
+ | UTF_CONTINUATION_MARK))
+
+/* The next two macros should not be used.  They were designed to be usable as
+ * the case label of a switch statement, but this doesn't work for EBCDIC
+ * (presumably because I8_TO_NATIVE_UTF8 is not an integer constant expression
+ * there — TODO confirm).  Use regen/unicode_constants.pl instead */
+#define UTF8_TWO_BYTE_HI_nocast(c) __BASE_TWO_BYTE_HI(c, NATIVE_TO_UNI)
+#define UTF8_TWO_BYTE_LO_nocast(c) __BASE_TWO_BYTE_LO(c, NATIVE_TO_UNI)
+
+/* The next two macros are used when the source should be a single byte
+ * character (0..255); that precondition is checked for under DEBUGGING via
+ * the FITS_IN_8_BITS assertion, and the cheaper NATIVE_TO_LATIN1 translation
+ * suffices for this range */
+#define UTF8_EIGHT_BIT_HI(c) (__ASSERT_(FITS_IN_8_BITS(c)) \
+ ( __BASE_TWO_BYTE_HI(c, NATIVE_TO_LATIN1)))
+#define UTF8_EIGHT_BIT_LO(c) (__ASSERT_(FITS_IN_8_BITS(c)) \
+ (__BASE_TWO_BYTE_LO(c, NATIVE_TO_LATIN1)))
+
+/* These final two macros in the series are used when the source can be any
+ * code point whose UTF-8 is known to occupy 2 bytes; they are less efficient
+ * than the EIGHT_BIT versions on EBCDIC platforms.  We use the bitwise
+ * complement '~' operator instead of "<=" to avoid getting compiler warnings
+ * (presumably about signed/unsigned comparison — TODO confirm).
+ * MAX_UTF8_TWO_BYTE should be exactly all one bits in the lower few places,
+ * so '(c) & ~MAX_UTF8_TWO_BYTE' is zero precisely when c fits in two bytes,
+ * and the ~ works.  A 'sizeof(c) == 1' argument is accepted without a range
+ * check, since any single byte fits. */
+#define UTF8_TWO_BYTE_HI(c) \
+ (__ASSERT_((sizeof(c) == 1) \
+ || !(((WIDEST_UTYPE)(c)) & ~MAX_UTF8_TWO_BYTE)) \
+ (__BASE_TWO_BYTE_HI(c, NATIVE_TO_UNI)))
+#define UTF8_TWO_BYTE_LO(c) \
+ (__ASSERT_((sizeof(c) == 1) \
+ || !(((WIDEST_UTYPE)(c)) & ~MAX_UTF8_TWO_BYTE)) \
+ (__BASE_TWO_BYTE_LO(c, NATIVE_TO_UNI)))
+
+/* This byte is illegal in any well-formed UTF-8, on both EBCDIC and ASCII
+ * platforms, as it could occur only in an overlong (hence malformed)
+ * encoding; handy as a sentinel that can never match valid input */
+#define ILLEGAL_UTF8_BYTE I8_TO_NATIVE_UTF8(0xC1)