From 1c4b2386d9e49f946664295ac003573dea41155e Mon Sep 17 00:00:00 2001 From: Yves Orton Date: Thu, 1 Jun 2017 15:02:27 +0200 Subject: [PATCH] Restore "Move utility macros to their own file" This reverts commit 3f023586c2fbf826d45cf78795361337eca3daa1, which was a revert of commit 259e968485f855f70472c8be9267efceca42b0fb. After this patch hv_func.h is left with only logic relating to selecting and configuring the hash function we use, not the utility macros (such as ROTL and equivalent) our hash functions use and share. --- MANIFEST | 1 + hv_func.h | 83 +------------------------------------------------------------- hv_macro.h | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 82 deletions(-) create mode 100644 hv_macro.h diff --git a/MANIFEST b/MANIFEST index bd02337..0d61af0 100644 --- a/MANIFEST +++ b/MANIFEST @@ -4448,6 +4448,7 @@ hints/vos.sh Hints for named architecture hv.c Hash value code hv.h Hash value header hv_func.h Hash value static inline function header +hv_macro.h Macros used by hv_func.h inline.h Static inline functions INSTALL Detailed installation instructions install_lib.pl functions shared between install* scripts diff --git a/hv_func.h b/hv_func.h index b05eb87..ce2d50b 100644 --- a/hv_func.h +++ b/hv_func.h @@ -4,92 +4,11 @@ * to avoid "algorithmic complexity attacks". * * If USE_HASH_SEED is defined, hash randomisation is done by default - * If USE_HASH_SEED_EXPLICIT is defined, hash randomisation is done - * only if the environment variable PERL_HASH_SEED is set. * (see also perl.c:perl_parse() and S_init_tls_and_interp() and util.c:get_hash_seed()) */ - #ifndef PERL_SEEN_HV_FUNC_H /* compile once */ #define PERL_SEEN_HV_FUNC_H - -#if IVSIZE == 8 -#define CAN64BITHASH -#endif - -/*----------------------------------------------------------------------------- - * Endianess, misalignment capabilities and util macros - * - * The following 3 macros are defined in this section. 
The other macros defined - * are only needed to help derive these 3. - * - * U8TO16_LE(x) Read a little endian unsigned 32-bit int - * U8TO32_LE(x) Read a little endian unsigned 32-bit int - * U8TO28_LE(x) Read a little endian unsigned 32-bit int - * ROTL32(x,r) Rotate x left by r bits - * ROTL64(x,r) Rotate x left by r bits - * ROTR32(x,r) Rotate x right by r bits - * ROTR64(x,r) Rotate x right by r bits - */ - -#ifndef U32_ALIGNMENT_REQUIRED - #if (BYTEORDER == 0x1234 || BYTEORDER == 0x12345678) - #define U8TO16_LE(ptr) (*((const U16*)(ptr))) - #define U8TO32_LE(ptr) (*((const U32*)(ptr))) - #define U8TO64_LE(ptr) (*((const U64*)(ptr))) - #elif (BYTEORDER == 0x4321 || BYTEORDER == 0x87654321) - #if defined(__GNUC__) && (__GNUC__>4 || (__GNUC__==4 && __GNUC_MINOR__>=3)) - #define U8TO16_LE(ptr) (__builtin_bswap16(*((U16*)(ptr)))) - #define U8TO32_LE(ptr) (__builtin_bswap32(*((U32*)(ptr)))) - #define U8TO64_LE(ptr) (__builtin_bswap64(*((U64*)(ptr)))) - #endif - #endif -#endif - -#ifndef U8TO16_LE - /* Without a known fast bswap32 we're just as well off doing this */ - #define U8TO16_LE(ptr) ((U32)(ptr)[0]|(U32)(ptr)[1]<<8) - #define U8TO32_LE(ptr) ((U32)(ptr)[0]|(U32)(ptr)[1]<<8|(U32)(ptr)[2]<<16|(U32)(ptr)[3]<<24) - #define U8TO64_LE(ptr) ((U64)(ptr)[0]|(U64)(ptr)[1]<<8|(U64)(ptr)[2]<<16|(U64)(ptr)[3]<<24|\ - (U64)(ptr)[4]<<32|(U64)(ptr)[5]<<40|\ - (U64)(ptr)[6]<<48|(U64)(ptr)[7]<<56) -#endif - -#ifdef CAN64BITHASH - #ifndef U64TYPE - /* This probably isn't going to work, but failing with a compiler error due to - lack of uint64_t is no worse than failing right now with an #error. 
*/ - #define U64 uint64_t - #endif -#endif - -/* Find best way to ROTL32/ROTL64 */ -#if defined(_MSC_VER) - #include <stdlib.h> /* Microsoft put _rotl declaration in here */ - #define ROTL32(x,r) _rotl(x,r) - #define ROTR32(x,r) _rotr(x,r) - #define ROTL64(x,r) _rotl64(x,r) - #define ROTR64(x,r) _rotr64(x,r) -#else - /* gcc recognises this code and generates a rotate instruction for CPUs with one */ - #define ROTL32(x,r) (((U32)(x) << (r)) | ((U32)(x) >> (32 - (r)))) - #define ROTR32(x,r) (((U32)(x) << (32 - (r))) | ((U32)(x) >> (r))) - #define ROTL64(x,r) ( ( (U64)(x) << (r) ) | ( (U64)(x) >> ( 64 - (r) ) ) ) - #define ROTR64(x,r) ( ( (U64)(x) << ( 64 - (r) ) ) | ( (U64)(x) >> (r) ) ) -#endif - - -#ifdef UV_IS_QUAD -#define ROTL_UV(x,r) ROTL64(x,r) -#define ROTR_UV(x,r) ROTL64(x,r) -#else -#define ROTL_UV(x,r) ROTL32(x,r) -#define ROTR_UV(x,r) ROTR32(x,r) -#endif - -/*-----------------------------------------------------------------------------* - * And now set up the actual hashing macros - *-----------------------------------------------------------------------------*/ -#define PERL_HASH_FUNC_ZAPHOD32 +#include "hv_macro.h" #if !( 0 \ || defined(PERL_HASH_FUNC_SIPHASH) \ diff --git a/hv_macro.h b/hv_macro.h new file mode 100644 index 0000000..77a4c84 --- /dev/null +++ b/hv_macro.h @@ -0,0 +1,81 @@ +#ifndef PERL_SEEN_HV_MACRO_H /* compile once */ +#define PERL_SEEN_HV_MACRO_H + +#if IVSIZE == 8 +#define CAN64BITHASH +#endif + +/*----------------------------------------------------------------------------- + * Endianess, misalignment capabilities and util macros + * + * The following 3 macros are defined in this section. The other macros defined + * are only needed to help derive these 3.
+ * + * U8TO16_LE(x) Read a little endian unsigned 16-bit int + * U8TO32_LE(x) Read a little endian unsigned 32-bit int + * U8TO64_LE(x) Read a little endian unsigned 64-bit int + * ROTL32(x,r) Rotate x left by r bits + * ROTL64(x,r) Rotate x left by r bits + * ROTR32(x,r) Rotate x right by r bits + * ROTR64(x,r) Rotate x right by r bits + */ + +#ifndef U32_ALIGNMENT_REQUIRED + #if (BYTEORDER == 0x1234 || BYTEORDER == 0x12345678) + #define U8TO16_LE(ptr) (*((const U16*)(ptr))) + #define U8TO32_LE(ptr) (*((const U32*)(ptr))) + #define U8TO64_LE(ptr) (*((const U64*)(ptr))) + #elif (BYTEORDER == 0x4321 || BYTEORDER == 0x87654321) + #if defined(__GNUC__) && (__GNUC__>4 || (__GNUC__==4 && __GNUC_MINOR__>=3)) + #define U8TO16_LE(ptr) (__builtin_bswap16(*((U16*)(ptr)))) + #define U8TO32_LE(ptr) (__builtin_bswap32(*((U32*)(ptr)))) + #define U8TO64_LE(ptr) (__builtin_bswap64(*((U64*)(ptr)))) + #endif + #endif +#endif + +#ifndef U8TO16_LE + /* Without a known fast bswap32 we're just as well off doing this */ + #define U8TO16_LE(ptr) ((U32)(ptr)[0]|(U32)(ptr)[1]<<8) + #define U8TO32_LE(ptr) ((U32)(ptr)[0]|(U32)(ptr)[1]<<8|(U32)(ptr)[2]<<16|(U32)(ptr)[3]<<24) + #define U8TO64_LE(ptr) ((U64)(ptr)[0]|(U64)(ptr)[1]<<8|(U64)(ptr)[2]<<16|(U64)(ptr)[3]<<24|\ + (U64)(ptr)[4]<<32|(U64)(ptr)[5]<<40|\ + (U64)(ptr)[6]<<48|(U64)(ptr)[7]<<56) +#endif + +#ifdef CAN64BITHASH + #ifndef U64TYPE + /* This probably isn't going to work, but failing with a compiler error due to + lack of uint64_t is no worse than failing right now with an #error.
*/ + #define U64 uint64_t + #endif +#endif + +/* Find best way to ROTL32/ROTL64 */ +#if defined(_MSC_VER) + #include <stdlib.h> /* Microsoft put _rotl declaration in here */ + #define ROTL32(x,r) _rotl(x,r) + #define ROTR32(x,r) _rotr(x,r) + #define ROTL64(x,r) _rotl64(x,r) + #define ROTR64(x,r) _rotr64(x,r) +#else + /* gcc recognises this code and generates a rotate instruction for CPUs with one */ + #define ROTL32(x,r) (((U32)(x) << (r)) | ((U32)(x) >> (32 - (r)))) + #define ROTR32(x,r) (((U32)(x) << (32 - (r))) | ((U32)(x) >> (r))) + #define ROTL64(x,r) ( ( (U64)(x) << (r) ) | ( (U64)(x) >> ( 64 - (r) ) ) ) + #define ROTR64(x,r) ( ( (U64)(x) << ( 64 - (r) ) ) | ( (U64)(x) >> (r) ) ) +#endif + + +#ifdef UV_IS_QUAD +#define ROTL_UV(x,r) ROTL64(x,r) +#define ROTR_UV(x,r) ROTR64(x,r) +#else +#define ROTL_UV(x,r) ROTL32(x,r) +#define ROTR_UV(x,r) ROTR32(x,r) +#endif +#if IVSIZE == 8 +#define CAN64BITHASH +#endif + +#endif -- 1.8.3.1