From 40089428c5fdd72f26bd8749a70b770537fe7043 Mon Sep 17 00:00:00 2001
From: Jay D Dee
Date: Mon, 8 Mar 2021 22:44:44 -0500
Subject: [PATCH] v3.15.7

---
 INSTALL_WINDOWS | 141 +++-
 RELEASE_NOTES | 8 +
 algo/echo/aes_ni/hash.c | 4 +-
 algo/echo/echo-hash-4way.c | 17 +-
 algo/fugue/fugue-aesni.h | 6 +-
 algo/groestl/groestl256-hash-4way.c | 54 +-
 algo/groestl/groestl512-hash-4way.c | 20 +-
 algo/hamsi/hamsi-hash-4way.c | 18 +-
 algo/keccak/keccak-macros.c | 48 +-
 algo/luffa/luffa-hash-2way.c | 72 +-
 algo/luffa/luffa_for_sse2.c | 12 +-
 algo/sha/sph_sha2.c | 2 +
 algo/shavite/shavite-hash-2way.c | 27 +-
 algo/shavite/shavite-hash-4way.c | 10 +-
 algo/swifftx/stdbool.h | 47 --
 algo/swifftx/swifftx.c.bak | 1155 ---------------------------
 configure | 20 +-
 configure.ac | 2 +-
 cpu-miner.c | 130 +--
 miner.h | 5 +-
 simd-utils.h | 2 +-
 simd-utils/simd-128.h | 160 ++--
 simd-utils/simd-256.h | 359 ++-------
 simd-utils/simd-512.h | 283 +++----
 simd-utils/simd-64.h | 83 +-
 simd-utils/simd-int.h | 74 +-
 26 files changed, 598 insertions(+), 2161 deletions(-)
 delete mode 100644 algo/swifftx/stdbool.h
 delete mode 100644 algo/swifftx/swifftx.c.bak

diff --git a/INSTALL_WINDOWS b/INSTALL_WINDOWS
index f2e2c80a..02a829ed 100644
--- a/INSTALL_WINDOWS
+++ b/INSTALL_WINDOWS
@@ -1,5 +1,9 @@
 Instructions for compiling cpuminer-opt for Windows.

+These instructions may be out of date. Please consult the wiki for
+the latest:
+
+https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source

 Windows compilation using Visual Studio is not supported. Mingw64 is
 used on a Linux system (bare metal or virtual machine) to cross-compile
@@ -24,79 +28,76 @@ Refer to Linux compile instructions and install required packages.

 Additionally, install mingw-w64.

-sudo apt-get install mingw-w64
+sudo apt-get install mingw-w64 libz-mingw-w64-dev

 2. Create a local library directory for packages to be compiled in the next
 step. Suggested location is $HOME/usr/lib/

+$ mkdir $HOME/usr/lib
+
 3. Download and build other packages for mingw that don't have a mingw64
 version available in the repositories.

 Download the following source code packages from their respective and
 respected download locations, copy them to ~/usr/lib/ and uncompress them.

-openssl
-curl
-gmp
+openssl: https://github.com/openssl/openssl/releases
+
+curl: https://github.com/curl/curl/releases
+
+gmp: https://gmplib.org/download/gmp/

-In most cases the latest vesrion is ok but it's safest to download
-the same major and minor version as included in your distribution.
+In most cases the latest version is ok but it's safest to download the same major and minor version as included in your distribution. The following uses versions from Ubuntu 20.04. Change version numbers as required.

-Run the following commands or follow the supplied instructions.
-Do not run "make install" unless you are using ~/usr/lib, which isn't
-recommended.
+Run the following commands or follow the supplied instructions. Do not run "make install" unless you are using /usr/lib, which isn't recommended.

-Some instructions insist on running "make check". If make check fails
-it may still work, YMMV.
+Some instructions insist on running "make check". If make check fails it may still work, YMMV.

-You can speed up "make" by using all CPU cores available with "-j n" where
-n is the number of CPU threads you want to use.
+You can speed up "make" by using all CPU cores available with "-j n" where n is the number of CPU threads you want to use.
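For example, a minimal sketch assuming an 8-thread build machine; on Linux, nproc can supply the count automatically:

$ make -j 8

or

$ make -j "$(nproc)"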
 openssl:

-./Configure mingw64 shared --cross-compile-prefix=x86_64-w64-mingw32
-make
+$ ./Configure mingw64 shared --cross-compile-prefix=x86_64-w64-mingw32-
+$ make
+
+Make may fail with an ld error; just ensure libcrypto-1_1-x64.dll is created.

 curl:

-./configure --with-winssl --with-winidn --host=x86_64-w64-mingw32
-make
+$ ./configure --with-winssl --with-winidn --host=x86_64-w64-mingw32
+$ make

 gmp:

-./configure --host=x86_64-w64-mingw32
-make
-
-
+$ ./configure --host=x86_64-w64-mingw32
+$ make

 4. Tweak the environment.

-This step is required everytime you login or the commands can be added to
-.bashrc.
+This step is required every time you log in, or the commands can be added to .bashrc.

-Define some local variables to point to local library.
+Define some local variables to point to the local library.

-export LOCAL_LIB="$HOME/usr/lib"
+$ export LOCAL_LIB="$HOME/usr/lib"

-export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"
+$ export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"

-export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"
+$ export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"

-Create a release directory and copy some dll files previously built.
-This can be done outside of cpuminer-opt and only needs to be done once.
-If the release directory is in cpuminer-opt directory it needs to be
-recreated every a source package is decompressed.
+Adjust for gcc version:

-mkdir release
-cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
-cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
-cp /usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/libstdc++-6.dll release/
-cp /usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/libgcc_s_seh-1.dll release/
-cp $LOCAL_LIB/openssl/libcrypto-1_1-x64.dll release/
-cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/
+$ export GCC_MINGW_LIB="/usr/lib/gcc/x86_64-w64-mingw32/9.3-win32"
+Create a release directory and copy some dll files previously built. This can be done outside of cpuminer-opt and only needs to be done once. If the release directory is in the cpuminer-opt directory it needs to be recreated every time a source package is decompressed.
+$ mkdir release
+$ cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
+$ cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
+$ cp $GCC_MINGW_LIB/libstdc++-6.dll release/
+$ cp $GCC_MINGW_LIB/libgcc_s_seh-1.dll release/
+$ cp $LOCAL_LIB/openssl/libcrypto-1_1-x64.dll release/
+$ cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/

 The following steps need to be done every time a new source package is opened.
@@ -110,13 +111,73 @@ https://github.com/JayDDee/cpuminer-opt/releases

 Decompress and change to the cpuminer-opt directory.

+6. Compile
+
+Create a link to the locally compiled version of gmp.h
+
+$ ln -s $LOCAL_LIB/gmp-version/gmp.h ./gmp.h
+
+$ ./autogen.sh
+
+Configure the compiler for the CPU architecture of the host machine:
+
+CFLAGS="-O3 -march=native -Wall" ./configure $CONFIGURE_ARGS
+
+or cross compile for a specific CPU architecture:
+
+CFLAGS="-O3 -march=znver1 -Wall" ./configure $CONFIGURE_ARGS
+
+This will compile for AMD Ryzen.
+
+You can compile more generically for a set of specific CPU features if you know what features you want:
+
+CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure $CONFIGURE_ARGS
+
+This will compile for an older CPU that does not have AVX.
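As another sketch, assuming a target CPU that supports AVX2 and AES-NI (adjust the -m flags to whatever your CPU actually provides):

CFLAGS="-O3 -maes -mavx2 -Wall" ./configure $CONFIGURE_ARGS

This will compile for a CPU with AVX2 but without AVX512.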
+
+You can find several examples in README.txt
+
+If you have a CPU with more than 64 threads and Windows 7 or higher, you can enable the CPU Groups feature by adding the following to CFLAGS:
+
+"-D_WIN32_WINNT=0x0601"
+
+Once you have run configure successfully, run the compiler with n CPU threads:
+
+$ make -j n
+
+Copy cpuminer.exe to the release directory, compress it, and copy the release directory to a Windows system, then run cpuminer.exe from the command line.
+
+Run cpuminer
+
+In a command window change directories to the unzipped release folder. To get a list of all options:
+
+cpuminer.exe --help
+
+Command options are specific to where you mine. Refer to the pool's instructions on how to set them.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-6. Prepare to compile

 Create a link to the locally compiled version of gmp.h

-ln -s $LOCAL_LIB/gmp-version/gmp.h ./gmp.h
+$ ln -s $LOCAL_LIB/gmp-version/gmp.h ./gmp.h

 Edit configure.ac to fix lipthread package name.

diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index 9415c721..732b5e64 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -65,6 +65,14 @@ If not what makes it happen or not happen?
 Change Log
 ----------

+v3.15.7
+
+Added accepted/stale/rejected percentage to summary log report.
+Added a warning if share counters mismatch, which could corrupt stats.
+Linux: CPU temperature reporting is more responsive to rising temperature.
+A few AVX2 & AVX512 tweaks.
+Removed some dead code and other cleanup.
+
 v3.15.6

 Implement keccak pre-hash optimization for x16* algos.
diff --git a/algo/echo/aes_ni/hash.c b/algo/echo/aes_ni/hash.c
index 55b27c60..a4e3958c 100644
--- a/algo/echo/aes_ni/hash.c
+++ b/algo/echo/aes_ni/hash.c
@@ -55,8 +55,8 @@ MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x2
 #define ECHO_SUBBYTES(state, i, j) \
 state[i][j] = _mm_aesenc_si128(state[i][j], k1);\
- state[i][j] = _mm_aesenc_si128(state[i][j], M128(zero));\
- k1 = _mm_add_epi32(k1, M128(const1))
+ k1 = _mm_add_epi32(k1, M128(const1));\
+ state[i][j] = _mm_aesenc_si128(state[i][j], M128(zero))

 #define ECHO_MIXBYTES(state1, state2, j, t1, t2, s2) \
 s2 = _mm_add_epi8(state1[0][j], state1[0][j]);\
diff --git a/algo/echo/echo-hash-4way.c b/algo/echo/echo-hash-4way.c
index eb3c41c2..51a9f0a8 100644
--- a/algo/echo/echo-hash-4way.c
+++ b/algo/echo/echo-hash-4way.c
@@ -10,22 +10,20 @@ static const unsigned int mul2ipt[] __attribute__ ((aligned (64))) =
 0xfd5ba600, 0x2a8c71d7, 0x1eb845e3, 0xc96f9234
 };
 */

-// do these need to be reversed?
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) -#define mul2mask \ - m512_const2_64( 0, 0x00001b00 ) +//#define mul2mask m512_const2_64( 0, 0x00001b00 ) //_mm512_set4_epi32( 0, 0, 0, 0x00001b00 ) -// _mm512_set4_epi32( 0x00001b00, 0, 0, 0 ) +//_mm512_set4_epi32( 0x00001b00, 0, 0, 0 ) -#define lsbmask m512_const1_32( 0x01010101 ) +//#define lsbmask m512_const1_32( 0x01010101 ) #define ECHO_SUBBYTES( state, i, j ) \ state[i][j] = _mm512_aesenc_epi128( state[i][j], k1 ); \ - state[i][j] = _mm512_aesenc_epi128( state[i][j], m512_zero ); \ - k1 = _mm512_add_epi32( k1, m512_one_128 ); + k1 = _mm512_add_epi32( k1, one ); \ + state[i][j] = _mm512_aesenc_epi128( state[i][j], m512_zero ); #define ECHO_MIXBYTES( state1, state2, j, t1, t2, s2 ) do \ { \ @@ -140,6 +138,9 @@ void echo_4way_compress( echo_4way_context *ctx, const __m512i *pmsg, unsigned int r, b, i, j; __m512i t1, t2, s2, k1; __m512i _state[4][4], _state2[4][4], _statebackup[4][4]; + __m512i one = m512_one_128; + __m512i mul2mask = m512_const2_64( 0, 0x00001b00 ); + __m512i lsbmask = m512_const1_32( 0x01010101 ); _state[ 0 ][ 0 ] = ctx->state[ 0 ][ 0 ]; _state[ 0 ][ 1 ] = ctx->state[ 0 ][ 1 ]; @@ -406,8 +407,8 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize, #define ECHO_SUBBYTES_2WAY( state, i, j ) \ state[i][j] = _mm256_aesenc_epi128( state[i][j], k1 ); \ + k1 = _mm256_add_epi32( k1, m256_one_128 ); \ state[i][j] = _mm256_aesenc_epi128( state[i][j], m256_zero ); \ - k1 = _mm256_add_epi32( k1, m256_one_128 ); #define ECHO_MIXBYTES_2WAY( state1, state2, j, t1, t2, s2 ) do \ { \ diff --git a/algo/fugue/fugue-aesni.h b/algo/fugue/fugue-aesni.h index be9806f4..d1536641 100644 --- a/algo/fugue/fugue-aesni.h +++ b/algo/fugue/fugue-aesni.h @@ -14,7 +14,11 @@ #ifndef FUGUE_HASH_API_H #define FUGUE_HASH_API_H -#if defined(__AES__) +#if defined(__AES__) + +#if !defined(__SSE4_1__) +#error "Unsupported configuration, AES needs SSE4.1. Compile without AES." 
+#endif #include "algo/sha/sha3_common.h" #include "simd-utils.h" diff --git a/algo/groestl/groestl256-hash-4way.c b/algo/groestl/groestl256-hash-4way.c index dd82a867..adbdf664 100644 --- a/algo/groestl/groestl256-hash-4way.c +++ b/algo/groestl/groestl256-hash-4way.c @@ -51,7 +51,7 @@ int groestl256_4way_full( groestl256_4way_context* ctx, void* output, const int hashlen_m128i = 32 >> 4; // bytes to __m128i const int hash_offset = SIZE256 - hashlen_m128i; int rem = ctx->rem_ptr; - int blocks = len / SIZE256; + uint64_t blocks = len / SIZE256; __m512i* in = (__m512i*)input; int i; @@ -89,21 +89,21 @@ int groestl256_4way_full( groestl256_4way_context* ctx, void* output, if ( i == SIZE256 - 1 ) { // only 1 vector left in buffer, all padding at once - ctx->buffer[i] = m512_const2_64( (uint64_t)blocks << 56, 0x80 ); + ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 ); } else { // add first padding - ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 ); + ctx->buffer[i] = m512_const2_64( 0, 0x80 ); // add zero padding for ( i += 1; i < SIZE256 - 1; i++ ) ctx->buffer[i] = m512_zero; // add length padding, second last byte is zero unless blocks > 255 - ctx->buffer[i] = m512_const2_64( (uint64_t)blocks << 56, 0 ); + ctx->buffer[i] = m512_const2_64( blocks << 56, 0 ); } -// digest final padding block and do output transform + // digest final padding block and do output transform TF512_4way( ctx->chaining, ctx->buffer ); OF512_4way( ctx->chaining ); @@ -122,7 +122,7 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output, const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i const int hash_offset = SIZE256 - hashlen_m128i; int rem = ctx->rem_ptr; - int blocks = len / SIZE256; + uint64_t blocks = len / SIZE256; __m512i* in = (__m512i*)input; int i; @@ -146,20 +146,18 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output, if ( i == SIZE256 - 1 ) { // only 1 vector left in buffer, all padding at once - ctx->buffer[i] = m512_const1_128( _mm_set_epi8( - blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) ); + ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 ); } else { // add first padding - ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 ); + ctx->buffer[i] = m512_const2_64( 0, 0x80 ); // add zero padding for ( i += 1; i < SIZE256 - 1; i++ ) ctx->buffer[i] = m512_zero; // add length padding, second last byte is zero unless blocks > 255 - ctx->buffer[i] = m512_const1_128( _mm_set_epi8( - blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) ); + ctx->buffer[i] = m512_const2_64( blocks << 56, 0 ); } // digest final padding block and do output transform @@ -209,23 +207,23 @@ int groestl256_2way_full( groestl256_2way_context* ctx, void* output, const int hashlen_m128i = 32 >> 4; // bytes to __m128i const int hash_offset = SIZE256 - hashlen_m128i; int rem = ctx->rem_ptr; - int blocks = len / SIZE256; + uint64_t blocks = len / SIZE256; __m256i* in = (__m256i*)input; int i; - if (ctx->chaining == NULL || ctx->buffer == NULL) - return 1; + if (ctx->chaining == NULL || ctx->buffer == NULL) + return 1; - for ( i = 0; i < SIZE256; i++ ) - { + for ( i = 0; i < SIZE256; i++ ) + { ctx->chaining[i] = m256_zero; ctx->buffer[i] = m256_zero; - } + } - // The only non-zero in the IV is len. It can be hard coded. - ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 ); - ctx->buf_ptr = 0; - ctx->rem_ptr = 0; + // The only non-zero in the IV is len. It can be hard coded. 
+ ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 ); + ctx->buf_ptr = 0; + ctx->rem_ptr = 0; // --- update --- @@ -247,7 +245,7 @@ int groestl256_2way_full( groestl256_2way_context* ctx, void* output, if ( i == SIZE256 - 1 ) { // only 1 vector left in buffer, all padding at once - ctx->buffer[i] = m256_const2_64( (uint64_t)blocks << 56, 0x80 ); + ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 ); } else { @@ -258,10 +256,10 @@ int groestl256_2way_full( groestl256_2way_context* ctx, void* output, ctx->buffer[i] = m256_zero; // add length padding, second last byte is zero unless blocks > 255 - ctx->buffer[i] = m256_const2_64( (uint64_t)blocks << 56, 0 ); + ctx->buffer[i] = m256_const2_64( blocks << 56, 0 ); } -// digest final padding block and do output transform + // digest final padding block and do output transform TF512_2way( ctx->chaining, ctx->buffer ); OF512_2way( ctx->chaining ); @@ -279,7 +277,7 @@ int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output, const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i const int hash_offset = SIZE256 - hashlen_m128i; int rem = ctx->rem_ptr; - int blocks = len / SIZE256; + uint64_t blocks = len / SIZE256; __m256i* in = (__m256i*)input; int i; @@ -303,8 +301,7 @@ int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output, if ( i == SIZE256 - 1 ) { // only 1 vector left in buffer, all padding at once - ctx->buffer[i] = m256_const1_128( _mm_set_epi8( - blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) ); + ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 ); } else { @@ -315,8 +312,7 @@ int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output, ctx->buffer[i] = m256_zero; // add length padding, second last byte is zero unless blocks > 255 - ctx->buffer[i] = m256_const1_128( _mm_set_epi8( - blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) ); + ctx->buffer[i] = m256_const2_64( blocks << 56, 0 ); } // digest final padding block and do output transform diff --git a/algo/groestl/groestl512-hash-4way.c b/algo/groestl/groestl512-hash-4way.c index bff6af53..b7547339 100644 --- a/algo/groestl/groestl512-hash-4way.c +++ b/algo/groestl/groestl512-hash-4way.c @@ -43,7 +43,7 @@ int groestl512_4way_update_close( groestl512_4way_context* ctx, void* output, const int hashlen_m128i = 64 / 16; // bytes to __m128i const int hash_offset = SIZE512 - hashlen_m128i; int rem = ctx->rem_ptr; - int blocks = len / SIZE512; + uint64_t blocks = len / SIZE512; __m512i* in = (__m512i*)input; int i; @@ -64,16 +64,14 @@ int groestl512_4way_update_close( groestl512_4way_context* ctx, void* output, if ( i == SIZE512 - 1 ) { // only 1 vector left in buffer, all padding at once - ctx->buffer[i] = m512_const1_128( _mm_set_epi8( - blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) ); + ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 ); } else { - ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 ); + ctx->buffer[i] = m512_const2_64( 0, 0x80 ); for ( i += 1; i < SIZE512 - 1; i++ ) ctx->buffer[i] = m512_zero; - ctx->buffer[i] = m512_const1_128( _mm_set_epi8( - blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) ); + ctx->buffer[i] = m512_const2_64( blocks << 56, 0 ); } TF1024_4way( ctx->chaining, ctx->buffer ); @@ -124,7 +122,7 @@ int groestl512_4way_full( groestl512_4way_context* ctx, void* output, } else { - ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 ); + ctx->buffer[i] = m512_const2_64( 0, 0x80 ); for ( i += 1; i < SIZE512 - 1; i++ ) ctx->buffer[i] = m512_zero; 
ctx->buffer[i] = m512_const2_64( blocks << 56, 0 ); @@ -168,7 +166,7 @@ int groestl512_2way_update_close( groestl512_2way_context* ctx, void* output, const int hashlen_m128i = 64 / 16; // bytes to __m128i const int hash_offset = SIZE512 - hashlen_m128i; int rem = ctx->rem_ptr; - int blocks = len / SIZE512; + uint64_t blocks = len / SIZE512; __m256i* in = (__m256i*)input; int i; @@ -189,16 +187,14 @@ int groestl512_2way_update_close( groestl512_2way_context* ctx, void* output, if ( i == SIZE512 - 1 ) { // only 1 vector left in buffer, all padding at once - ctx->buffer[i] = m256_const1_128( _mm_set_epi8( - blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) ); + ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 ); } else { ctx->buffer[i] = m256_const2_64( 0, 0x80 ); for ( i += 1; i < SIZE512 - 1; i++ ) ctx->buffer[i] = m256_zero; - ctx->buffer[i] = m256_const1_128( _mm_set_epi8( - blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) ); + ctx->buffer[i] = m256_const2_64( blocks << 56, 0 ); } TF1024_2way( ctx->chaining, ctx->buffer ); diff --git a/algo/hamsi/hamsi-hash-4way.c b/algo/hamsi/hamsi-hash-4way.c index d86bd42d..2a952a73 100644 --- a/algo/hamsi/hamsi-hash-4way.c +++ b/algo/hamsi/hamsi-hash-4way.c @@ -548,7 +548,7 @@ static const sph_u32 T512[64][16] = { #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) -// Hamsi 8 way +// Hamsi 8 way AVX512 #define INPUT_BIG8 \ do { \ @@ -849,13 +849,11 @@ void hamsi512_8way_update( hamsi_8way_big_context *sc, const void *data, void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst ) { __m512i pad[1]; - int ch, cl; + uint32_t ch, cl; sph_enc32be( &ch, sc->count_high ); sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) ); - pad[0] = _mm512_set_epi32( cl, ch, cl, ch, cl, ch, cl, ch, - cl, ch, cl, ch, cl, ch, cl, ch ); -// pad[0] = m512_const2_32( cl, ch ); + pad[0] = _mm512_set1_epi64( ((uint64_t)cl << 32 ) | (uint64_t)ch ); sc->buf[0] = m512_const1_64( 0x80 ); hamsi_8way_big( sc, sc->buf, 1 ); hamsi_8way_big_final( sc, pad ); @@ -863,11 +861,9 @@ void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst ) mm512_block_bswap_32( (__m512i*)dst, sc->h ); } - #endif // AVX512 - -// Hamsi 4 way +// Hamsi 4 way AVX2 #define INPUT_BIG \ do { \ @@ -1186,14 +1182,12 @@ void hamsi512_4way_update( hamsi_4way_big_context *sc, const void *data, void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst ) { __m256i pad[1]; - int ch, cl; + uint32_t ch, cl; sph_enc32be( &ch, sc->count_high ); sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) ); - pad[0] = _mm256_set_epi32( cl, ch, cl, ch, cl, ch, cl, ch ); + pad[0] = _mm256_set1_epi64x( ((uint64_t)cl << 32 ) | (uint64_t)ch ); sc->buf[0] = m256_const1_64( 0x80 ); -// sc->buf[0] = _mm256_set_epi32( 0UL, 0x80UL, 0UL, 0x80UL, -// 0UL, 0x80UL, 0UL, 0x80UL ); hamsi_big( sc, sc->buf, 1 ); hamsi_big_final( sc, pad ); diff --git a/algo/keccak/keccak-macros.c b/algo/keccak/keccak-macros.c index 9666e7d8..8d5197c3 100644 --- a/algo/keccak/keccak-macros.c +++ b/algo/keccak/keccak-macros.c @@ -134,65 +134,47 @@ do { \ DECL64(c0); \ DECL64(c1); \ - DECL64(c2); \ - DECL64(c3); \ - DECL64(c4); \ DECL64(bnn); \ NOT64(bnn, b20); \ KHI_XO(c0, b00, b10, b20); \ KHI_XO(c1, b10, bnn, b30); \ - KHI_XA(c2, b20, b30, b40); \ - KHI_XO(c3, b30, b40, b00); \ - KHI_XA(c4, b40, b00, b10); \ + KHI_XA(b20, b20, b30, b40); \ + KHI_XO(b30, b30, b40, b00); \ + KHI_XA(b40, b40, b00, b10); \ MOV64(b00, c0); \ MOV64(b10, c1); \ - MOV64(b20, c2); \ - MOV64(b30, 
c3); \ - MOV64(b40, c4); \ NOT64(bnn, b41); \ KHI_XO(c0, b01, b11, b21); \ KHI_XA(c1, b11, b21, b31); \ - KHI_XO(c2, b21, b31, bnn); \ - KHI_XO(c3, b31, b41, b01); \ - KHI_XA(c4, b41, b01, b11); \ + KHI_XO(b21, b21, b31, bnn); \ + KHI_XO(b31, b31, b41, b01); \ + KHI_XA(b41, b41, b01, b11); \ MOV64(b01, c0); \ MOV64(b11, c1); \ - MOV64(b21, c2); \ - MOV64(b31, c3); \ - MOV64(b41, c4); \ NOT64(bnn, b32); \ KHI_XO(c0, b02, b12, b22); \ KHI_XA(c1, b12, b22, b32); \ - KHI_XA(c2, b22, bnn, b42); \ - KHI_XO(c3, bnn, b42, b02); \ - KHI_XA(c4, b42, b02, b12); \ + KHI_XA(b22, b22, bnn, b42); \ + KHI_XO(b32, bnn, b42, b02); \ + KHI_XA(b42, b42, b02, b12); \ MOV64(b02, c0); \ MOV64(b12, c1); \ - MOV64(b22, c2); \ - MOV64(b32, c3); \ - MOV64(b42, c4); \ NOT64(bnn, b33); \ KHI_XA(c0, b03, b13, b23); \ KHI_XO(c1, b13, b23, b33); \ - KHI_XO(c2, b23, bnn, b43); \ - KHI_XA(c3, bnn, b43, b03); \ - KHI_XO(c4, b43, b03, b13); \ + KHI_XO(b23, b23, bnn, b43); \ + KHI_XA(b33, bnn, b43, b03); \ + KHI_XO(b43, b43, b03, b13); \ MOV64(b03, c0); \ MOV64(b13, c1); \ - MOV64(b23, c2); \ - MOV64(b33, c3); \ - MOV64(b43, c4); \ NOT64(bnn, b14); \ KHI_XA(c0, b04, bnn, b24); \ KHI_XO(c1, bnn, b24, b34); \ - KHI_XA(c2, b24, b34, b44); \ - KHI_XO(c3, b34, b44, b04); \ - KHI_XA(c4, b44, b04, b14); \ + KHI_XA(b24, b24, b34, b44); \ + KHI_XO(b34, b34, b44, b04); \ + KHI_XA(b44, b44, b04, b14); \ MOV64(b04, c0); \ MOV64(b14, c1); \ - MOV64(b24, c2); \ - MOV64(b34, c3); \ - MOV64(b44, c4); \ } while (0) #ifdef IOTA diff --git a/algo/luffa/luffa-hash-2way.c b/algo/luffa/luffa-hash-2way.c index aad56b63..bbc31b9b 100644 --- a/algo/luffa/luffa-hash-2way.c +++ b/algo/luffa/luffa-hash-2way.c @@ -66,6 +66,17 @@ static const uint32 CNS_INIT[128] __attribute((aligned(64))) = { a = _mm512_xor_si512(a,c0);\ b = _mm512_xor_si512(b,c1); +#define MULT24W( a0, a1 ) \ +do { \ + __m512i b = _mm512_xor_si512( a0, \ + _mm512_maskz_shuffle_epi32( 0xbbbb, a1, 16 ) ); \ + a0 = _mm512_or_si512( _mm512_bsrli_epi128( b, 4 ), \ + _mm512_bslli_epi128( a1,12 ) ); \ + a1 = _mm512_or_si512( _mm512_bsrli_epi128( a1, 4 ), \ + _mm512_bslli_epi128( b,12 ) ); \ +} while(0) + +/* #define MULT24W( a0, a1, mask ) \ do { \ __m512i b = _mm512_xor_si512( a0, \ @@ -73,6 +84,7 @@ do { \ a0 = _mm512_or_si512( _mm512_bsrli_epi128(b,4), _mm512_bslli_epi128(a1,12) );\ a1 = _mm512_or_si512( _mm512_bsrli_epi128(a1,4), _mm512_bslli_epi128(b,12) );\ } while(0) +*/ // confirm pointer arithmetic // ok but use array indexes @@ -235,7 +247,6 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg ) __m512i msg0, msg1; __m512i tmp[2]; __m512i x[8]; - const __m512i MASK = m512_const2_64( 0, 0x00000000ffffffff ); t0 = chainv[0]; t1 = chainv[1]; @@ -249,7 +260,7 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg ) t0 = _mm512_xor_si512( t0, chainv[8] ); t1 = _mm512_xor_si512( t1, chainv[9] ); - MULT24W( t0, t1, MASK ); + MULT24W( t0, t1 ); msg0 = _mm512_shuffle_epi32( msg[0], 27 ); msg1 = _mm512_shuffle_epi32( msg[1], 27 ); @@ -268,68 +279,67 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg ) t0 = chainv[0]; t1 = chainv[1]; - MULT24W( chainv[0], chainv[1], MASK ); + MULT24W( chainv[0], chainv[1] ); chainv[0] = _mm512_xor_si512( chainv[0], chainv[2] ); chainv[1] = _mm512_xor_si512( chainv[1], chainv[3] ); - MULT24W( chainv[2], chainv[3], MASK ); + MULT24W( chainv[2], chainv[3] ); chainv[2] = _mm512_xor_si512(chainv[2], chainv[4]); chainv[3] = _mm512_xor_si512(chainv[3], chainv[5]); - MULT24W( chainv[4], chainv[5], MASK ); + MULT24W( chainv[4], chainv[5] ); 
chainv[4] = _mm512_xor_si512(chainv[4], chainv[6]); chainv[5] = _mm512_xor_si512(chainv[5], chainv[7]); - MULT24W( chainv[6], chainv[7], MASK ); + MULT24W( chainv[6], chainv[7] ); chainv[6] = _mm512_xor_si512(chainv[6], chainv[8]); chainv[7] = _mm512_xor_si512(chainv[7], chainv[9]); - MULT24W( chainv[8], chainv[9], MASK ); + MULT24W( chainv[8], chainv[9] ); chainv[8] = _mm512_xor_si512( chainv[8], t0 ); chainv[9] = _mm512_xor_si512( chainv[9], t1 ); t0 = chainv[8]; t1 = chainv[9]; - MULT24W( chainv[8], chainv[9], MASK ); + MULT24W( chainv[8], chainv[9] ); chainv[8] = _mm512_xor_si512( chainv[8], chainv[6] ); chainv[9] = _mm512_xor_si512( chainv[9], chainv[7] ); - MULT24W( chainv[6], chainv[7], MASK ); + MULT24W( chainv[6], chainv[7] ); chainv[6] = _mm512_xor_si512( chainv[6], chainv[4] ); chainv[7] = _mm512_xor_si512( chainv[7], chainv[5] ); - MULT24W( chainv[4], chainv[5], MASK ); + MULT24W( chainv[4], chainv[5] ); chainv[4] = _mm512_xor_si512( chainv[4], chainv[2] ); chainv[5] = _mm512_xor_si512( chainv[5], chainv[3] ); - MULT24W( chainv[2], chainv[3], MASK ); + MULT24W( chainv[2], chainv[3] ); chainv[2] = _mm512_xor_si512( chainv[2], chainv[0] ); chainv[3] = _mm512_xor_si512( chainv[3], chainv[1] ); - MULT24W( chainv[0], chainv[1], MASK ); + MULT24W( chainv[0], chainv[1] ); chainv[0] = _mm512_xor_si512( _mm512_xor_si512( chainv[0], t0 ), msg0 ); chainv[1] = _mm512_xor_si512( _mm512_xor_si512( chainv[1], t1 ), msg1 ); - MULT24W( msg0, msg1, MASK ); + MULT24W( msg0, msg1 ); chainv[2] = _mm512_xor_si512( chainv[2], msg0 ); chainv[3] = _mm512_xor_si512( chainv[3], msg1 ); - MULT24W( msg0, msg1, MASK ); + MULT24W( msg0, msg1 ); chainv[4] = _mm512_xor_si512( chainv[4], msg0 ); chainv[5] = _mm512_xor_si512( chainv[5], msg1 ); - MULT24W( msg0, msg1, MASK ); + MULT24W( msg0, msg1 ); chainv[6] = _mm512_xor_si512( chainv[6], msg0 ); chainv[7] = _mm512_xor_si512( chainv[7], msg1 ); - MULT24W( msg0, msg1, MASK ); + MULT24W( msg0, msg1); chainv[8] = _mm512_xor_si512( chainv[8], msg0 ); chainv[9] = _mm512_xor_si512( chainv[9], msg1 ); - MULT24W( msg0, msg1, MASK ); + MULT24W( msg0, msg1 ); - // replace with ror chainv[3] = _mm512_rol_epi32( chainv[3], 1 ); chainv[5] = _mm512_rol_epi32( chainv[5], 2 ); chainv[7] = _mm512_rol_epi32( chainv[7], 3 ); @@ -496,7 +506,7 @@ int luffa_4way_update( luffa_4way_context *state, const void *data, { // remaining data bytes buffer[0] = _mm512_shuffle_epi8( vdata[0], shuff_bswap32 ); - buffer[1] = m512_const2_64( 0, 0x0000000080000000 ); + buffer[1] = m512_const1_i128( 0x0000000080000000 ); } return 0; } @@ -520,7 +530,7 @@ int luffa_4way_close( luffa_4way_context *state, void *hashval ) rnd512_4way( state, buffer ); else { // empty pad block, constant data - msg[0] = m512_const2_64( 0, 0x0000000080000000 ); + msg[0] = m512_const1_i128( 0x0000000080000000 ); msg[1] = m512_zero; rnd512_4way( state, msg ); } @@ -583,13 +593,13 @@ int luffa512_4way_full( luffa_4way_context *state, void *output, { // padding of partial block msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 ); - msg[1] = m512_const2_64( 0, 0x0000000080000000 ); + msg[1] = m512_const1_i128( 0x0000000080000000 ); rnd512_4way( state, msg ); } else { // empty pad block - msg[0] = m512_const2_64( 0, 0x0000000080000000 ); + msg[0] = m512_const1_i128( 0x0000000080000000 ); msg[1] = m512_zero; rnd512_4way( state, msg ); } @@ -631,13 +641,13 @@ int luffa_4way_update_close( luffa_4way_context *state, { // padding of partial block msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 ); - msg[1] = 
m512_const2_64( 0, 0x0000000080000000 ); + msg[1] = m512_const1_i128( 0x0000000080000000 ); rnd512_4way( state, msg ); } else { // empty pad block - msg[0] = m512_const2_64( 0, 0x0000000080000000 ); + msg[0] = m512_const1_i128( 0x0000000080000000 ); msg[1] = m512_zero; rnd512_4way( state, msg ); } @@ -832,7 +842,7 @@ void rnd512_2way( luffa_2way_context *state, __m256i *msg ) __m256i msg0, msg1; __m256i tmp[2]; __m256i x[8]; - const __m256i MASK = m256_const2_64( 0, 0x00000000ffffffff ); + const __m256i MASK = m256_const1_i128( 0x00000000ffffffff ); t0 = chainv[0]; t1 = chainv[1]; @@ -1088,7 +1098,7 @@ int luffa_2way_update( luffa_2way_context *state, const void *data, { // remaining data bytes buffer[0] = _mm256_shuffle_epi8( vdata[0], shuff_bswap32 ); - buffer[1] = m256_const2_64( 0, 0x0000000080000000 ); + buffer[1] = m256_const1_i128( 0x0000000080000000 ); } return 0; } @@ -1104,7 +1114,7 @@ int luffa_2way_close( luffa_2way_context *state, void *hashval ) rnd512_2way( state, buffer ); else { // empty pad block, constant data - msg[0] = m256_const2_64( 0, 0x0000000080000000 ); + msg[0] = m256_const1_i128( 0x0000000080000000 ); msg[1] = m256_zero; rnd512_2way( state, msg ); } @@ -1159,13 +1169,13 @@ int luffa512_2way_full( luffa_2way_context *state, void *output, { // padding of partial block msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 ); - msg[1] = m256_const2_64( 0, 0x0000000080000000 ); + msg[1] = m256_const1_i128( 0x0000000080000000 ); rnd512_2way( state, msg ); } else { // empty pad block - msg[0] = m256_const2_64( 0, 0x0000000080000000 ); + msg[0] = m256_const1_i128( 0x0000000080000000 ); msg[1] = m256_zero; rnd512_2way( state, msg ); } @@ -1206,13 +1216,13 @@ int luffa_2way_update_close( luffa_2way_context *state, { // padding of partial block msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 ); - msg[1] = m256_const2_64( 0, 0x0000000080000000 ); + msg[1] = m256_const1_i128( 0x0000000080000000 ); rnd512_2way( state, msg ); } else { // empty pad block - msg[0] = m256_const2_64( 0, 0x0000000080000000 ); + msg[0] = m256_const1_i128( 0x0000000080000000 ); msg[1] = m256_zero; rnd512_2way( state, msg ); } diff --git a/algo/luffa/luffa_for_sse2.c b/algo/luffa/luffa_for_sse2.c index 780e56d7..fee498a6 100644 --- a/algo/luffa/luffa_for_sse2.c +++ b/algo/luffa/luffa_for_sse2.c @@ -23,7 +23,7 @@ #include "simd-utils.h" #include "luffa_for_sse2.h" -#define MULT2(a0,a1) do \ +#define MULT2( a0, a1 ) do \ { \ __m128i b = _mm_xor_si128( a0, _mm_shuffle_epi32( _mm_and_si128(a1,MASK), 16 ) ); \ a0 = _mm_or_si128( _mm_srli_si128(b,4), _mm_slli_si128(a1,12) ); \ @@ -345,11 +345,11 @@ HashReturn update_and_final_luffa( hashState_luffa *state, BitSequence* output, // 16 byte partial block exists for 80 byte len if ( state->rembytes ) // padding of partial block - rnd512( state, m128_const_64( 0, 0x80000000 ), + rnd512( state, m128_const_i128( 0x80000000 ), mm128_bswap_32( cast_m128i( data ) ) ); else // empty pad block - rnd512( state, m128_zero, m128_const_64( 0, 0x80000000 ) ); + rnd512( state, m128_zero, m128_const_i128( 0x80000000 ) ); finalization512( state, (uint32*) output ); if ( state->hashbitlen > 512 ) @@ -394,11 +394,11 @@ int luffa_full( hashState_luffa *state, BitSequence* output, int hashbitlen, // 16 byte partial block exists for 80 byte len if ( state->rembytes ) // padding of partial block - rnd512( state, m128_const_64( 0, 0x80000000 ), + rnd512( state, m128_const_i128( 0x80000000 ), mm128_bswap_32( cast_m128i( data ) ) ); else // empty pad block - rnd512( state, 
m128_zero, m128_const_64( 0, 0x80000000 ) ); + rnd512( state, m128_zero, m128_const_i128( 0x80000000 ) ); finalization512( state, (uint32*) output ); if ( state->hashbitlen > 512 ) @@ -606,7 +606,6 @@ static void finalization512( hashState_luffa *state, uint32 *b ) casti_m256i( b, 0 ) = _mm256_shuffle_epi8( casti_m256i( hash, 0 ), shuff_bswap32 ); -// casti_m256i( b, 0 ) = mm256_bswap_32( casti_m256i( hash, 0 ) ); rnd512( state, zero, zero ); @@ -621,7 +620,6 @@ static void finalization512( hashState_luffa *state, uint32 *b ) casti_m256i( b, 1 ) = _mm256_shuffle_epi8( casti_m256i( hash, 0 ), shuff_bswap32 ); -// casti_m256i( b, 1 ) = mm256_bswap_32( casti_m256i( hash, 0 ) ); } #else diff --git a/algo/sha/sph_sha2.c b/algo/sha/sph_sha2.c index e87936dd..513a29fd 100644 --- a/algo/sha/sph_sha2.c +++ b/algo/sha/sph_sha2.c @@ -77,6 +77,7 @@ static const sph_u32 H256[8] = { #else // no SHA +/* static const sph_u32 K[64] = { SPH_C32(0x428A2F98), SPH_C32(0x71374491), SPH_C32(0xB5C0FBCF), SPH_C32(0xE9B5DBA5), @@ -111,6 +112,7 @@ static const sph_u32 K[64] = { SPH_C32(0x90BEFFFA), SPH_C32(0xA4506CEB), SPH_C32(0xBEF9A3F7), SPH_C32(0xC67178F2) }; +*/ #if SPH_SMALL_FOOTPRINT_SHA2 diff --git a/algo/shavite/shavite-hash-2way.c b/algo/shavite/shavite-hash-2way.c index 83f3e66b..1b774263 100644 --- a/algo/shavite/shavite-hash-2way.c +++ b/algo/shavite/shavite-hash-2way.c @@ -23,14 +23,23 @@ static const uint32_t IV512[] = _mm256_blend_epi32( mm256_ror128_32( a ), \ mm256_ror128_32( b ), 0x88 ) +#if defined(__VAES__) + +#define mm256_aesenc_2x128( x, k ) \ + _mm256_aesenc_epi128( x, _mm256_castsi128_si256( k ) ) + +#else + +#define mm256_aesenc_2x128( x, k ) \ + mm256_concat_128( _mm_aesenc_si128( mm128_extr_hi128_256( x ), k ), \ + _mm_aesenc_si128( mm128_extr_lo128_256( x ), k ) ) + +#endif + static void c512_2way( shavite512_2way_context *ctx, const void *msg ) { -#if defined(__VAES__) - const __m256i zero = _mm256_setzero_si256(); -#else const __m128i zero = _mm_setzero_si128(); -#endif __m256i p0, p1, p2, p3, x; __m256i k00, k01, k02, k03, k10, k11, k12, k13; __m256i *m = (__m256i*)msg; @@ -308,7 +317,7 @@ void shavite512_2way_close( shavite512_2way_context *ctx, void *dst ) uint32_t vp = ctx->ptr>>5; // Terminating byte then zero pad - casti_m256i( buf, vp++ ) = m256_const2_64( 0, 0x0000000000000080 ); + casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 ); // Zero pad full vectors up to count for ( ; vp < 6; vp++ ) @@ -388,13 +397,13 @@ void shavite512_2way_update_close( shavite512_2way_context *ctx, void *dst, if ( vp == 0 ) // empty buf, xevan. { - casti_m256i( buf, 0 ) = m256_const2_64( 0, 0x0000000000000080 ); + casti_m256i( buf, 0 ) = m256_const1_i128( 0x0000000000000080 ); memset_zero_256( (__m256i*)buf + 1, 5 ); ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0; } else // half full buf, everyone else. { - casti_m256i( buf, vp++ ) = m256_const2_64( 0, 0x0000000000000080 ); + casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 ); memset_zero_256( (__m256i*)buf + vp, 6 - vp ); } @@ -478,13 +487,13 @@ void shavite512_2way_full( shavite512_2way_context *ctx, void *dst, if ( vp == 0 ) // empty buf, xevan. { - casti_m256i( buf, 0 ) = m256_const2_64( 0, 0x0000000000000080 ); + casti_m256i( buf, 0 ) = m256_const1_i128( 0x0000000000000080 ); memset_zero_256( (__m256i*)buf + 1, 5 ); ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0; } else // half full buf, everyone else. 
{ - casti_m256i( buf, vp++ ) = m256_const2_64( 0, 0x0000000000000080 ); + casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 ); memset_zero_256( (__m256i*)buf + vp, 6 - vp ); } diff --git a/algo/shavite/shavite-hash-4way.c b/algo/shavite/shavite-hash-4way.c index eed4ba14..2b0b7353 100644 --- a/algo/shavite/shavite-hash-4way.c +++ b/algo/shavite/shavite-hash-4way.c @@ -292,7 +292,7 @@ void shavite512_4way_close( shavite512_4way_context *ctx, void *dst ) uint32_t vp = ctx->ptr>>6; // Terminating byte then zero pad - casti_m512i( buf, vp++ ) = m512_const2_64( 0, 0x0000000000000080 ); + casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 ); // Zero pad full vectors up to count for ( ; vp < 6; vp++ ) @@ -372,13 +372,13 @@ void shavite512_4way_update_close( shavite512_4way_context *ctx, void *dst, if ( vp == 0 ) // empty buf, xevan. { - casti_m512i( buf, 0 ) = m512_const2_64( 0, 0x0000000000000080 ); + casti_m512i( buf, 0 ) = m512_const1_i128( 0x0000000000000080 ); memset_zero_512( (__m512i*)buf + 1, 5 ); ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0; } else // half full buf, everyone else. { - casti_m512i( buf, vp++ ) = m512_const2_64( 0, 0x0000000000000080 ); + casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 ); memset_zero_512( (__m512i*)buf + vp, 6 - vp ); } @@ -463,13 +463,13 @@ void shavite512_4way_full( shavite512_4way_context *ctx, void *dst, if ( vp == 0 ) // empty buf, xevan. { - casti_m512i( buf, 0 ) = m512_const2_64( 0, 0x0000000000000080 ); + casti_m512i( buf, 0 ) = m512_const1_i128( 0x0000000000000080 ); memset_zero_512( (__m512i*)buf + 1, 5 ); ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0; } else // half full buf, everyone else. { - casti_m512i( buf, vp++ ) = m512_const2_64( 0, 0x0000000000000080 ); + casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 ); memset_zero_512( (__m512i*)buf + vp, 6 - vp ); } diff --git a/algo/swifftx/stdbool.h b/algo/swifftx/stdbool.h deleted file mode 100644 index d6396c35..00000000 --- a/algo/swifftx/stdbool.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2000 Jeroen Ruigrok van der Werven - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $FreeBSD: src/include/stdbool.h,v 1.6 2002/08/16 07:33:14 alfred Exp $ - */ - -#ifndef _STDBOOL_H_ -#define _STDBOOL_H_ - -#define __bool_true_false_are_defined 1 - -#ifndef __cplusplus - -#define false 0 -#define true 1 - -//#define bool _Bool -//#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 -//typedef int _Bool; -//#endif -typedef int bool; - -#endif /* !__cplusplus */ - -#endif /* !_STDBOOL_H_ */ diff --git a/algo/swifftx/swifftx.c.bak b/algo/swifftx/swifftx.c.bak deleted file mode 100644 index 24453e21..00000000 --- a/algo/swifftx/swifftx.c.bak +++ /dev/null @@ -1,1155 +0,0 @@ -/////////////////////////////////////////////////////////////////////////////////////////////// -// -// SWIFFTX ANSI C OPTIMIZED 32BIT IMPLEMENTATION FOR NIST SHA-3 COMPETITION -// -// SWIFFTX.c -// -// October 2008 -// -// This is the source file of the OPTIMIZED 32BIT implementation of SWIFFTX hash function. -// SWIFFTX is a candidate function for SHA-3 NIST competition. -// More details about SWIFFTX can be found in the accompanying submission documents. -// -/////////////////////////////////////////////////////////////////////////////////////////////// -#include "swifftx.h" -// See the remarks concerning compatibility issues inside stdint.h. -#include "stdint.h" -// Remove this while using gcc: -//#include "stdbool.h" -#include - -/////////////////////////////////////////////////////////////////////////////////////////////// -// Constants and static tables portion. -/////////////////////////////////////////////////////////////////////////////////////////////// - -// In SWIFFTX we work over Z_257, so this is the modulus and the arithmetic is performed modulo -// this number. -#define FIELD_SIZE 257 - -// The size of FFT we use: -#define N 64 - -#define LOGN 6 - -#define EIGHTH_N (N / 8) - -// The number of FFTS done on the input. -#define M (SWIFFTX_INPUT_BLOCK_SIZE / 8) // 32 - -// Omega is the 128th root of unity in Z_257. -// We choose w = 42. -#define OMEGA 42 - -// The size of the inner FFT lookup table: -#define W 8 - -// Calculates the sum and the difference of two numbers. -// -// Parameters: -// - A: the first operand. After the operation stores the sum of the two operands. -// - B: the second operand. After the operation stores the difference between the first and the -// second operands. -#define ADD_SUB(A, B) {register int temp = (B); B = ((A) - (B)); A = ((A) + (temp));} - -// Quickly reduces an integer modulo 257. -// -// Parameters: -// - A: the input. -#define Q_REDUCE(A) (((A) & 0xff) - ((A) >> 8)) - -// Since we need to do the setup only once, this is the indicator variable: -static bool wasSetupDone = false; - -// This array stores the powers of omegas that correspond to the indices, which are the input -// values. Known also as the "outer FFT twiddle factors". -swift_int16_t multipliers[N]; - -// This array stores the powers of omegas, multiplied by the corresponding values. -// We store this table to save computation time. -// -// To calculate the intermediate value of the compression function (the first out of two -// stages), we multiply the k-th bit of x_i by w^[(2i + 1) * k]. {x_i} is the input to the -// compression function, i is between 0 and 31, x_i is a 64-bit value. -// One can see the formula for this (intermediate) stage in the SWIFFT FSE 2008 paper -- -// formula (2), section 3, page 6. -swift_int16_t fftTable[256 * EIGHTH_N]; - -// The A's we use in SWIFFTX shall be random elements of Z_257. 
-// We generated these A's from the decimal expansion of PI as follows: we converted each -// triple of digits into a decimal number d. If d < (257 * 3) we used (d % 257) for the next A -// element, otherwise move to the next triple of digits in the expansion. This guarntees that -// the A's are random, provided that PI digits are. -const swift_int16_t As[3 * M * N] = -{141, 78, 139, 75, 238, 205, 129, 126, 22, 245, 197, 169, 142, 118, 105, 78, - 50, 149, 29, 208, 114, 34, 85, 117, 67, 148, 86, 256, 25, 49, 133, 93, - 95, 36, 68, 231, 211, 102, 151, 128, 224, 117, 193, 27, 102, 187, 7, 105, - 45, 130, 108, 124, 171, 151, 189, 128, 218, 134, 233, 165, 14, 201, 145, 134, - 52, 203, 91, 96, 197, 69, 134, 213, 136, 93, 3, 249, 141, 16, 210, 73, - 6, 92, 58, 74, 174, 6, 254, 91, 201, 107, 110, 76, 103, 11, 73, 16, - 34, 209, 7, 127, 146, 254, 95, 176, 57, 13, 108, 245, 77, 92, 186, 117, - 124, 97, 105, 118, 34, 74, 205, 122, 235, 53, 94, 238, 210, 227, 183, 11, - 129, 159, 105, 183, 142, 129, 86, 21, 137, 138, 224, 223, 190, 188, 179, 188, - 256, 25, 217, 176, 36, 176, 238, 127, 160, 210, 155, 148, 132, 0, 54, 127, - 145, 6, 46, 85, 243, 95, 173, 123, 178, 207, 211, 183, 224, 173, 146, 35, - 71, 114, 50, 22, 175, 1, 28, 19, 112, 129, 21, 34, 161, 159, 115, 52, - 4, 193, 211, 92, 115, 49, 59, 217, 218, 96, 61, 81, 24, 202, 198, 89, - 45, 128, 8, 51, 253, 87, 171, 35, 4, 188, 171, 10, 3, 137, 238, 73, - 19, 208, 124, 163, 103, 177, 155, 147, 46, 84, 253, 233, 171, 241, 211, 217, - 159, 48, 96, 79, 237, 18, 171, 226, 99, 1, 97, 195, 216, 163, 198, 95, - 0, 201, 65, 228, 21, 153, 124, 230, 44, 35, 44, 108, 85, 156, 249, 207, - 26, 222, 131, 1, 60, 242, 197, 150, 181, 19, 116, 213, 75, 98, 124, 240, - 123, 207, 62, 255, 60, 143, 187, 157, 139, 9, 12, 104, 89, 49, 193, 146, - 104, 196, 181, 82, 198, 253, 192, 191, 255, 122, 212, 104, 47, 20, 132, 208, - 46, 170, 2, 69, 234, 36, 56, 163, 28, 152, 104, 238, 162, 56, 24, 58, - 38, 150, 193, 254, 253, 125, 173, 35, 73, 126, 247, 239, 216, 6, 199, 15, - 90, 12, 97, 122, 9, 84, 207, 127, 219, 72, 58, 30, 29, 182, 41, 192, - 235, 248, 237, 74, 72, 176, 210, 252, 45, 64, 165, 87, 202, 241, 236, 223, - 151, 242, 119, 239, 52, 112, 169, 28, 13, 37, 160, 60, 158, 81, 133, 60, - 16, 145, 249, 192, 173, 217, 214, 93, 141, 184, 54, 34, 161, 104, 157, 95, - 38, 133, 218, 227, 211, 181, 9, 66, 137, 143, 77, 33, 248, 159, 4, 55, - 228, 48, 99, 219, 222, 184, 15, 36, 254, 256, 157, 237, 87, 139, 209, 113, - 232, 85, 126, 167, 197, 100, 103, 166, 64, 225, 125, 205, 117, 135, 84, 128, - 231, 112, 90, 241, 28, 22, 210, 147, 186, 49, 230, 21, 108, 39, 194, 47, - 123, 199, 107, 114, 30, 210, 250, 143, 59, 156, 131, 133, 221, 27, 76, 99, - 208, 250, 78, 12, 211, 141, 95, 81, 195, 106, 8, 232, 150, 212, 205, 221, - 11, 225, 87, 219, 126, 136, 137, 180, 198, 48, 68, 203, 239, 252, 194, 235, - 142, 137, 174, 172, 190, 145, 250, 221, 182, 204, 1, 195, 130, 153, 83, 241, - 161, 239, 211, 138, 11, 169, 155, 245, 174, 49, 10, 166, 16, 130, 181, 139, - 222, 222, 112, 99, 124, 94, 51, 243, 133, 194, 244, 136, 35, 248, 201, 177, - 178, 186, 129, 102, 89, 184, 180, 41, 149, 96, 165, 72, 225, 231, 134, 158, - 199, 28, 249, 16, 225, 195, 10, 210, 164, 252, 138, 8, 35, 152, 213, 199, - 82, 116, 97, 230, 63, 199, 241, 35, 79, 120, 54, 174, 67, 112, 1, 76, - 69, 222, 194, 96, 82, 94, 25, 228, 196, 145, 155, 136, 228, 234, 46, 101, - 246, 51, 103, 166, 246, 75, 9, 200, 161, 4, 108, 35, 129, 168, 208, 144, - 50, 14, 13, 220, 41, 132, 122, 127, 194, 9, 232, 234, 107, 28, 187, 8, - 51, 141, 
97, 221, 225, 9, 113, 170, 166, 102, 135, 22, 231, 185, 227, 187, - 110, 145, 251, 146, 76, 22, 146, 228, 7, 53, 64, 25, 62, 198, 130, 190, - 221, 232, 169, 64, 188, 199, 237, 249, 173, 218, 196, 191, 48, 224, 5, 113, - 100, 166, 160, 21, 191, 197, 61, 162, 149, 171, 240, 183, 129, 231, 123, 204, - 192, 179, 134, 15, 47, 161, 142, 177, 239, 234, 186, 237, 231, 53, 208, 95, - 146, 36, 225, 231, 89, 142, 93, 248, 137, 124, 83, 39, 69, 77, 89, 208, - 182, 48, 85, 147, 244, 164, 246, 68, 38, 190, 220, 35, 202, 91, 157, 151, - 201, 240, 185, 218, 4, 152, 2, 132, 177, 88, 190, 196, 229, 74, 220, 135, - 137, 196, 11, 47, 5, 251, 106, 144, 163, 60, 222, 127, 52, 57, 202, 102, - 64, 140, 110, 206, 23, 182, 39, 245, 1, 163, 157, 186, 163, 80, 7, 230, - 44, 249, 176, 102, 164, 125, 147, 120, 18, 191, 186, 125, 64, 65, 198, 157, - 164, 213, 95, 61, 13, 181, 208, 91, 242, 197, 158, 34, 98, 169, 91, 14, - 17, 93, 157, 17, 65, 30, 183, 6, 139, 58, 255, 108, 100, 136, 209, 144, - 164, 6, 237, 33, 210, 110, 57, 126, 197, 136, 125, 244, 165, 151, 168, 3, - 143, 251, 247, 155, 136, 130, 88, 14, 74, 121, 250, 133, 21, 226, 185, 232, - 118, 132, 89, 64, 204, 161, 2, 70, 224, 159, 35, 204, 123, 180, 13, 52, - 231, 57, 25, 78, 66, 69, 97, 42, 198, 84, 176, 59, 8, 232, 125, 134, - 193, 2, 232, 109, 216, 69, 90, 142, 32, 38, 249, 37, 75, 180, 184, 188, - 19, 47, 120, 87, 146, 70, 232, 120, 191, 45, 33, 38, 19, 248, 110, 110, - 44, 64, 2, 84, 244, 228, 252, 228, 170, 123, 38, 144, 213, 144, 171, 212, - 243, 87, 189, 46, 128, 110, 84, 77, 65, 183, 61, 184, 101, 44, 168, 68, - 14, 106, 105, 8, 227, 211, 166, 39, 152, 43, 52, 254, 197, 55, 119, 89, - 168, 65, 53, 138, 177, 56, 219, 0, 58, 121, 148, 18, 44, 100, 215, 103, - 145, 229, 117, 196, 91, 89, 113, 143, 172, 239, 249, 184, 154, 39, 112, 65, - 204, 42, 84, 38, 155, 151, 151, 16, 100, 87, 174, 162, 145, 147, 149, 186, - 237, 145, 134, 144, 198, 235, 213, 163, 48, 230, 24, 47, 57, 71, 127, 0, - 150, 219, 12, 81, 197, 150, 131, 13, 169, 63, 175, 184, 48, 235, 65, 243, - 149, 200, 163, 254, 202, 114, 247, 67, 143, 250, 126, 228, 80, 130, 216, 214, - 36, 2, 230, 33, 119, 125, 3, 142, 237, 100, 3, 152, 197, 174, 244, 129, - 232, 30, 206, 199, 39, 210, 220, 43, 237, 221, 201, 54, 179, 42, 28, 133, - 246, 203, 198, 177, 0, 28, 194, 85, 223, 109, 155, 147, 221, 60, 133, 108, - 157, 254, 26, 75, 157, 185, 49, 142, 31, 137, 71, 43, 63, 64, 237, 148, - 237, 172, 159, 160, 155, 254, 234, 224, 140, 193, 114, 140, 62, 109, 136, 39, - 255, 8, 158, 146, 128, 49, 222, 96, 57, 209, 180, 249, 202, 127, 113, 231, - 78, 178, 46, 33, 228, 215, 104, 31, 207, 186, 82, 41, 42, 39, 103, 119, - 123, 133, 243, 254, 238, 156, 90, 186, 37, 212, 33, 107, 252, 51, 177, 36, - 237, 76, 159, 245, 93, 214, 97, 56, 190, 38, 160, 94, 105, 222, 220, 158, - 49, 16, 191, 52, 120, 87, 179, 2, 27, 144, 223, 230, 184, 6, 129, 227, - 69, 47, 215, 181, 162, 139, 72, 200, 45, 163, 159, 62, 2, 221, 124, 40, - 159, 242, 35, 208, 179, 166, 98, 67, 178, 68, 143, 225, 178, 146, 187, 159, - 57, 66, 176, 192, 236, 250, 168, 224, 122, 43, 159, 120, 133, 165, 122, 64, - 87, 74, 161, 241, 9, 87, 90, 24, 255, 113, 203, 220, 57, 139, 197, 159, - 31, 151, 27, 140, 77, 162, 7, 27, 84, 228, 187, 220, 53, 126, 162, 242, - 84, 181, 223, 103, 86, 177, 207, 31, 140, 18, 207, 256, 201, 166, 96, 23, - 233, 103, 197, 84, 161, 75, 59, 149, 138, 154, 119, 92, 16, 53, 116, 97, - 220, 114, 35, 45, 77, 209, 40, 196, 71, 22, 81, 178, 110, 14, 3, 180, - 110, 129, 112, 47, 18, 61, 134, 78, 73, 79, 254, 232, 125, 180, 205, 54, - 220, 119, 
63, 89, 181, 52, 77, 109, 151, 77, 80, 207, 144, 25, 20, 6, - 208, 47, 201, 206, 192, 14, 73, 176, 256, 201, 207, 87, 216, 60, 56, 73, - 92, 243, 179, 113, 49, 59, 55, 168, 121, 137, 69, 154, 95, 57, 187, 47, - 129, 4, 15, 92, 6, 116, 69, 196, 48, 134, 84, 81, 111, 56, 38, 176, - 239, 6, 128, 72, 242, 134, 36, 221, 59, 48, 242, 68, 130, 110, 171, 89, - 13, 220, 48, 29, 5, 75, 104, 233, 91, 129, 105, 162, 44, 113, 163, 163, - 85, 147, 190, 111, 197, 80, 213, 153, 81, 68, 203, 33, 161, 165, 10, 61, - 120, 252, 0, 205, 28, 42, 193, 64, 39, 37, 83, 175, 5, 218, 215, 174, - 128, 121, 231, 11, 150, 145, 135, 197, 136, 91, 193, 5, 107, 88, 82, 6, - 4, 188, 256, 70, 40, 2, 167, 57, 169, 203, 115, 254, 215, 172, 84, 80, - 188, 167, 34, 137, 43, 243, 2, 79, 178, 38, 188, 135, 233, 194, 208, 13, - 11, 151, 231, 196, 12, 122, 162, 56, 17, 114, 191, 207, 90, 132, 64, 238, - 187, 6, 198, 176, 240, 88, 118, 236, 15, 226, 166, 22, 193, 229, 82, 246, - 213, 64, 37, 63, 31, 243, 252, 37, 156, 38, 175, 204, 138, 141, 211, 82, - 106, 217, 97, 139, 153, 56, 129, 218, 158, 9, 83, 26, 87, 112, 71, 21, - 250, 5, 65, 141, 68, 116, 231, 113, 10, 218, 99, 205, 201, 92, 157, 4, - 97, 46, 49, 220, 72, 139, 103, 171, 149, 129, 193, 19, 69, 245, 43, 31, - 58, 68, 36, 195, 159, 22, 54, 34, 233, 141, 205, 100, 226, 96, 22, 192, - 41, 231, 24, 79, 234, 138, 30, 120, 117, 216, 172, 197, 172, 107, 86, 29, - 181, 151, 0, 6, 146, 186, 68, 55, 54, 58, 213, 182, 60, 231, 33, 232, - 77, 210, 216, 154, 80, 51, 141, 122, 68, 148, 219, 122, 254, 48, 64, 175, - 41, 115, 62, 243, 141, 81, 119, 121, 5, 68, 121, 88, 239, 29, 230, 90, - 135, 159, 35, 223, 168, 112, 49, 37, 146, 60, 126, 134, 42, 145, 115, 90, - 73, 133, 211, 86, 120, 141, 122, 241, 127, 56, 130, 36, 174, 75, 83, 246, - 112, 45, 136, 194, 201, 115, 1, 156, 114, 167, 208, 12, 176, 147, 32, 170, - 251, 100, 102, 220, 122, 210, 6, 49, 75, 201, 38, 105, 132, 135, 126, 102, - 13, 121, 76, 228, 202, 20, 61, 213, 246, 13, 207, 42, 148, 168, 37, 253, - 34, 94, 141, 185, 18, 234, 157, 109, 104, 64, 250, 125, 49, 236, 86, 48, - 196, 77, 75, 237, 156, 103, 225, 19, 110, 229, 22, 68, 177, 93, 221, 181, - 152, 153, 61, 108, 101, 74, 247, 195, 127, 216, 30, 166, 168, 61, 83, 229, - 120, 156, 96, 120, 201, 124, 43, 27, 253, 250, 120, 143, 89, 235, 189, 243, - 150, 7, 127, 119, 149, 244, 84, 185, 134, 34, 128, 193, 236, 234, 132, 117, - 137, 32, 145, 184, 44, 121, 51, 76, 11, 228, 142, 251, 39, 77, 228, 251, - 41, 58, 246, 107, 125, 187, 9, 240, 35, 8, 11, 162, 242, 220, 158, 163, - 2, 184, 163, 227, 242, 2, 100, 101, 2, 78, 129, 34, 89, 28, 26, 157, - 79, 31, 107, 250, 194, 156, 186, 69, 212, 66, 41, 180, 139, 42, 211, 253, - 256, 239, 29, 129, 104, 248, 182, 68, 1, 189, 48, 226, 36, 229, 3, 158, - 41, 53, 241, 22, 115, 174, 16, 163, 224, 19, 112, 219, 177, 233, 42, 27, - 250, 134, 18, 28, 145, 122, 68, 34, 134, 31, 147, 17, 39, 188, 150, 76, - 45, 42, 167, 249, 12, 16, 23, 182, 13, 79, 121, 3, 70, 197, 239, 44, - 86, 177, 255, 81, 64, 171, 138, 131, 73, 110, 44, 201, 254, 198, 146, 91, - 48, 9, 104, 31, 29, 161, 101, 31, 138, 180, 231, 233, 79, 137, 61, 236, - 140, 15, 249, 218, 234, 119, 99, 195, 110, 137, 237, 207, 8, 31, 45, 24, - 90, 155, 203, 253, 192, 203, 65, 176, 210, 171, 142, 214, 220, 122, 136, 237, - 189, 186, 147, 40, 80, 254, 173, 33, 191, 46, 192, 26, 108, 255, 228, 205, - 61, 76, 39, 107, 225, 126, 228, 182, 140, 251, 143, 134, 252, 168, 221, 8, - 185, 85, 60, 233, 147, 244, 87, 137, 8, 140, 96, 80, 53, 45, 175, 160, - 124, 189, 112, 37, 144, 19, 70, 17, 170, 242, 2, 3, 
28, 95, 120, 199, - 212, 43, 9, 117, 86, 151, 101, 241, 200, 145, 241, 19, 178, 69, 204, 197, - 227, 166, 94, 7, 193, 45, 247, 234, 19, 187, 212, 212, 236, 125, 33, 95, - 198, 121, 122, 103, 77, 155, 235, 49, 25, 237, 249, 11, 162, 7, 238, 24, - 16, 150, 129, 25, 152, 17, 42, 67, 247, 162, 77, 154, 31, 133, 55, 137, - 79, 119, 153, 10, 86, 28, 244, 186, 41, 169, 106, 44, 10, 49, 110, 179, - 32, 133, 155, 244, 61, 70, 131, 168, 170, 39, 231, 252, 32, 69, 92, 238, - 239, 35, 132, 136, 236, 167, 90, 32, 123, 88, 69, 22, 20, 89, 145, 166, - 30, 118, 75, 4, 49, 31, 225, 54, 11, 50, 56, 191, 246, 1, 187, 33, - 119, 107, 139, 68, 19, 240, 131, 55, 94, 113, 31, 252, 12, 179, 121, 2, - 120, 252, 0, 76, 41, 80, 185, 42, 62, 121, 105, 159, 121, 109, 111, 98, - 7, 118, 86, 29, 210, 70, 231, 179, 223, 229, 164, 70, 62, 47, 0, 206, - 204, 178, 168, 120, 224, 166, 99, 25, 103, 63, 246, 224, 117, 204, 75, 124, - 140, 133, 110, 110, 222, 88, 151, 118, 46, 37, 22, 143, 158, 40, 2, 50, - 153, 94, 190, 199, 13, 198, 127, 211, 180, 90, 183, 98, 0, 142, 210, 154, - 100, 187, 67, 231, 202, 100, 198, 235, 252, 160, 247, 124, 247, 14, 121, 221, - 57, 88, 253, 243, 185, 89, 45, 249, 221, 194, 108, 175, 193, 119, 50, 141, - 223, 133, 136, 64, 176, 250, 129, 100, 124, 94, 181, 159, 99, 185, 177, 240, - 135, 42, 103, 52, 202, 208, 143, 186, 193, 103, 154, 237, 102, 88, 225, 161, - 50, 188, 191, 109, 12, 87, 19, 227, 247, 183, 13, 52, 205, 170, 205, 146, - 89, 160, 18, 105, 192, 73, 231, 225, 184, 157, 252, 220, 61, 59, 169, 183, - 221, 20, 141, 20, 158, 101, 245, 7, 245, 225, 118, 137, 84, 55, 19, 27, - 164, 110, 35, 25, 202, 94, 150, 46, 91, 152, 130, 1, 7, 46, 16, 237, - 171, 109, 19, 200, 65, 38, 10, 213, 70, 96, 126, 226, 185, 225, 181, 46, - 10, 165, 11, 123, 53, 158, 22, 147, 64, 22, 227, 69, 182, 237, 197, 37, - 39, 49, 186, 223, 139, 128, 55, 36, 166, 178, 220, 20, 98, 172, 166, 253, - 45, 0, 120, 180, 189, 185, 158, 159, 196, 6, 214, 79, 141, 52, 156, 107, - 5, 109, 142, 159, 33, 64, 190, 133, 95, 132, 95, 202, 160, 63, 186, 23, - 231, 107, 163, 33, 234, 15, 244, 77, 108, 49, 51, 7, 164, 87, 142, 99, - 240, 202, 47, 256, 118, 190, 196, 178, 217, 42, 39, 153, 21, 192, 232, 202, - 14, 82, 179, 64, 233, 4, 219, 10, 133, 78, 43, 144, 146, 216, 202, 81, - 71, 252, 8, 201, 68, 256, 85, 233, 164, 88, 176, 30, 5, 152, 126, 179, - 249, 84, 140, 190, 159, 54, 118, 98, 2, 159, 27, 133, 74, 121, 239, 196, - 71, 149, 119, 135, 102, 20, 87, 112, 44, 75, 221, 3, 151, 158, 5, 98, - 152, 25, 97, 106, 63, 171, 240, 79, 234, 240, 230, 92, 76, 70, 173, 196, - 36, 225, 218, 133, 64, 240, 150, 41, 146, 66, 133, 51, 134, 73, 170, 238, - 140, 90, 45, 89, 46, 147, 96, 169, 174, 174, 244, 151, 90, 40, 32, 74, - 38, 154, 246, 57, 31, 14, 189, 151, 83, 243, 197, 183, 220, 185, 53, 225, - 51, 106, 188, 208, 222, 248, 93, 13, 93, 215, 131, 25, 142, 185, 113, 222, - 131, 215, 149, 50, 159, 85, 32, 5, 205, 192, 2, 227, 42, 214, 197, 42, - 126, 182, 68, 123, 109, 36, 237, 179, 170, 199, 77, 256, 5, 128, 214, 243, - 137, 177, 170, 253, 179, 180, 153, 236, 100, 196, 216, 231, 198, 37, 192, 80, - 121, 221, 246, 1, 16, 246, 29, 78, 64, 148, 124, 38, 96, 125, 28, 20, - 48, 51, 73, 187, 139, 208, 98, 253, 221, 188, 84, 129, 1, 205, 95, 205, - 117, 79, 71, 126, 134, 237, 19, 184, 137, 125, 129, 178, 223, 54, 188, 112, - 30, 7, 225, 228, 205, 184, 233, 87, 117, 22, 58, 10, 8, 42, 2, 114, - 254, 19, 17, 13, 150, 92, 233, 179, 63, 12, 60, 171, 127, 35, 50, 5, - 195, 113, 241, 25, 249, 184, 166, 44, 221, 35, 151, 116, 8, 54, 195, 89, - 218, 186, 132, 
5, 41, 89, 226, 177, 11, 41, 87, 172, 5, 23, 20, 59, - 228, 94, 76, 33, 137, 43, 151, 221, 61, 232, 4, 120, 93, 217, 80, 228, - 228, 6, 58, 25, 62, 84, 91, 48, 209, 20, 247, 243, 55, 106, 80, 79, - 235, 34, 20, 180, 146, 2, 236, 13, 236, 206, 243, 222, 204, 83, 148, 213, - 214, 117, 237, 98, 0, 90, 204, 168, 32, 41, 126, 67, 191, 74, 27, 255, - 26, 75, 240, 113, 185, 105, 167, 154, 112, 67, 151, 63, 161, 134, 239, 176, - 42, 87, 249, 130, 45, 242, 17, 100, 107, 120, 212, 218, 237, 76, 231, 162, - 175, 172, 118, 155, 92, 36, 124, 17, 121, 71, 13, 9, 82, 126, 147, 142, - 218, 148, 138, 80, 163, 106, 164, 123, 140, 129, 35, 42, 186, 154, 228, 214, - 75, 73, 8, 253, 42, 153, 232, 164, 95, 24, 110, 90, 231, 197, 90, 196, - 57, 164, 252, 181, 31, 7, 97, 256, 35, 77, 200, 212, 99, 179, 92, 227, - 17, 180, 49, 176, 9, 188, 13, 182, 93, 44, 128, 219, 134, 92, 151, 6, - 23, 126, 200, 109, 66, 30, 140, 180, 146, 134, 67, 200, 7, 9, 223, 168, - 186, 221, 3, 154, 150, 165, 43, 53, 138, 27, 86, 213, 235, 160, 70, 2, - 240, 20, 89, 212, 84, 141, 168, 246, 183, 227, 30, 167, 138, 185, 253, 83, - 52, 143, 236, 94, 59, 65, 89, 218, 194, 157, 164, 156, 111, 95, 202, 168, - 245, 256, 151, 28, 222, 194, 72, 130, 217, 134, 253, 77, 246, 100, 76, 32, - 254, 174, 182, 193, 14, 237, 74, 1, 74, 26, 135, 216, 152, 208, 112, 38, - 181, 62, 25, 71, 61, 234, 254, 97, 191, 23, 92, 256, 190, 205, 6, 16, - 134, 147, 210, 219, 148, 59, 73, 185, 24, 247, 174, 143, 116, 220, 128, 144, - 111, 126, 101, 98, 130, 136, 101, 102, 69, 127, 24, 168, 146, 226, 226, 207, - 176, 122, 149, 254, 134, 196, 22, 151, 197, 21, 50, 205, 116, 154, 65, 116, - 177, 224, 127, 77, 177, 159, 225, 69, 176, 54, 100, 104, 140, 8, 11, 126, - 11, 188, 185, 159, 107, 16, 254, 142, 80, 28, 5, 157, 104, 57, 109, 82, - 102, 80, 173, 242, 238, 207, 57, 105, 237, 160, 59, 189, 189, 199, 26, 11, - 190, 156, 97, 118, 20, 12, 254, 189, 165, 147, 142, 199, 5, 213, 64, 133, - 108, 217, 133, 60, 94, 28, 116, 136, 47, 165, 125, 42, 183, 143, 14, 129, - 223, 70, 212, 205, 181, 180, 3, 201, 182, 46, 57, 104, 239, 60, 99, 181, - 220, 231, 45, 79, 156, 89, 149, 143, 190, 103, 153, 61, 235, 73, 136, 20, - 89, 243, 16, 130, 247, 141, 134, 93, 80, 68, 85, 84, 8, 72, 194, 4, - 242, 110, 19, 133, 199, 70, 172, 92, 132, 254, 67, 74, 36, 94, 13, 90, - 154, 184, 9, 109, 118, 243, 214, 71, 36, 95, 0, 90, 201, 105, 112, 215, - 69, 196, 224, 210, 236, 242, 155, 211, 37, 134, 69, 113, 157, 97, 68, 26, - 230, 149, 219, 180, 20, 76, 172, 145, 154, 40, 129, 8, 93, 56, 162, 124, - 207, 233, 105, 19, 3, 183, 155, 134, 8, 244, 213, 78, 139, 88, 156, 37, - 51, 152, 111, 102, 112, 250, 114, 252, 201, 241, 133, 24, 136, 153, 5, 90, - 210, 197, 216, 24, 131, 17, 147, 246, 13, 86, 3, 253, 179, 237, 101, 114, - 243, 191, 207, 2, 220, 133, 244, 53, 87, 125, 154, 158, 197, 20, 8, 83, - 32, 191, 38, 241, 204, 22, 168, 59, 217, 123, 162, 82, 21, 50, 130, 89, - 239, 253, 195, 56, 253, 74, 147, 125, 234, 199, 250, 28, 65, 193, 22, 237, - 193, 94, 58, 229, 139, 176, 69, 42, 179, 164, 150, 168, 246, 214, 86, 174, - 59, 117, 15, 19, 76, 37, 214, 238, 153, 226, 154, 45, 109, 114, 198, 107, - 45, 70, 238, 196, 142, 252, 244, 71, 123, 136, 134, 188, 99, 132, 25, 42, - 240, 0, 196, 33, 26, 124, 256, 145, 27, 102, 153, 35, 28, 132, 221, 167, - 138, 133, 41, 170, 95, 224, 40, 139, 239, 153, 1, 106, 255, 106, 170, 163, - 127, 44, 155, 232, 194, 119, 232, 117, 239, 143, 108, 41, 3, 9, 180, 256, - 144, 113, 133, 200, 79, 69, 128, 216, 31, 50, 102, 209, 249, 136, 150, 154, - 182, 51, 228, 39, 127, 142, 87, 15, 
94, 92, 187, 245, 31, 236, 64, 58, - 114, 11, 17, 166, 189, 152, 218, 34, 123, 39, 58, 37, 153, 91, 63, 121, - 31, 34, 12, 254, 106, 96, 171, 14, 155, 247, 214, 69, 24, 98, 3, 204, - 202, 194, 207, 30, 253, 44, 119, 70, 14, 96, 82, 250, 63, 6, 232, 38, - 89, 144, 102, 191, 82, 254, 20, 222, 96, 162, 110, 6, 159, 58, 200, 226, - 98, 128, 42, 70, 84, 247, 128, 211, 136, 54, 143, 166, 60, 118, 99, 218, - 27, 193, 85, 81, 219, 223, 46, 41, 23, 233, 152, 222, 36, 236, 54, 181, - 56, 50, 4, 207, 129, 92, 78, 88, 197, 251, 131, 105, 31, 172, 38, 131, - 19, 204, 129, 47, 227, 106, 202, 183, 23, 6, 77, 224, 102, 147, 11, 218, - 131, 132, 60, 192, 208, 223, 236, 23, 103, 115, 89, 18, 185, 171, 70, 174, - 139, 0, 100, 160, 221, 11, 228, 60, 12, 122, 114, 12, 157, 235, 148, 57, - 83, 62, 173, 131, 169, 126, 85, 99, 93, 243, 81, 80, 29, 245, 206, 82, - 236, 227, 166, 14, 230, 213, 144, 97, 27, 111, 99, 164, 105, 150, 89, 111, - 252, 118, 140, 232, 120, 183, 137, 213, 232, 157, 224, 33, 134, 118, 186, 80, - 159, 2, 186, 193, 54, 242, 25, 237, 232, 249, 226, 213, 90, 149, 90, 160, - 118, 69, 64, 37, 10, 183, 109, 246, 30, 52, 219, 69, 189, 26, 116, 220, - 50, 244, 243, 243, 139, 137, 232, 98, 38, 45, 256, 143, 171, 101, 73, 238, - 123, 45, 194, 167, 250, 123, 12, 29, 136, 237, 141, 21, 89, 96, 199, 44, - 8, 214, 208, 17, 113, 41, 137, 26, 166, 155, 89, 85, 54, 58, 97, 160, - 50, 239, 58, 71, 21, 157, 139, 12, 37, 198, 182, 131, 149, 134, 16, 204, - 164, 181, 248, 166, 52, 216, 136, 201, 37, 255, 187, 240, 5, 101, 147, 231, - 14, 163, 253, 134, 146, 216, 8, 54, 224, 90, 220, 195, 75, 215, 186, 58, - 71, 204, 124, 105, 239, 53, 16, 85, 69, 163, 195, 223, 33, 38, 69, 88, - 88, 203, 99, 55, 176, 13, 156, 204, 236, 99, 194, 134, 75, 247, 126, 129, - 160, 124, 233, 206, 139, 144, 154, 45, 233, 51, 206, 61, 60, 55, 205, 107, - 84, 108, 96, 188, 203, 31, 89, 20, 115, 144, 137, 90, 237, 78, 231, 185, - 120, 217, 1, 176, 169, 30, 155, 176, 100, 113, 53, 42, 193, 108, 14, 121, - 176, 158, 137, 92, 178, 44, 110, 249, 108, 234, 94, 101, 128, 12, 250, 173, - 72, 202, 232, 66, 139, 152, 189, 18, 32, 197, 9, 238, 246, 55, 119, 183, - 196, 119, 113, 247, 191, 100, 200, 245, 46, 16, 234, 112, 136, 116, 232, 48, - 176, 108, 11, 237, 14, 153, 93, 177, 124, 72, 67, 121, 135, 143, 45, 18, - 97, 251, 184, 172, 136, 55, 213, 8, 103, 12, 221, 212, 13, 160, 116, 91, - 237, 127, 218, 190, 103, 131, 77, 82, 36, 100, 22, 252, 79, 69, 54, 26, - 65, 182, 115, 142, 247, 20, 89, 81, 188, 244, 27, 120, 240, 248, 13, 230, - 67, 133, 32, 201, 129, 87, 9, 245, 66, 88, 166, 34, 46, 184, 119, 218, - 144, 235, 163, 40, 138, 134, 127, 217, 64, 227, 116, 67, 55, 202, 130, 48, - 199, 42, 251, 112, 124, 153, 123, 194, 243, 49, 250, 12, 78, 157, 167, 134, - 210, 73, 156, 102, 21, 88, 216, 123, 45, 11, 208, 18, 47, 187, 20, 43, - 3, 180, 124, 2, 136, 176, 77, 111, 138, 139, 91, 225, 126, 8, 74, 255, - 88, 192, 193, 239, 138, 204, 139, 194, 166, 130, 252, 184, 140, 168, 30, 177, - 121, 98, 131, 124, 69, 171, 75, 49, 184, 34, 76, 122, 202, 115, 184, 253, - 120, 182, 33, 251, 1, 74, 216, 217, 243, 168, 70, 162, 119, 158, 197, 198, - 61, 89, 7, 5, 54, 199, 211, 170, 23, 226, 44, 247, 165, 195, 7, 225, - 91, 23, 50, 15, 51, 208, 106, 94, 12, 31, 43, 112, 146, 139, 246, 182, - 113, 1, 97, 15, 66, 2, 51, 76, 164, 184, 237, 200, 218, 176, 72, 98, - 33, 135, 38, 147, 140, 229, 50, 94, 81, 187, 129, 17, 238, 168, 146, 203, - 181, 99, 164, 3, 104, 98, 255, 189, 114, 142, 86, 102, 229, 102, 80, 129, - 64, 84, 79, 161, 81, 156, 128, 111, 164, 197, 18, 15, 55, 
196, 198, 191, - 28, 113, 117, 96, 207, 253, 19, 158, 231, 13, 53, 130, 252, 211, 58, 180, - 212, 142, 7, 219, 38, 81, 62, 109, 167, 113, 33, 56, 97, 185, 157, 130, - 186, 129, 119, 182, 196, 26, 54, 110, 65, 170, 166, 236, 30, 22, 162, 0, - 106, 12, 248, 33, 48, 72, 159, 17, 76, 244, 172, 132, 89, 171, 196, 76, - 254, 166, 76, 218, 226, 3, 52, 220, 238, 181, 179, 144, 225, 23, 3, 166, - 158, 35, 228, 154, 204, 23, 203, 71, 134, 189, 18, 168, 236, 141, 117, 138, - 2, 132, 78, 57, 154, 21, 250, 196, 184, 40, 161, 40, 10, 178, 134, 120, - 132, 123, 101, 82, 205, 121, 55, 140, 231, 56, 231, 71, 206, 246, 198, 150, - 146, 192, 45, 105, 242, 1, 125, 18, 176, 46, 222, 122, 19, 80, 113, 133, - 131, 162, 81, 51, 98, 168, 247, 161, 139, 39, 63, 162, 22, 153, 170, 92, - 91, 130, 174, 200, 45, 112, 99, 164, 132, 184, 191, 186, 200, 167, 86, 145, - 167, 227, 130, 44, 12, 158, 172, 249, 204, 17, 54, 249, 16, 200, 21, 174, - 67, 223, 105, 201, 50, 36, 133, 203, 244, 131, 228, 67, 29, 195, 91, 91, - 55, 107, 167, 154, 170, 137, 218, 183, 169, 61, 99, 175, 128, 23, 142, 183, - 66, 255, 59, 187, 66, 85, 212, 109, 168, 82, 16, 43, 67, 139, 114, 176, - 216, 255, 130, 94, 152, 79, 183, 64, 100, 23, 214, 82, 34, 230, 48, 15, - 242, 130, 50, 241, 81, 32, 5, 125, 183, 182, 184, 99, 248, 109, 159, 210, - 226, 61, 119, 129, 39, 149, 78, 214, 107, 78, 147, 124, 228, 18, 143, 188, - 84, 180, 233, 119, 64, 39, 158, 133, 177, 168, 6, 150, 80, 117, 150, 56, - 49, 72, 49, 37, 30, 242, 49, 142, 33, 156, 34, 44, 44, 72, 58, 22, - 249, 46, 168, 80, 25, 196, 64, 174, 97, 179, 244, 134, 213, 105, 63, 151, - 21, 90, 168, 90, 245, 28, 157, 65, 250, 232, 188, 27, 99, 160, 156, 127, - 68, 193, 10, 80, 205, 36, 138, 229, 12, 223, 70, 169, 251, 41, 48, 94, - 41, 177, 99, 256, 158, 0, 6, 83, 231, 191, 120, 135, 157, 146, 218, 213, - 160, 7, 47, 234, 98, 211, 79, 225, 179, 95, 175, 105, 185, 79, 115, 0, - 104, 14, 65, 124, 15, 188, 52, 9, 253, 27, 132, 137, 13, 127, 75, 238, - 185, 253, 33, 8, 52, 157, 164, 68, 232, 188, 69, 28, 209, 233, 5, 129, - 216, 90, 252, 212, 33, 200, 222, 9, 112, 15, 43, 36, 226, 114, 15, 249, - 217, 8, 148, 22, 147, 23, 143, 67, 222, 116, 235, 250, 212, 210, 39, 142, - 108, 64, 209, 83, 73, 66, 99, 34, 17, 29, 45, 151, 244, 114, 28, 241, - 144, 208, 146, 179, 132, 89, 217, 198, 252, 219, 205, 165, 75, 107, 11, 173, - 76, 6, 196, 247, 152, 216, 248, 91, 209, 178, 57, 250, 174, 60, 79, 123, - 18, 135, 9, 241, 230, 159, 184, 68, 156, 251, 215, 9, 113, 234, 75, 235, - 103, 194, 205, 129, 230, 45, 96, 73, 157, 20, 200, 212, 212, 228, 161, 7, - 231, 228, 108, 43, 198, 87, 140, 140, 4, 182, 164, 3, 53, 104, 250, 213, - 85, 38, 89, 61, 52, 187, 35, 204, 86, 249, 100, 71, 248, 213, 163, 215, - 66, 106, 252, 129, 40, 111, 47, 24, 186, 221, 85, 205, 199, 237, 122, 181, - 32, 46, 182, 135, 33, 251, 142, 34, 208, 242, 128, 255, 4, 234, 15, 33, - 167, 222, 32, 186, 191, 34, 255, 244, 98, 240, 228, 204, 30, 142, 32, 70, - 69, 83, 110, 151, 10, 243, 141, 21, 223, 69, 61, 37, 59, 209, 102, 114, - 223, 33, 129, 254, 255, 103, 86, 247, 235, 72, 126, 177, 102, 226, 102, 30, - 149, 221, 62, 247, 251, 120, 163, 173, 57, 202, 204, 24, 39, 106, 120, 143, - 202, 176, 191, 147, 37, 38, 51, 133, 47, 245, 157, 132, 154, 71, 183, 111, - 30, 180, 18, 202, 82, 96, 170, 91, 157, 181, 212, 140, 256, 8, 196, 121, - 149, 79, 66, 127, 113, 78, 4, 197, 84, 256, 111, 222, 102, 63, 228, 104, - 136, 223, 67, 193, 93, 154, 249, 83, 204, 101, 200, 234, 84, 252, 230, 195, - 43, 140, 120, 242, 89, 63, 166, 233, 209, 94, 43, 170, 126, 5, 205, 78, - 
112, 80, 143, 151, 146, 248, 137, 203, 45, 183, 61, 1, 155, 8, 102, 59, - 68, 212, 230, 61, 254, 191, 128, 223, 176, 123, 229, 27, 146, 120, 96, 165, - 213, 12, 232, 40, 186, 225, 66, 105, 200, 195, 212, 110, 237, 238, 151, 19, - 12, 171, 150, 82, 7, 228, 79, 52, 15, 78, 62, 43, 21, 154, 114, 21, - 12, 212, 256, 232, 125, 127, 5, 51, 37, 252, 136, 13, 47, 195, 168, 191, - 231, 55, 57, 251, 214, 116, 15, 86, 210, 41, 249, 242, 119, 27, 250, 203, - 107, 69, 90, 43, 206, 154, 127, 54, 100, 78, 187, 54, 244, 177, 234, 167, - 202, 136, 209, 171, 69, 114, 133, 173, 26, 139, 78, 141, 128, 32, 124, 39, - 45, 218, 96, 68, 90, 44, 67, 62, 83, 190, 188, 256, 103, 42, 102, 64, - 249, 0, 141, 11, 61, 69, 70, 66, 233, 237, 29, 200, 251, 157, 71, 51, - 64, 133, 113, 76, 35, 125, 76, 137, 217, 145, 35, 69, 226, 180, 56, 249, - 156, 163, 176, 237, 81, 54, 85, 169, 115, 211, 129, 70, 248, 40, 252, 192, - 194, 101, 247, 8, 181, 124, 217, 191, 194, 93, 99, 127, 117, 177, 144, 151, - 228, 121, 32, 11, 89, 81, 26, 29, 183, 76, 249, 132, 179, 70, 34, 102, - 20, 66, 87, 63, 124, 205, 174, 177, 87, 219, 73, 218, 91, 87, 176, 72, - 15, 211, 47, 61, 251, 165, 39, 247, 146, 70, 150, 57, 1, 212, 36, 162, - 39, 38, 16, 216, 3, 50, 116, 200, 32, 234, 77, 181, 155, 19, 90, 188, - 36, 6, 254, 46, 46, 203, 25, 230, 181, 196, 4, 151, 225, 65, 122, 216, - 168, 86, 158, 131, 136, 16, 49, 102, 233, 64, 154, 88, 228, 52, 146, 69, - 93, 157, 243, 121, 70, 209, 126, 213, 88, 145, 236, 65, 70, 96, 204, 47, - 10, 200, 77, 8, 103, 150, 48, 153, 5, 37, 52, 235, 209, 31, 181, 126, - 83, 142, 224, 140, 6, 32, 200, 171, 160, 179, 115, 229, 75, 194, 208, 39, - 59, 223, 52, 247, 38, 197, 135, 1, 6, 189, 106, 114, 168, 5, 211, 222, - 44, 63, 90, 160, 116, 172, 170, 133, 125, 138, 39, 131, 23, 178, 10, 214, - 36, 93, 28, 59, 68, 17, 123, 25, 255, 184, 204, 102, 194, 214, 129, 94, - 159, 245, 112, 141, 62, 11, 61, 197, 124, 221, 205, 11, 79, 71, 201, 54, - 58, 150, 29, 121, 87, 46, 240, 201, 68, 20, 194, 209, 47, 152, 158, 174, - 193, 164, 120, 255, 216, 165, 247, 58, 85, 130, 220, 23, 122, 223, 188, 98, - 21, 70, 72, 170, 150, 237, 76, 143, 112, 238, 206, 146, 215, 110, 4, 250, - 68, 44, 174, 177, 30, 98, 143, 241, 180, 127, 113, 48, 0, 1, 179, 199, - 59, 106, 201, 114, 29, 86, 173, 133, 217, 44, 200, 141, 107, 172, 16, 60, - 82, 58, 239, 94, 141, 234, 186, 235, 109, 173, 249, 139, 141, 59, 100, 248, - 84, 144, 49, 160, 51, 207, 164, 103, 74, 97, 146, 202, 193, 125, 168, 134, - 236, 111, 135, 121, 59, 145, 168, 200, 181, 173, 109, 2, 255, 6, 9, 245, - 90, 202, 214, 143, 121, 65, 85, 232, 132, 77, 228, 84, 26, 54, 184, 15, - 161, 29, 177, 79, 43, 0, 156, 184, 163, 165, 62, 90, 179, 93, 45, 239, - 1, 16, 120, 189, 127, 47, 74, 166, 20, 214, 233, 226, 89, 217, 229, 26, - 156, 53, 162, 60, 21, 3, 192, 72, 111, 51, 53, 101, 181, 208, 88, 82, - 179, 160, 219, 113, 240, 108, 43, 224, 162, 147, 62, 14, 95, 81, 205, 4, - 160, 177, 225, 115, 29, 69, 235, 168, 148, 29, 128, 114, 124, 129, 172, 165, - 215, 231, 214, 86, 160, 44, 157, 91, 248, 183, 73, 164, 56, 181, 162, 92, - 141, 118, 127, 240, 196, 77, 0, 9, 244, 79, 250, 100, 195, 25, 255, 85, - 94, 35, 212, 137, 107, 34, 110, 20, 200, 104, 17, 32, 231, 43, 150, 159, - 231, 216, 223, 190, 226, 109, 162, 197, 87, 92, 224, 11, 111, 73, 60, 225, - 238, 73, 246, 169, 19, 217, 119, 38, 121, 118, 70, 82, 99, 241, 110, 67, - 31, 76, 146, 215, 124, 240, 31, 103, 139, 224, 75, 160, 31, 78, 93, 4, - 64, 9, 103, 223, 6, 227, 119, 85, 116, 81, 21, 43, 46, 206, 234, 132, - 85, 99, 22, 131, 135, 97, 86, 13, 234, 
188, 21, 14, 89, 169, 207, 238, - 219, 177, 190, 72, 157, 41, 114, 140, 92, 141, 186, 1, 63, 107, 225, 184, - 118, 150, 153, 254, 241, 106, 120, 210, 104, 144, 151, 161, 88, 206, 125, 164, - 15, 211, 173, 49, 146, 241, 71, 36, 58, 201, 46, 27, 33, 187, 91, 162, - 117, 19, 210, 213, 187, 97, 193, 50, 190, 114, 217, 60, 61, 167, 207, 213, - 213, 53, 135, 34, 156, 91, 115, 119, 46, 99, 242, 1, 90, 52, 198, 227, - 201, 91, 216, 146, 210, 82, 121, 38, 73, 133, 182, 193, 132, 148, 246, 75, - 109, 157, 179, 113, 176, 134, 205, 159, 148, 58, 103, 171, 132, 156, 133, 147, - 161, 231, 39, 100, 175, 97, 125, 28, 183, 129, 135, 191, 202, 181, 29, 218, - 43, 104, 148, 203, 189, 204, 4, 182, 169, 1, 134, 122, 141, 202, 13, 187, - 177, 112, 162, 35, 231, 6, 8, 241, 99, 6, 191, 45, 113, 113, 101, 104}; - -// The S-Box we use for further linearity breaking. -// We created it by taking the digits of decimal expansion of e. -// The code that created it can be found in 'ProduceRandomSBox.c'. -unsigned char SBox[256] = { -//0 1 2 3 4 5 6 7 8 9 A B C D E F -0x7d, 0xd1, 0x70, 0x0b, 0xfa, 0x39, 0x18, 0xc3, 0xf3, 0xbb, 0xa7, 0xd4, 0x84, 0x25, 0x3b, 0x3c, // 0 -0x2c, 0x15, 0x69, 0x9a, 0xf9, 0x27, 0xfb, 0x02, 0x52, 0xba, 0xa8, 0x4b, 0x20, 0xb5, 0x8b, 0x3a, // 1 -0x88, 0x8e, 0x26, 0xcb, 0x71, 0x5e, 0xaf, 0xad, 0x0c, 0xac, 0xa1, 0x93, 0xc6, 0x78, 0xce, 0xfc, // 2 -0x2a, 0x76, 0x17, 0x1f, 0x62, 0xc2, 0x2e, 0x99, 0x11, 0x37, 0x65, 0x40, 0xfd, 0xa0, 0x03, 0xc1, // 3 -0xca, 0x48, 0xe2, 0x9b, 0x81, 0xe4, 0x1c, 0x01, 0xec, 0x68, 0x7a, 0x5a, 0x50, 0xf8, 0x0e, 0xa3, // 4 -0xe8, 0x61, 0x2b, 0xa2, 0xeb, 0xcf, 0x8c, 0x3d, 0xb4, 0x95, 0x13, 0x08, 0x46, 0xab, 0x91, 0x7b, // 5 -0xea, 0x55, 0x67, 0x9d, 0xdd, 0x29, 0x6a, 0x8f, 0x9f, 0x22, 0x4e, 0xf2, 0x57, 0xd2, 0xa9, 0xbd, // 6 -0x38, 0x16, 0x5f, 0x4c, 0xf7, 0x9e, 0x1b, 0x2f, 0x30, 0xc7, 0x41, 0x24, 0x5c, 0xbf, 0x05, 0xf6, // 7 -0x0a, 0x31, 0xa5, 0x45, 0x21, 0x33, 0x6b, 0x6d, 0x6c, 0x86, 0xe1, 0xa4, 0xe6, 0x92, 0x9c, 0xdf, // 8 -0xe7, 0xbe, 0x28, 0xe3, 0xfe, 0x06, 0x4d, 0x98, 0x80, 0x04, 0x96, 0x36, 0x3e, 0x14, 0x4a, 0x34, // 9 -0xd3, 0xd5, 0xdb, 0x44, 0xcd, 0xf5, 0x54, 0xdc, 0x89, 0x09, 0x90, 0x42, 0x87, 0xff, 0x7e, 0x56, // A -0x5d, 0x59, 0xd7, 0x23, 0x75, 0x19, 0x97, 0x73, 0x83, 0x64, 0x53, 0xa6, 0x1e, 0xd8, 0xb0, 0x49, // B -0x3f, 0xef, 0xbc, 0x7f, 0x43, 0xf0, 0xc9, 0x72, 0x0f, 0x63, 0x79, 0x2d, 0xc0, 0xda, 0x66, 0xc8, // C -0x32, 0xde, 0x47, 0x07, 0xb8, 0xe9, 0x1d, 0xc4, 0x85, 0x74, 0x82, 0xcc, 0x60, 0x51, 0x77, 0x0d, // D -0xaa, 0x35, 0xed, 0x58, 0x7c, 0x5b, 0xb9, 0x94, 0x6e, 0x8d, 0xb1, 0xc5, 0xb7, 0xee, 0xb6, 0xae, // E -0x10, 0xe0, 0xd6, 0xd9, 0xe5, 0x4f, 0xf1, 0x12, 0x00, 0xd0, 0xf4, 0x1a, 0x6f, 0x8a, 0xb3, 0xb2 }; // F - -/////////////////////////////////////////////////////////////////////////////////////////////// -// -// Helper functions definition portion. -// -/////////////////////////////////////////////////////////////////////////////////////////////// - -// Translates an input array with values in base 257 to output array with values in base 256. -// Returns the carry bit. -// -// Parameters: -// - input: the input array of size EIGHTH_N. Each value in the array is a number in Z_257. -// The MSB is assumed to be the last one in the array. -// - output: the input array encoded in base 256. -// -// Returns: -// - The carry bit (MSB). -swift_int16_t TranslateToBase256(swift_int32_t input[EIGHTH_N], unsigned char output[EIGHTH_N]); - -// Translates an input integer into the range (-FIELD_SIZE / 2) <= result <= (FIELD_SIZE / 2). 
-// -// Parameters: -// - x: the input integer. -// -// Returns: -// - The result, which equals (x MOD FIELD_SIZE), such that |result| <= (FIELD_SIZE / 2). -int Center(int x); - -// Calculates bit reversal permutation. -// -// Parameters: -// - input: the input to reverse. -// - numOfBits: the number of bits in the input to reverse. -// -// Returns: -// - The resulting number, which is obtained from the input by reversing its bits. -int ReverseBits(int input, int numOfBits); - -// Initializes the FFT fast lookup table. -// Shall be called only once. -void InitializeSWIFFTX(); - -// Calculates the FFT. -// -// Parameters: -// - input: the input to the FFT. -// - output: the resulting output. -void FFT(const unsigned char input[EIGHTH_N], swift_int32_t *output); - -/////////////////////////////////////////////////////////////////////////////////////////////// -// Helper functions implementation portion. -/////////////////////////////////////////////////////////////////////////////////////////////// - -swift_int16_t TranslateToBase256(swift_int32_t input[EIGHTH_N], unsigned char output[EIGHTH_N]) -{ - swift_int32_t pairs[EIGHTH_N / 2]; - int i; - - for (i = 0; i < EIGHTH_N; i += 2) - { - // input[i] + 257 * input[i + 1] - pairs[i >> 1] = input[i] + input[i + 1] + (input[i + 1] << 8); - } - - for (i = (EIGHTH_N / 2) - 1; i > 0; --i) - { - int j; - - for (j = i - 1; j < (EIGHTH_N / 2) - 1; ++j) - { - // pairs[j + 1] * 513, because 257^2 = 513 % 256^2. - register swift_int32_t temp = pairs[j] + pairs[j + 1] + (pairs[j + 1] << 9); - pairs[j] = temp & 0xffff; - pairs[j + 1] += (temp >> 16); - } - } - - for (i = 0; i < EIGHTH_N; i += 2) - { - output[i] = (unsigned char) (pairs[i >> 1] & 0xff); - output[i + 1] = (unsigned char) ((pairs[i >> 1] >> 8) & 0xff); - } - - return (pairs[EIGHTH_N/2 - 1] >> 16); -} - -int Center(int x) -{ - int result = x % FIELD_SIZE; - - if (result > (FIELD_SIZE / 2)) - result -= FIELD_SIZE; - - if (result < (FIELD_SIZE / -2)) - result += FIELD_SIZE; - - return result; -} - -int ReverseBits(int input, int numOfBits) -{ - register int reversed = 0; - - for (input |= numOfBits; input > 1; input >>= 1) - reversed = (reversed << 1) | (input & 1); - - return reversed; -} - -void InitializeSWIFFTX() -{ - int i, j, k, x; - // The powers of OMEGA - int omegaPowers[2 * N]; - omegaPowers[0] = 1; - - if (wasSetupDone) - return; - - for (i = 1; i < (2 * N); ++i) - { - omegaPowers[i] = Center(omegaPowers[i - 1] * OMEGA); - } - - for (i = 0; i < (N / W); ++i) - { - for (j = 0; j < W; ++j) - { - multipliers[(i << 3) + j] = omegaPowers[ReverseBits(i, N / W) * (2 * j + 1)]; - } - } - - for (x = 0; x < 256; ++x) - { - for (j = 0; j < 8; ++j) - { - register int temp = 0; - for (k = 0; k < 8; ++k) - { - temp += omegaPowers[(EIGHTH_N * (2 * j + 1) * ReverseBits(k, W)) % (2 * N)] - * ((x >> k) & 1); - } - - fftTable[(x << 3) + j] = Center(temp); - } - } - - wasSetupDone = true; -} - -void FFT(const unsigned char input[EIGHTH_N], swift_int32_t *output) -{ - register swift_int16_t *mult = multipliers; - register swift_int32_t F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, - F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, - F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, - F30, F31, F32, F33, F34, F35, F36, F37, F38, F39, - F40, F41, F42, F43, F44, F45, F46, F47, F48, F49, - F50, F51, F52, F53, F54, F55, F56, F57, F58, F59, - F60, F61, F62, F63; - - // First loop unrolling: - register swift_int16_t *table = &(fftTable[input[0] << 3]); - - F0 = mult[0] * table[0]; - F8 = mult[1] * table[1]; - F16 = 
mult[2] * table[2]; - F24 = mult[3] * table[3]; - F32 = mult[4] * table[4]; - F40 = mult[5] * table[5]; - F48 = mult[6] * table[6]; - F56 = mult[7] * table[7]; - - mult += 8; - table = &(fftTable[input[1] << 3]); - - F1 = mult[0] * table[0]; - F9 = mult[1] * table[1]; - F17 = mult[2] * table[2]; - F25 = mult[3] * table[3]; - F33 = mult[4] * table[4]; - F41 = mult[5] * table[5]; - F49 = mult[6] * table[6]; - F57 = mult[7] * table[7]; - - mult += 8; - table = &(fftTable[input[2] << 3]); - - F2 = mult[0] * table[0]; - F10 = mult[1] * table[1]; - F18 = mult[2] * table[2]; - F26 = mult[3] * table[3]; - F34 = mult[4] * table[4]; - F42 = mult[5] * table[5]; - F50 = mult[6] * table[6]; - F58 = mult[7] * table[7]; - - mult += 8; - table = &(fftTable[input[3] << 3]); - - F3 = mult[0] * table[0]; - F11 = mult[1] * table[1]; - F19 = mult[2] * table[2]; - F27 = mult[3] * table[3]; - F35 = mult[4] * table[4]; - F43 = mult[5] * table[5]; - F51 = mult[6] * table[6]; - F59 = mult[7] * table[7]; - - mult += 8; - table = &(fftTable[input[4] << 3]); - - F4 = mult[0] * table[0]; - F12 = mult[1] * table[1]; - F20 = mult[2] * table[2]; - F28 = mult[3] * table[3]; - F36 = mult[4] * table[4]; - F44 = mult[5] * table[5]; - F52 = mult[6] * table[6]; - F60 = mult[7] * table[7]; - - mult += 8; - table = &(fftTable[input[5] << 3]); - - F5 = mult[0] * table[0]; - F13 = mult[1] * table[1]; - F21 = mult[2] * table[2]; - F29 = mult[3] * table[3]; - F37 = mult[4] * table[4]; - F45 = mult[5] * table[5]; - F53 = mult[6] * table[6]; - F61 = mult[7] * table[7]; - - mult += 8; - table = &(fftTable[input[6] << 3]); - - F6 = mult[0] * table[0]; - F14 = mult[1] * table[1]; - F22 = mult[2] * table[2]; - F30 = mult[3] * table[3]; - F38 = mult[4] * table[4]; - F46 = mult[5] * table[5]; - F54 = mult[6] * table[6]; - F62 = mult[7] * table[7]; - - mult += 8; - table = &(fftTable[input[7] << 3]); - - F7 = mult[0] * table[0]; - F15 = mult[1] * table[1]; - F23 = mult[2] * table[2]; - F31 = mult[3] * table[3]; - F39 = mult[4] * table[4]; - F47 = mult[5] * table[5]; - F55 = mult[6] * table[6]; - F63 = mult[7] * table[7]; - - // Second loop unrolling: - // Iteration 0: - ADD_SUB(F0, F1); - ADD_SUB(F2, F3); - ADD_SUB(F4, F5); - ADD_SUB(F6, F7); - - F3 <<= 4; - F7 <<= 4; - - ADD_SUB(F0, F2); - ADD_SUB(F1, F3); - ADD_SUB(F4, F6); - ADD_SUB(F5, F7); - - F5 <<= 2; - F6 <<= 4; - F7 <<= 6; - - ADD_SUB(F0, F4); - ADD_SUB(F1, F5); - ADD_SUB(F2, F6); - ADD_SUB(F3, F7); - - output[0] = Q_REDUCE(F0); - output[8] = Q_REDUCE(F1); - output[16] = Q_REDUCE(F2); - output[24] = Q_REDUCE(F3); - output[32] = Q_REDUCE(F4); - output[40] = Q_REDUCE(F5); - output[48] = Q_REDUCE(F6); - output[56] = Q_REDUCE(F7); - - // Iteration 1: - ADD_SUB(F8, F9); - ADD_SUB(F10, F11); - ADD_SUB(F12, F13); - ADD_SUB(F14, F15); - - F11 <<= 4; - F15 <<= 4; - - ADD_SUB(F8, F10); - ADD_SUB(F9, F11); - ADD_SUB(F12, F14); - ADD_SUB(F13, F15); - - F13 <<= 2; - F14 <<= 4; - F15 <<= 6; - - ADD_SUB(F8, F12); - ADD_SUB(F9, F13); - ADD_SUB(F10, F14); - ADD_SUB(F11, F15); - - output[1] = Q_REDUCE(F8); - output[9] = Q_REDUCE(F9); - output[17] = Q_REDUCE(F10); - output[25] = Q_REDUCE(F11); - output[33] = Q_REDUCE(F12); - output[41] = Q_REDUCE(F13); - output[49] = Q_REDUCE(F14); - output[57] = Q_REDUCE(F15); - - // Iteration 2: - ADD_SUB(F16, F17); - ADD_SUB(F18, F19); - ADD_SUB(F20, F21); - ADD_SUB(F22, F23); - - F19 <<= 4; - F23 <<= 4; - - ADD_SUB(F16, F18); - ADD_SUB(F17, F19); - ADD_SUB(F20, F22); - ADD_SUB(F21, F23); - - F21 <<= 2; - F22 <<= 4; - F23 <<= 6; - - ADD_SUB(F16, F20); - ADD_SUB(F17, 
F21); - ADD_SUB(F18, F22); - ADD_SUB(F19, F23); - - output[2] = Q_REDUCE(F16); - output[10] = Q_REDUCE(F17); - output[18] = Q_REDUCE(F18); - output[26] = Q_REDUCE(F19); - output[34] = Q_REDUCE(F20); - output[42] = Q_REDUCE(F21); - output[50] = Q_REDUCE(F22); - output[58] = Q_REDUCE(F23); - - // Iteration 3: - ADD_SUB(F24, F25); - ADD_SUB(F26, F27); - ADD_SUB(F28, F29); - ADD_SUB(F30, F31); - - F27 <<= 4; - F31 <<= 4; - - ADD_SUB(F24, F26); - ADD_SUB(F25, F27); - ADD_SUB(F28, F30); - ADD_SUB(F29, F31); - - F29 <<= 2; - F30 <<= 4; - F31 <<= 6; - - ADD_SUB(F24, F28); - ADD_SUB(F25, F29); - ADD_SUB(F26, F30); - ADD_SUB(F27, F31); - - output[3] = Q_REDUCE(F24); - output[11] = Q_REDUCE(F25); - output[19] = Q_REDUCE(F26); - output[27] = Q_REDUCE(F27); - output[35] = Q_REDUCE(F28); - output[43] = Q_REDUCE(F29); - output[51] = Q_REDUCE(F30); - output[59] = Q_REDUCE(F31); - - // Iteration 4: - ADD_SUB(F32, F33); - ADD_SUB(F34, F35); - ADD_SUB(F36, F37); - ADD_SUB(F38, F39); - - F35 <<= 4; - F39 <<= 4; - - ADD_SUB(F32, F34); - ADD_SUB(F33, F35); - ADD_SUB(F36, F38); - ADD_SUB(F37, F39); - - F37 <<= 2; - F38 <<= 4; - F39 <<= 6; - - ADD_SUB(F32, F36); - ADD_SUB(F33, F37); - ADD_SUB(F34, F38); - ADD_SUB(F35, F39); - - output[4] = Q_REDUCE(F32); - output[12] = Q_REDUCE(F33); - output[20] = Q_REDUCE(F34); - output[28] = Q_REDUCE(F35); - output[36] = Q_REDUCE(F36); - output[44] = Q_REDUCE(F37); - output[52] = Q_REDUCE(F38); - output[60] = Q_REDUCE(F39); - - // Iteration 5: - ADD_SUB(F40, F41); - ADD_SUB(F42, F43); - ADD_SUB(F44, F45); - ADD_SUB(F46, F47); - - F43 <<= 4; - F47 <<= 4; - - ADD_SUB(F40, F42); - ADD_SUB(F41, F43); - ADD_SUB(F44, F46); - ADD_SUB(F45, F47); - - F45 <<= 2; - F46 <<= 4; - F47 <<= 6; - - ADD_SUB(F40, F44); - ADD_SUB(F41, F45); - ADD_SUB(F42, F46); - ADD_SUB(F43, F47); - - output[5] = Q_REDUCE(F40); - output[13] = Q_REDUCE(F41); - output[21] = Q_REDUCE(F42); - output[29] = Q_REDUCE(F43); - output[37] = Q_REDUCE(F44); - output[45] = Q_REDUCE(F45); - output[53] = Q_REDUCE(F46); - output[61] = Q_REDUCE(F47); - - // Iteration 6: - ADD_SUB(F48, F49); - ADD_SUB(F50, F51); - ADD_SUB(F52, F53); - ADD_SUB(F54, F55); - - F51 <<= 4; - F55 <<= 4; - - ADD_SUB(F48, F50); - ADD_SUB(F49, F51); - ADD_SUB(F52, F54); - ADD_SUB(F53, F55); - - F53 <<= 2; - F54 <<= 4; - F55 <<= 6; - - ADD_SUB(F48, F52); - ADD_SUB(F49, F53); - ADD_SUB(F50, F54); - ADD_SUB(F51, F55); - - output[6] = Q_REDUCE(F48); - output[14] = Q_REDUCE(F49); - output[22] = Q_REDUCE(F50); - output[30] = Q_REDUCE(F51); - output[38] = Q_REDUCE(F52); - output[46] = Q_REDUCE(F53); - output[54] = Q_REDUCE(F54); - output[62] = Q_REDUCE(F55); - - // Iteration 7: - ADD_SUB(F56, F57); - ADD_SUB(F58, F59); - ADD_SUB(F60, F61); - ADD_SUB(F62, F63); - - F59 <<= 4; - F63 <<= 4; - - ADD_SUB(F56, F58); - ADD_SUB(F57, F59); - ADD_SUB(F60, F62); - ADD_SUB(F61, F63); - - F61 <<= 2; - F62 <<= 4; - F63 <<= 6; - - ADD_SUB(F56, F60); - ADD_SUB(F57, F61); - ADD_SUB(F58, F62); - ADD_SUB(F59, F63); - - output[7] = Q_REDUCE(F56); - output[15] = Q_REDUCE(F57); - output[23] = Q_REDUCE(F58); - output[31] = Q_REDUCE(F59); - output[39] = Q_REDUCE(F60); - output[47] = Q_REDUCE(F61); - output[55] = Q_REDUCE(F62); - output[63] = Q_REDUCE(F63); -} - -// Calculates the FFT part of SWIFFT. -// We divided the SWIFFT calculation into two, because that way we could save 2 computations of -// the FFT part, since in the first stage of SWIFFTX the difference between the first 3 SWIFFTs -// is only the A's part. -// -// Parameters: -// - input: the input to FFT. 
-// - m: the input size divided by 8. The function performs m FFTs. -// - output: will store the result. -void SWIFFTFFT(const unsigned char *input, int m, swift_int32_t *output) -{ - int i; - - for (i = 0; - i < m; - i++, input += EIGHTH_N, output += N) - { - FFT(input, output); - } -} - -// Calculates the 'sum' part of SWIFFT, including the base change at the end. -// We divided the SWIFFT calculation into two, because that way we could save 2 computations of -// the FFT part, since in the first stage of SWIFFTX the difference between the first 3 SWIFFTs -// is only the A's part. -// -// Parameters: -// - input: the input. Of size 64 * m. -// - m: the input size divided by 64. -// - output: will store the result. -// - a: the coefficients in the sum. Of size 64 * m. -void SWIFFTSum(const swift_int32_t *input, int m, unsigned char *output, const swift_int16_t *a) -{ - int i, j; - swift_int32_t result[N]; - register swift_int16_t carry = 0; - - for (j = 0; j < N; ++j) - { - register swift_int32_t sum = 0; - const register swift_int32_t *f = input + j; - const register swift_int16_t *k = a + j; - - for (i = 0; i < m; i++, f += N,k += N) - { - sum += (*f) * (*k); - } - - result[j] = sum; - } - - for (j = 0; j < N; ++j) - { - result[j] = ((FIELD_SIZE << 22) + result[j]) % FIELD_SIZE; - } - - for (j = 0; j < 8; ++j) - { - int register carryBit = TranslateToBase256(result + (j << 3), output + (j << 3)); - carry |= carryBit << j; - } - - output[N] = carry; -} - -void ComputeSingleSWIFFTX(unsigned char input[SWIFFTX_INPUT_BLOCK_SIZE], - unsigned char output[SWIFFTX_OUTPUT_BLOCK_SIZE], - bool doSmooth) -{ - int i; - // Will store the result of the FFT parts: - swift_int32_t fftOut[N * M]; - unsigned char intermediate[N * 3 + 8]; - unsigned char carry0,carry1,carry2; - - // Do the three SWIFFTS while remembering the three carry bytes (each carry byte gets - // overriden by the following SWIFFT): - - // 1. Compute the FFT of the input - the common part for the first 3 SWIFFTs: - SWIFFTFFT(input, M, fftOut); - - // 2. Compute the sums of the 3 SWIFFTs, each using a different set of coefficients: - - // 2a. The first SWIFFT: - SWIFFTSum(fftOut, M, intermediate, As); - // Remember the carry byte: - carry0 = intermediate[N]; - - // 2b. The second one: - SWIFFTSum(fftOut, M, intermediate + N, As + (M * N)); - carry1 = intermediate[2 * N]; - - // 2c. The third one: - SWIFFTSum(fftOut, M, intermediate + (2 * N), As + 2 * (M * N)); - carry2 = intermediate[3 * N]; - - //2d. Put three carry bytes in their place - intermediate[3 * N] = carry0; - intermediate[(3 * N) + 1] = carry1; - intermediate[(3 * N) + 2] = carry2; - - // Padding intermediate output with 5 zeroes. - memset(intermediate + (3 * N) + 3, 0, 5); - - // Apply the S-Box: - for (i = 0; i < (3 * N) + 8; ++i) - { - intermediate[i] = SBox[intermediate[i]]; - } - - // 3. 
The final and last SWIFFT: - SWIFFTFFT(intermediate, 3 * (N/8) + 1, fftOut); - SWIFFTSum(fftOut, 3 * (N/8) + 1, output, As); - - if (doSmooth) - { - unsigned char sum[N]; - register int i, j; - memset(sum, 0, N); - - for (i = 0; i < (N + 1) * 8; ++i) - { - register const swift_int16_t *AsRow; - register int AShift; - - if (!(output[i >> 3] & (1 << (i & 7)))) - { - continue; - } - - AsRow = As + N * M + (i & ~(N - 1)) ; - AShift = i & 63; - - for (j = AShift; j < N; ++j) - { - sum[j] += AsRow[j - AShift]; - } - - for(j = 0; j < AShift; ++j) - { - sum[j] -= AsRow[N - AShift + j]; - } - } - - for (i = 0; i < N; ++i) - { - output[i] = sum[i]; - } - - output[N] = 0; - } -} diff --git a/configure b/configure index e18473f6..5ae117c0 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.15.6. +# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.15.7. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. @@ -577,8 +577,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='cpuminer-opt' PACKAGE_TARNAME='cpuminer-opt' -PACKAGE_VERSION='3.15.6' -PACKAGE_STRING='cpuminer-opt 3.15.6' +PACKAGE_VERSION='3.15.7' +PACKAGE_STRING='cpuminer-opt 3.15.7' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures cpuminer-opt 3.15.6 to adapt to many kinds of systems. +\`configure' configures cpuminer-opt 3.15.7 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1404,7 +1404,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of cpuminer-opt 3.15.6:";; + short | recursive ) echo "Configuration of cpuminer-opt 3.15.7:";; esac cat <<\_ACEOF @@ -1509,7 +1509,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -cpuminer-opt configure 3.15.6 +cpuminer-opt configure 3.15.7 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by cpuminer-opt $as_me 3.15.6, which was +It was created by cpuminer-opt $as_me 3.15.7, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2993,7 +2993,7 @@ fi # Define the identity of the package. PACKAGE='cpuminer-opt' - VERSION='3.15.6' + VERSION='3.15.7' cat >>confdefs.h <<_ACEOF @@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by cpuminer-opt $as_me 3.15.6, which was +This file was extended by cpuminer-opt $as_me 3.15.7, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -6756,7 +6756,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -cpuminer-opt config.status 3.15.6 +cpuminer-opt config.status 3.15.7 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 95d825e3..bbe7a18b 100644 --- a/configure.ac +++ b/configure.ac @@ -1,4 +1,4 @@ -AC_INIT([cpuminer-opt], [3.15.6]) +AC_INIT([cpuminer-opt], [3.15.7]) AC_PREREQ([2.59c]) AC_CANONICAL_SYSTEM diff --git a/cpu-miner.c b/cpu-miner.c index 254f38f8..fe2aed0e 100644 --- a/cpu-miner.c +++ b/cpu-miner.c @@ -204,6 +204,7 @@ static double lowest_share = 9e99; // lowest accepted share diff static double last_targetdiff = 0.; #if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32)) static uint32_t hi_temp = 0; +static uint32_t prev_temp = 0; #endif @@ -998,32 +999,67 @@ static struct timeval last_submit_time = {0}; static inline int stats_ptr_incr( int p ) { - return ++p < s_stats_size ? p : 0; + return ++p % s_stats_size; } void report_summary_log( bool force ) { struct timeval now, et, uptime, start_time; - pthread_mutex_lock( &stats_lock ); - gettimeofday( &now, NULL ); timeval_subtract( &et, &now, &five_min_start ); - if ( !( force && ( submit_sum || ( et.tv_sec > 5 ) ) ) - && ( et.tv_sec < 300 ) ) +#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32)) + + // Display CPU temperature and clock rate. + int curr_temp = cpu_temp(0); + static struct timeval cpu_temp_time = {0}; + struct timeval diff; + + if ( !opt_quiet || ( curr_temp >= 80 ) ) { - pthread_mutex_unlock( &stats_lock ); - return; + int wait_time = curr_temp >= 90 ? 5 : curr_temp >= 80 ? 30 : + curr_temp >= 70 ? 60 : 120; + timeval_subtract( &diff, &now, &cpu_temp_time ); + if ( ( diff.tv_sec > wait_time ) + || ( ( curr_temp > prev_temp ) && ( curr_temp >= 75 ) ) ) + { + char tempstr[32]; + float lo_freq = 0., hi_freq = 0.; + + memcpy( &cpu_temp_time, &now, sizeof(cpu_temp_time) ); + linux_cpu_hilo_freq( &lo_freq, &hi_freq ); + if ( use_colors && ( curr_temp >= 70 ) ) + { + if ( curr_temp >= 80 ) + sprintf( tempstr, "%s%d C%s", CL_RED, curr_temp, CL_WHT ); + else + sprintf( tempstr, "%s%d C%s", CL_YLW, curr_temp, CL_WHT ); + } + else + sprintf( tempstr, "%d C", curr_temp ); + + applog( LOG_NOTICE,"CPU temp: curr %s max %d, Freq: %.3f/%.3f GHz", + tempstr, hi_temp, lo_freq / 1e6, hi_freq / 1e6 ); + if ( curr_temp > hi_temp ) hi_temp = curr_temp; + prev_temp = curr_temp; + } } + +#endif + + if ( !( force && ( submit_sum || ( et.tv_sec > 5 ) ) ) + && ( et.tv_sec < 300 ) ) + return; // collect and reset periodic counters + pthread_mutex_lock( &stats_lock ); + uint64_t submits = submit_sum; submit_sum = 0; uint64_t accepts = accept_sum; accept_sum = 0; uint64_t rejects = reject_sum; reject_sum = 0; uint64_t stales = stale_sum; stale_sum = 0; uint64_t solved = solved_sum; solved_sum = 0; - memcpy( &start_time, &five_min_start, sizeof start_time ); memcpy( &five_min_start, &now, sizeof now ); @@ -1080,27 +1116,38 @@ void report_summary_log( bool force ) applog2( LOG_INFO,"Submitted %6d %6d", submits, submitted_share_count ); - applog2( LOG_INFO,"Accepted %6d %6d", - accepts, accepted_share_count ); + applog2( LOG_INFO,"Accepted %6d %6d %5.1f%%", + accepts, accepted_share_count, + 100. 
* accepted_share_count / submitted_share_count ); if ( stale_share_count ) - applog2( LOG_INFO,"Stale %6d %6d", - stales, stale_share_count ); + applog2( LOG_INFO,"Stale %6d %6d %5.1f%%", + stales, stale_share_count, + 100. * stale_share_count / submitted_share_count ); if ( rejected_share_count ) - applog2( LOG_INFO,"Rejected %6d %6d", - rejects, rejected_share_count ); + applog2( LOG_INFO,"Rejected %6d %6d %5.1f%%", + rejects, rejected_share_count, + 100. * rejected_share_count / submitted_share_count ); if ( solved_block_count ) applog2( LOG_INFO,"Blocks Solved %6d %6d", solved, solved_block_count ); applog2( LOG_INFO, "Hi/Lo Share Diff %.5g / %.5g", highest_share, lowest_share ); -} -bool lowdiff_debug = false; + static int64_t no_acks = 0; + if ( no_acks ) + { + no_acks = submitted_share_count + - ( accepted_share_count + stale_share_count + rejected_share_count ); + if ( no_acks ) // 2 consecutive cycles non zero + applog(LOG_WARNING,"Share count mismatch: %d, stats may be incorrect", + no_acks ); + } +} static int share_result( int result, struct work *work, const char *reason ) { - double share_time = 0.; //, share_ratio = 0.; + double share_time = 0.; double hashrate = 0.; int latency = 0; struct share_stats_t my_stats = {0}; @@ -1141,11 +1188,6 @@ static int share_result( int result, struct work *work, sizeof last_submit_time ); } -/* - share_ratio = my_stats.net_diff == 0. ? 0. : my_stats.share_diff / - my_stats.net_diff; -*/ - // check result if ( likely( result ) ) { @@ -2324,6 +2366,8 @@ static void *miner_thread( void *userdata ) pthread_mutex_unlock( &stats_lock ); } + // This code is deprecated, scanhash should never return true. + // This remains as a backup in case some old implementations still exist. // If unsubmiited nonce(s) found, submit now. if ( unlikely( nonce_found && !opt_benchmark ) ) { @@ -2350,48 +2394,6 @@ static void *miner_thread( void *userdata ) } } -#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32)) - - // Display CPU temperature and clock rate. - int curr_temp, prev_hi_temp; - static struct timeval cpu_temp_time = {0}; - - pthread_mutex_lock( &stats_lock ); - - prev_hi_temp = hi_temp; - curr_temp = cpu_temp(0); - if ( curr_temp > hi_temp ) hi_temp = curr_temp; - - pthread_mutex_unlock( &stats_lock ); - - if ( !opt_quiet || ( curr_temp >= 80 ) ) - { - int wait_time = curr_temp >= 80 ? 20 : curr_temp >= 70 ? 
60 : 120; - timeval_subtract( &diff, &tv_end, &cpu_temp_time ); - if ( ( diff.tv_sec > wait_time ) || ( curr_temp > prev_hi_temp ) ) - { - char tempstr[32]; - float lo_freq = 0., hi_freq = 0.; - - memcpy( &cpu_temp_time, &tv_end, sizeof(cpu_temp_time) ); - linux_cpu_hilo_freq( &lo_freq, &hi_freq ); - if ( use_colors && ( curr_temp >= 70 ) ) - { - if ( curr_temp >= 80 ) - sprintf( tempstr, "%s%d C%s", CL_RED, curr_temp, CL_WHT ); - else - sprintf( tempstr, "%s%d C%s", CL_YLW, curr_temp, CL_WHT ); - } - else - sprintf( tempstr, "%d C", curr_temp ); - - applog( LOG_NOTICE,"CPU temp: curr %s (max %d), Freq: %.3f/%.3f GHz", - tempstr, prev_hi_temp, lo_freq / 1e6, hi_freq / 1e6 ); - } - } - -#endif - // display hashrate if ( unlikely( opt_hash_meter ) ) { diff --git a/miner.h b/miner.h index 119c8a75..234b1cc0 100644 --- a/miner.h +++ b/miner.h @@ -457,9 +457,6 @@ bool stratum_subscribe(struct stratum_ctx *sctx); bool stratum_authorize(struct stratum_ctx *sctx, const char *user, const char *pass); bool stratum_handle_method(struct stratum_ctx *sctx, const char *s); -extern bool lowdiff_debug; - - extern bool aes_ni_supported; extern char *rpc_user; @@ -549,7 +546,7 @@ enum algos { ALGO_LYRA2REV3, ALGO_LYRA2Z, ALGO_LYRA2Z330, - ALGO_M7M, + ALGO_M7M, ALGO_MINOTAUR, ALGO_MYR_GR, ALGO_NEOSCRYPT, diff --git a/simd-utils.h b/simd-utils.h index f8ee35fd..55cc5529 100644 --- a/simd-utils.h +++ b/simd-utils.h @@ -131,7 +131,7 @@ // If a sequence of constants is to be used it can be more efficient to // use arithmetic with already existing constants to generate new ones. // -// ex: const __m512i one = _mm512_const1_64( 1 ); +// ex: const __m512i one = m512_one_64; // const __m512i two = _mm512_add_epi64( one, one ); // ////////////////////////////////////////////////////////////////////////// diff --git a/simd-utils/simd-128.h b/simd-utils/simd-128.h index 8b1fbeba..35be6109 100644 --- a/simd-utils/simd-128.h +++ b/simd-utils/simd-128.h @@ -27,13 +27,15 @@ // All of the utilities here assume all data is in registers except // in rare cases where arguments are pointers. // +// Some constants are generated using a memory overlay on the stack. +// // Intrinsics automatically promote from REX to VEX when AVX is available // but ASM needs to be done manually. // /////////////////////////////////////////////////////////////////////////// -// Efficient and convenient moving bwtween GP & low bits of XMM. +// Efficient and convenient moving between GP & low bits of XMM. // Use VEX when available to give access to xmm8-15 and zero extend for // larger vectors. @@ -81,6 +83,23 @@ static inline uint32_t mm128_mov128_32( const __m128i a ) return n; } +// Equivalent of set1, broadcast integer to all elements. +#define m128_const_i128( i ) mm128_mov64_128( i ) +#define m128_const1_64( i ) _mm_shuffle_epi32( mm128_mov64_128( i ), 0x44 ) +#define m128_const1_32( i ) _mm_shuffle_epi32( mm128_mov32_128( i ), 0x00 ) + +#if defined(__SSE4_1__) + +// Assign 64 bit integers to respective elements: {hi, lo} +#define m128_const_64( hi, lo ) \ + _mm_insert_epi64( mm128_mov64_128( lo ), hi, 1 ) + +#else // No insert in SSE2 + +#define m128_const_64 _mm_set_epi64x + +#endif + // Pseudo constants #define m128_zero _mm_setzero_si128() @@ -107,27 +126,53 @@ static inline __m128i mm128_neg1_fn() } #define m128_neg1 mm128_neg1_fn() +#if defined(__SSE4_1__) -// const functions work best when arguments are immediate constants or -// are known to be in registers. If data needs to loaded from memory or cache -// use set. 
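A minimal usage sketch of the constant helpers introduced above, assuming SSE4.1 and the m128_const_64 / m128_const1_64 macros defined earlier in this header; the byte-swap mask is the same value mm128_bswap_64 uses further down, and deriving a new constant by arithmetic follows the guideline given in simd-utils.h:

   // Illustrative only: form vector constants from immediates without a
   // memory load (SSE4.1 path shown).
   const __m128i bswap64_mask = m128_const_64( 0x08090a0b0c0d0e0f,
                                               0x0001020304050607 );
   const __m128i one_64 = m128_const1_64( 1 );              // both 64 bit lanes = 1
   const __m128i two_64 = _mm_add_epi64( one_64, one_64 );  // derive 2 from 1
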
- -// Equivalent of set1, broadcast 64 bit integer to all elements. -#define m128_const1_64( i ) _mm_shuffle_epi32( mm128_mov64_128( i ), 0x44 ) -#define m128_const1_32( i ) _mm_shuffle_epi32( mm128_mov32_128( i ), 0x00 ) +///////////////////////////// +// +// _mm_insert_ps( _mm128i v1, __m128i v2, imm8 c ) +// +// Fast and powerful but very limited in its application. +// It requires SSE4.1 but only works with 128 bit vectors with 32 bit +// elements. There is no equivalent instruction for 256 bit or 512 bit vectors. +// There's no integer version. There's no 64 bit, 16 bit or byte element +// sizing. It's unique. +// +// It can: +// - zero 32 bit elements of a 128 bit vector. +// - extract any 32 bit element from one 128 bit vector and insert the +// data to any 32 bit element of another 128 bit vector, or the same vector. +// - do both simultaneoulsly. +// +// It can be used as a more efficient replacement for _mm_insert_epi32 +// or _mm_extract_epi32. +// +// Control byte definition: +// c[3:0] zero mask +// c[5:4] destination element selector +// c[7:6] source element selector -#if defined(__SSE4_1__) +// Convert type and abbreviate name: e"x"tract "i"nsert "m"ask +#define mm128_xim_32( v1, v2, c ) \ + _mm_castps_si128( _mm_insert_ps( _mm_castsi128_ps( v1 ), \ + _mm_castsi128_ps( v2 ), c ) ) -// Assign 64 bit integers to respective elements: {hi, lo} -#define m128_const_64( hi, lo ) \ - _mm_insert_epi64( mm128_mov64_128( lo ), hi, 1 ) +// Some examples of simple operations: -#else // No insert in SSE2 +// Insert 32 bit integer into v at element c and return modified v. +static inline __m128i mm128_insert_32( const __m128i v, const uint32_t i, + const int c ) +{ return mm128_xim_32( v, mm128_mov32_128( i ), c<<4 ); } -#define m128_const_64 _mm_set_epi64x +// Extract 32 bit element c from v and return as integer. +static inline uint32_t mm128_extract_32( const __m128i v, const int c ) +{ return mm128_mov128_32( mm128_xim_32( v, v, c<<6 ) ); } -#endif +// Clear (zero) 32 bit elements based on bits set in 4 bit mask. +static inline __m128i mm128_mask_32( const __m128i v, const int m ) +{ return mm128_xim_32( v, v, m ); } +#endif // SSE4_1 // // Basic operations without equivalent SIMD intrinsic @@ -140,11 +185,6 @@ static inline __m128i mm128_neg1_fn() #define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v ) #define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v ) -// Clear (zero) 32 bit elements based on bits set in 4 bit mask. -// Fast, avoids using vector mask, but only available for 128 bit vectors. -#define mm128_mask_32( a, mask ) \ - _mm_castps_si128( _mm_insert_ps( _mm_castsi128_ps( a ), \ - _mm_castsi128_ps( a ), mask ) ) // Add 4 values, fewer dependencies than sequential addition. #define mm128_add4_64( a, b, c, d ) \ @@ -162,27 +202,6 @@ static inline __m128i mm128_neg1_fn() #define mm128_xor4( a, b, c, d ) \ _mm_xor_si128( _mm_xor_si128( a, b ), _mm_xor_si128( c, d ) ) -// Horizontal vector testing - -#if defined(__SSE4_1__) - -#define mm128_allbits0( a ) _mm_testz_si128( a, a ) -#define mm128_allbits1( a ) _mm_testc_si128( a, m128_neg1 ) -// probably broken, avx2 is -//#define mm128_allbitsne( a ) _mm_testnzc_si128( a, m128_neg1 ) -#define mm128_anybits0( a ) mm128_allbits1( a ) -#define mm128_anybits1( a ) mm128_allbits0( a ) - -#else // SSE2 - -// Bit-wise test of entire vector, useful to test results of cmp. 
-#define mm128_anybits0( a ) (uint128_t)(a) -#define mm128_anybits1( a ) (((uint128_t)(a))+1) - -#define mm128_allbits0( a ) ( !mm128_anybits1(a) ) -#define mm128_allbits1( a ) ( !mm128_anybits0(a) ) - -#endif // SSE4.1 else SSE2 // // Vector pointer cast @@ -204,11 +223,6 @@ static inline __m128i mm128_neg1_fn() #define casto_m128i(p,o) (((__m128i*)(p))+(o)) -// Memory functions -// Mostly for convenience, avoids calculating bytes. -// Assumes data is alinged and integral. -// n = number of __m128i, bytes/16 - // Memory functions // Mostly for convenience, avoids calculating bytes. // Assumes data is alinged and integral. @@ -256,14 +270,14 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n ) #define mm128_ror_32 _mm_ror_epi32 #define mm128_rol_32 _mm_rol_epi32 -#else +#else // SSE2 #define mm128_ror_64 mm128_ror_var_64 #define mm128_rol_64 mm128_rol_var_64 #define mm128_ror_32 mm128_ror_var_32 #define mm128_rol_32 mm128_rol_var_32 -#endif // AVX512 else +#endif // AVX512 else SSE2 #define mm128_ror_16( v, c ) \ _mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) ) @@ -280,58 +294,19 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n ) //#define mm128_swap_64( v ) _mm_alignr_epi8( v, v, 8 ) //#define mm128_ror_1x32( v ) _mm_alignr_epi8( v, v, 4 ) //#define mm128_rol_1x32( v ) _mm_alignr_epi8( v, v, 12 ) -#define mm128_ror_1x16( v ) _mm_alignr_epi8( v, v, 2 ) -#define mm128_rol_1x16( v ) _mm_alignr_epi8( v, v, 14 ) -#define mm128_ror_1x8( v ) _mm_alignr_epi8( v, v, 1 ) -#define mm128_rol_1x8( v ) _mm_alignr_epi8( v, v, 15 ) - -// Rotate by c bytes -#define mm128_ror_x8( v, c ) _mm_alignr_epi8( v, c ) -#define mm128_rol_x8( v, c ) _mm_alignr_epi8( v, 16-(c) ) - - -// Invert vector: {3,2,1,0} -> {0,1,2,3} -#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b ) - -#if defined(__SSSE3__) - -#define mm128_invert_16( v ) \ - _mm_shuffle_epi8( v, mm128_const_64( 0x0100030205040706, \ - 0x09080b0a0d0c0f0e ) -#define mm128_invert_8( v ) \ - _mm_shuffle_epi8( v, mm128_const_64( 0x0001020304050607, \ - 0x08090a0b0c0d0e0f ) - -#endif // SSSE3 - - -// -// Rotate elements within lanes. +// Swap 32 bit elements in 64 bit lanes #define mm128_swap64_32( v ) _mm_shuffle_epi32( v, 0xb1 ) -#define mm128_rol64_8( v, c ) \ - _mm_or_si128( _mm_slli_epi64( v, ( ( (c)<<3 ) ), \ - _mm_srli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) ) - -#define mm128_ror64_8( v, c ) \ - _mm_or_si128( _mm_srli_epi64( v, ( ( (c)<<3 ) ), \ - _mm_slli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) ) - -#define mm128_rol32_8( v, c ) \ - _mm_or_si128( _mm_slli_epi32( v, ( ( (c)<<3 ) ), \ - _mm_srli_epi32( v, ( ( 32 - ( (c)<<3 ) ) ) ) +#if defined(__SSSE3__) -#define mm128_ror32_8( v, c ) \ - _mm_or_si128( _mm_srli_epi32( v, ( ( (c)<<3 ) ), \ - _mm_slli_epi32( v, ( ( 32 - ( (c)<<3 ) ) ) ) - +// Rotate right by c bytes +static inline __m128i mm128_ror_x8( const __m128i v, const int c ) +{ return _mm_alignr_epi8( v, v, c ); } // // Endian byte swap. -#if defined(__SSSE3__) - #define mm128_bswap_64( v ) \ _mm_shuffle_epi8( v, m128_const_64( 0x08090a0b0c0d0e0f, \ 0x0001020304050607 ) ) @@ -374,7 +349,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n ) #else // SSE2 -// Use inline function instead of macro due to multiple statements. 
static inline __m128i mm128_bswap_64( __m128i v ) { v = _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) ); diff --git a/simd-utils/simd-256.h b/simd-utils/simd-256.h index 5f94cbc8..635eb4f2 100644 --- a/simd-utils/simd-256.h +++ b/simd-utils/simd-256.h @@ -15,33 +15,35 @@ // is available. // Move integer to low element of vector, other elements are set to zero. +#define mm256_mov64_256( i ) _mm256_castsi128_si256( mm128_mov64_128( i ) ) +#define mm256_mov32_256( i ) _mm256_castsi128_si256( mm128_mov32_128( i ) ) -#define mm256_mov64_256( n ) _mm256_castsi128_si256( mm128_mov64_128( n ) ) -#define mm256_mov32_256( n ) _mm256_castsi128_si256( mm128_mov32_128( n ) ) - -#define mm256_mov256_64( a ) mm128_mov128_64( _mm256_castsi256_si128( a ) ) -#define mm256_mov256_32( a ) mm128_mov128_32( _mm256_castsi256_si128( a ) ) +// Mo0ve low element of vector to integer. +#define mm256_mov256_64( v ) mm128_mov128_64( _mm256_castsi256_si128( v ) ) +#define mm256_mov256_32( v ) mm128_mov128_32( _mm256_castsi256_si128( v ) ) // concatenate two 128 bit vectors into one 256 bit vector: { hi, lo } #define mm256_concat_128( hi, lo ) \ _mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 ) -// Equavalent of set, move 64 bit integer constants to respective 64 bit +// Equivalent of set, move 64 bit integer constants to respective 64 bit // elements. static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2, const uint64_t i1, const uint64_t i0 ) { - __m128i hi, lo; - lo = mm128_mov64_128( i0 ); - hi = mm128_mov64_128( i2 ); - lo = _mm_insert_epi64( lo, i1, 1 ); - hi = _mm_insert_epi64( hi, i3, 1 ); - return mm256_concat_128( hi, lo ); + union { __m256i m256i; + uint64_t u64[4]; } v; + v.u64[0] = i0; v.u64[1] = i1; v.u64[2] = i2; v.u64[3] = i3; + return v.m256i; } -// Equivalent of set1, broadcast integer constant to all elements. -#define m256_const1_128( v ) _mm256_broadcastsi128_si256( v ) +// Equivalent of set1. +// 128 bit vector argument +#define m256_const1_128( v ) \ + _mm256_permute4x64_epi64( _mm256_castsi128_si256( v ), 0x44 ) +// 64 bit integer argument +#define m256_const1_i128( i ) m256_const1_128( mm128_mov64_128( i ) ) #define m256_const1_64( i ) _mm256_broadcastq_epi64( mm128_mov64_128( i ) ) #define m256_const1_32( i ) _mm256_broadcastd_epi32( mm128_mov32_128( i ) ) #define m256_const1_16( i ) _mm256_broadcastw_epi16( mm128_mov32_128( i ) ) @@ -50,119 +52,29 @@ static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2, #define m256_const2_64( i1, i0 ) \ m256_const1_128( m128_const_64( i1, i0 ) ) -#define m126_const2_32( i1, i0 ) \ - m256_const1_64( ( (uint64_t)(i1) << 32 ) | ( (uint64_t)(i0) & 0xffffffff ) ) - - // // All SIMD constant macros are actually functions containing executable // code and therefore can't be used as compile time initializers. 
-#define m256_zero _mm256_setzero_si256() -#define m256_one_256 mm256_mov64_256( 1 ) -#define m256_one_128 \ - _mm256_permute4x64_epi64( _mm256_castsi128_si256( \ - mm128_mov64_128( 1 ) ), 0x44 ) -#define m256_one_64 _mm256_broadcastq_epi64( mm128_mov64_128( 1 ) ) -#define m256_one_32 _mm256_broadcastd_epi32( mm128_mov64_128( 1 ) ) -#define m256_one_16 _mm256_broadcastw_epi16( mm128_mov64_128( 1 ) ) -#define m256_one_8 _mm256_broadcastb_epi8 ( mm128_mov64_128( 1 ) ) +#define m256_zero _mm256_setzero_si256() +#define m256_one_256 mm256_mov64_256( 1 ) +#define m256_one_128 m256_const1_i128( 1 ) +#define m256_one_64 _mm256_broadcastq_epi64( mm128_mov64_128( 1 ) ) +#define m256_one_32 _mm256_broadcastd_epi32( mm128_mov64_128( 1 ) ) +#define m256_one_16 _mm256_broadcastw_epi16( mm128_mov64_128( 1 ) ) +#define m256_one_8 _mm256_broadcastb_epi8 ( mm128_mov64_128( 1 ) ) static inline __m256i mm256_neg1_fn() { - __m256i a; - asm( "vpcmpeqq %0, %0, %0\n\t" : "=x"(a) ); - return a; + __m256i v; + asm( "vpcmpeqq %0, %0, %0\n\t" : "=x"(v) ); + return v; } #define m256_neg1 mm256_neg1_fn() - -// -// Vector size conversion. -// -// Allows operations on either or both halves of a 256 bit vector serially. -// Handy for parallel AES. -// Caveats when writing: -// _mm256_castsi256_si128 is free and without side effects. -// _mm256_castsi128_si256 is also free but leaves the high half -// undefined. That's ok if the hi half will be subseqnently assigned. -// If assigning both, do lo first, If assigning only 1, use -// _mm256_inserti128_si256. -// -#define mm128_extr_lo128_256( a ) _mm256_castsi256_si128( a ) -#define mm128_extr_hi128_256( a ) _mm256_extracti128_si256( a, 1 ) - -// Extract integers from 256 bit vector, ineficient, avoid if possible.. -#define mm256_extr_4x64( a3, a2, a1, a0, src ) \ -do { \ - __m128i hi = _mm256_extracti128_si256( src, 1 ); \ - a0 = mm128_mov128_64( _mm256_castsi256_si128( src) ); \ - a1 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \ - a2 = mm128_mov128_64( hi ); \ - a3 = _mm_extract_epi64( hi, 1 ); \ -} while(0) - -#define mm256_extr_8x32( a7, a6, a5, a4, a3, a2, a1, a0, src ) \ -do { \ - uint64_t t = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \ - __m128i hi = _mm256_extracti128_si256( src, 1 ); \ - a0 = mm256_mov256_32( src ); \ - a1 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 1 ); \ - a2 = (uint32_t)( t ); \ - a3 = (uint32_t)( t<<32 ); \ - t = _mm_extract_epi64( hi, 1 ); \ - a4 = mm128_mov128_32( hi ); \ - a5 = _mm_extract_epi32( hi, 1 ); \ - a6 = (uint32_t)( t ); \ - a7 = (uint32_t)( t<<32 ); \ -} while(0) - - -// Bytewise test of all 256 bits -#define mm256_all0_8( a ) \ - ( _mm256_movemask_epi8( a ) == 0 ) - -#define mm256_all1_8( a ) \ - ( _mm256_movemask_epi8( a ) == -1 ) - - -#define mm256_anybits0( a ) \ - ( _mm256_movemask_epi8( a ) & 0xffffffff ) - -#define mm256_anybits1( a ) \ - ( ( _mm256_movemask_epi8( a ) & 0xffffffff ) != 0xffffffff ) - - -// Bitwise test of all 256 bits -#define mm256_allbits0( a ) _mm256_testc_si256( a, m256_neg1 ) -#define mm256_allbits1( a ) _mm256_testc_si256( m256_zero, a ) -//#define mm256_anybits0( a ) !mm256_allbits1( a ) -//#define mm256_anybits1( a ) !mm256_allbits0( a ) - - -// Parallel AES, for when x is expected to be in a 256 bit register. -// Use same 128 bit key. 
- -#if defined(__VAES__) - -#define mm256_aesenc_2x128( x, k ) \ - _mm256_aesenc_epi128( x, k ) - -#else - -#define mm256_aesenc_2x128( x, k ) \ - mm256_concat_128( _mm_aesenc_si128( mm128_extr_hi128_256( x ), k ), \ - _mm_aesenc_si128( mm128_extr_lo128_256( x ), k ) ) - -#endif - -#define mm256_paesenc_2x128( y, x, k ) do \ -{ \ - __m128i *X = (__m128i*)x; \ - __m128i *Y = (__m128i*)y; \ - Y[0] = _mm_aesenc_si128( X[0], k ); \ - Y[1] = _mm_aesenc_si128( X[1], k ); \ -} while(0); +// Consistent naming for similar operations. +#define mm128_extr_lo128_256( v ) _mm256_castsi256_si128( v ) +#define mm128_extr_hi128_256( v ) _mm256_extracti128_si256( v, 1 ) // // Pointer casting @@ -201,13 +113,13 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n ) // // Basic operations without SIMD equivalent -// Bitwise not ( ~x ) -#define mm256_not( x ) _mm256_xor_si256( (x), m256_neg1 ) \ +// Bitwise not ( ~v ) +#define mm256_not( v ) _mm256_xor_si256( v, m256_neg1 ) \ -// Unary negation of each element ( -a ) -#define mm256_negate_64( a ) _mm256_sub_epi64( m256_zero, a ) -#define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a ) -#define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a ) +// Unary negation of each element ( -v ) +#define mm256_negate_64( v ) _mm256_sub_epi64( m256_zero, v ) +#define mm256_negate_32( v ) _mm256_sub_epi32( m256_zero, v ) +#define mm256_negate_16( v ) _mm256_sub_epi16( m256_zero, v ) // Add 4 values, fewer dependencies than sequential addition. @@ -265,17 +177,14 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n ) #define mm256_ror_32 _mm256_ror_epi32 #define mm256_rol_32 _mm256_rol_epi32 -#else - - -// No AVX512, use fallback. +#else // AVX2 #define mm256_ror_64 mm256_ror_var_64 #define mm256_rol_64 mm256_rol_var_64 #define mm256_ror_32 mm256_ror_var_32 #define mm256_rol_32 mm256_rol_var_32 -#endif // AVX512 else +#endif // AVX512 else AVX2 #define mm256_ror_16( v, c ) \ _mm256_or_si256( _mm256_srli_epi16( v, c ), \ @@ -285,46 +194,6 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n ) _mm256_or_si256( _mm256_slli_epi16( v, c ), \ _mm256_srli_epi16( v, 16-(c) ) ) -// Rotate bits in each element of v by the amount in corresponding element of -// index vector c -#define mm256_rorv_64( v, c ) \ - _mm256_or_si256( \ - _mm256_srlv_epi64( v, c ), \ - _mm256_sllv_epi64( v, _mm256_sub_epi64( \ - _mm256_set1_epi64x( 64 ), c ) ) ) - -#define mm256_rolv_64( v, c ) \ - _mm256_or_si256( \ - _mm256_sllv_epi64( v, c ), \ - _mm256_srlv_epi64( v, _mm256_sub_epi64( \ - _mm256_set1_epi64x( 64 ), c ) ) ) - -#define mm256_rorv_32( v, c ) \ - _mm256_or_si256( \ - _mm256_srlv_epi32( v, c ), \ - _mm256_sllv_epi32( v, _mm256_sub_epi32( \ - _mm256_set1_epi32( 32 ), c ) ) ) - -#define mm256_rolv_32( v, c ) \ - _mm256_or_si256( \ - _mm256_sllv_epi32( v, c ), \ - _mm256_srlv_epi32( v, _mm256_sub_epi32( \ - _mm256_set1_epi32( 32 ), c ) ) ) - -// AVX512 can do 16 bit elements. -#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) - -#define mm256_rorv_16( v, c ) \ - _mm256_or_si256( \ - _mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \ - _mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) ) - -#define mm256_rolv_16( v, c ) \ - _mm256_or_si256( \ - _mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \ - _mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) ) - -#endif // AVX512 // // Rotate elements accross all lanes. 
@@ -336,13 +205,26 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n ) #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) -#define mm256_swap_128( v ) _mm256_alignr_epi64( v, v, 2 ) -#define mm256_ror_1x64( v ) _mm256_alignr_epi64( v, v, 1 ) -#define mm256_rol_1x64( v ) _mm256_alignr_epi64( v, v, 3 ) -#define mm256_ror_1x32( v ) _mm256_alignr_epi32( v, v, 1 ) -#define mm256_rol_1x32( v ) _mm256_alignr_epi32( v, v, 7 ) -#define mm256_ror_3x32( v ) _mm256_alignr_epi32( v, v, 3 ) -#define mm256_rol_3x32( v ) _mm256_alignr_epi32( v, v, 5 ) +static inline __m256i mm256_swap_128( const __m256i v ) +{ return _mm256_alignr_epi64( v, v, 2 ); } + +static inline __m256i mm256_ror_1x64( const __m256i v ) +{ return _mm256_alignr_epi64( v, v, 1 ); } + +static inline __m256i mm256_rol_1x64( const __m256i v ) +{ return _mm256_alignr_epi64( v, v, 3 ); } + +static inline __m256i mm256_ror_1x32( const __m256i v ) +{ return _mm256_alignr_epi32( v, v, 1 ); } + +static inline __m256i mm256_rol_1x32( const __m256i v ) +{ return _mm256_alignr_epi32( v, v, 7 ); } + +static inline __m256i mm256_ror_3x32( const __m256i v ) +{ return _mm256_alignr_epi32( v, v, 3 ); } + +static inline __m256i mm256_rol_3x32( const __m256i v ) +{ return _mm256_alignr_epi32( v, v, 5 ); } #else // AVX2 @@ -377,131 +259,18 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n ) #endif // AVX512 else AVX2 - -// AVX512 can do 16 & 8 bit elements. -#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) - -// Rotate 256 bit vector by one 16 bit element. -#define mm256_ror_1x16( v ) \ - _mm256_permutexvar_epi16( m256_const_64( \ - 0x0000000f000e000d, 0x000c000b000a0009, \ - 0x0008000700060005, 0x0004000300020001 ), v ) - -#define mm256_rol_1x16( v ) \ - _mm256_permutexvar_epi16( m256_const_64( \ - 0x000e000d000c000b, 0x000a000900080007, \ - 0x0006000500040003, 0x000200010000000f ), v ) - -#if defined (__AVX512VBMI__) - -// Rotate 256 bit vector by one byte. -#define mm256_ror_1x8( v ) _mm256_permutexvar_epi8( m256_const_64( \ - 0x001f1e1d1c1b1a19, 0x1817161514131211, \ - 0x100f0e0d0c0b0a09, 0x0807060504030201 ), v ) - -#define mm256_rol_1x8( v ) _mm256_permutexvar_epi16( m256_const_64( \ - 0x1e1d1c1b1a191817, 0x161514131211100f, \ - 0x0e0d0c0b0a090807, 0x060504030201001f ), v ) - -#endif // VBMI - -#endif // AVX512 - - -// Invert vector: {3,2,1,0} -> {0,1,2,3} - -#define mm256_invert_64 ( v ) _mm256_permute4x64_epi64( v, 0x1b ) - -#define mm256_invert_32 ( v ) _mm256_permutevar8x32_epi32( v, \ - m256_const_64( 0x0000000000000001, 0x0000000200000003 \ - 0x0000000400000005, 0x0000000600000007 ) - -#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) - -// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7} -#define mm256_invert_16 ( v ) \ - _mm256_permutexvar_epi16( m256_const_64( \ - 0x0000000100020003, 0x0004000500060007, \ - 0x00080009000a000b, 0x000c000d000e000f ), v ) - -#if defined(__AVX512VBMI__) - -#define mm256_invert_8( v ) \ - _mm256_permutexvar_epi8( m256_const_64( \ - 0x0001020304050607, 0x08090a0b0c0d0e0f, \ - 0x1011121314151617, 0x18191a1b1c1d1e1f ), v ) -#endif // VBMI -#endif // AVX512 - - // // Rotate elements within each 128 bit lane of 256 bit vector. 
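The AVX512VL path above uses valignq to rotate whole 64 bit elements of a 256 bit vector. A small sketch, assuming -mavx512f -mavx512vl and a hypothetical main(), showing the element order produced by the body of mm256_ror_1x64:

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

int main( void )
{
   __m256i v = _mm256_set_epi64x( 3, 2, 1, 0 );   // elements {0,1,2,3}
   __m256i r = _mm256_alignr_epi64( v, v, 1 );    // body of mm256_ror_1x64
   uint64_t out[4];
   _mm256_storeu_si256( (__m256i*)out, r );
   printf( "%llu %llu %llu %llu\n",               // expect 1 2 3 0
           (unsigned long long)out[0], (unsigned long long)out[1],
           (unsigned long long)out[2], (unsigned long long)out[3] );
   return 0;
}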
-#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e ) - -#define mm256_ror128_32( v ) _mm256_shuffle_epi32( v, 0x39 ) - -#define mm256_rol128_32( v ) _mm256_shuffle_epi32( v, 0x93 ) - -#define mm256_ror128_x8( v, c ) _mm256_alignr_epi8( v, v, c ) - -/* -// Rotate each 128 bit lane by c elements. -#define mm256_ror128_8( v, c ) \ - _mm256_or_si256( _mm256_bsrli_epi128( v, c ), \ - _mm256_bslli_epi128( v, 16-(c) ) ) -#define mm256_rol128_8( v, c ) \ - _mm256_or_si256( _mm256_bslli_epi128( v, c ), \ - _mm256_bsrli_epi128( v, 16-(c) ) ) -*/ - -// Rotate elements in each 64 bit lane - -#define mm256_swap64_32( v ) _mm256_shuffle_epi32( v, 0xb1 ) - -#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) - -#define mm256_rol64_8( v, c ) _mm256_rol_epi64( v, ((c)<<3) ) -#define mm256_ror64_8( v, c ) _mm256_ror_epi64( v, ((c)<<3) ) - -#else - -#define mm256_rol64_8( v, c ) \ - _mm256_or_si256( _mm256_slli_epi64( v, ( ( (c)<<3 ) ), \ - _mm256_srli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) ) - -#define mm256_ror64_8( v, c ) \ - _mm256_or_si256( _mm256_srli_epi64( v, ( ( (c)<<3 ) ), \ - _mm256_slli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) ) - -#endif - - -// Rotate elements in each 32 bit lane - -#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) - -#define mm256_swap32_16( v ) _mm256_rol_epi32( v, 16 ) - -#define mm256_rol32_8( v ) _mm256_rol_epi32( v, 8 ) -#define mm256_ror32_8( v ) _mm256_ror_epi32( v, 8 ) - -#else - -#define mm256_swap32_16( v ) \ - _mm256_or_si256( _mm256_slli_epi32( v, 16 ), \ - _mm256_srli_epi32( v, 16 ) ) - -#define mm256_rol32_8( v ) \ - _mm256_or_si256( _mm256_slli_epi32( v, 8 ), \ - _mm256_srli_epi32( v, 8 ) ) - -#define mm256_ror32_8( v, c ) \ - _mm256_or_si256( _mm256_srli_epi32( v, 8 ), \ - _mm256_slli_epi32( v, 8 ) ) +#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e ) +#define mm256_ror128_32( v ) _mm256_shuffle_epi32( v, 0x39 ) +#define mm256_rol128_32( v ) _mm256_shuffle_epi32( v, 0x93 ) -#endif +static inline __m256i mm256_ror128_x8( const __m256i v, const int c ) +{ return _mm256_alignr_epi8( v, v, c ); } +// Swap 32 bit elements in each 64 bit lane. +#define mm256_swap64_32( v ) _mm256_shuffle_epi32( v, 0xb1 ) // // Swap bytes in vector elements, endian bswap. diff --git a/simd-utils/simd-512.h b/simd-utils/simd-512.h index a13e88f4..22c5331a 100644 --- a/simd-utils/simd-512.h +++ b/simd-utils/simd-512.h @@ -26,9 +26,6 @@ // _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute // usually shuffles accross all lanes. // -// Some instructions like cmp and blend use a mask regsiter now instead -// a mask vector. -// // permutexvar has args reversed, index is first arg. Previously all // permutes and shuffles have the index last. // @@ -85,52 +82,43 @@ #define mm512_mov256_64( a ) mm128_mov128_64( _mm256_castsi512_si128( a ) ) #define mm512_mov256_32( a ) mm128_mov128_32( _mm256_castsi512_si128( a ) ) - -// Insert and extract integers is a multistage operation. -// Insert integer into __m128i, then insert __m128i to __m256i, finally -// insert __256i into __m512i. Reverse the order for extract. -// Do not use __m512_insert_epi64 or _mm256_insert_epi64 to perform multiple -// inserts. -// Avoid small integers for multiple inserts. -// Shortcuts: -// Use castsi to reference the low bits of a vector or sub-vector. (free) -// Use mov to insert integer into low bits of vector or sub-vector. (cheap) -// Use _mm_insert only to reference the high bits of __m128i. 
(expensive)
-// Sequence instructions to minimize data dependencies.
-// Use const or const1 only when integer is either immediate or known to be in
-// a GP register. Use set/set1 when data needs to be loaded from memory or
-// cache.
+// A simple 128 bit permute; using a function instead of a macro avoids
+// problems if the v arg is passed as an expression.
+static inline __m512i mm512_perm_128( const __m512i v, const int c )
+{  return _mm512_shuffle_i64x2( v, v, c ); }
 
 // Concatenate two 256 bit vectors into one 512 bit vector {hi, lo}
 #define mm512_concat_256( hi, lo ) \
    _mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )
 
 // Equivalent of set, assign 64 bit integers to respective 64 bit elements.
+// Use stack memory overlay
 static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
                                      const uint64_t i5, const uint64_t i4,
                                      const uint64_t i3, const uint64_t i2,
                                      const uint64_t i1, const uint64_t i0 )
 {
-   __m256i hi, lo;
-   __m128i hi1, lo1;
-   lo  = mm256_mov64_256( i0 );
-   lo1 = mm128_mov64_128( i2 );
-   hi  = mm256_mov64_256( i4 );
-   hi1 = mm128_mov64_128( i6 );
-   lo  = _mm256_castsi128_si256(
-         _mm_insert_epi64( _mm256_castsi256_si128( lo ), i1, 1 ) );
-   lo1 = _mm_insert_epi64( lo1, i3, 1 );
-   hi  = _mm256_castsi128_si256(
-         _mm_insert_epi64( _mm256_castsi256_si128( hi ), i5, 1 ) );
-   hi1 = _mm_insert_epi64( hi1, i7, 1 );
-   lo  = _mm256_inserti128_si256( lo, lo1, 1 );
-   hi  = _mm256_inserti128_si256( hi, hi1, 1 );
-   return mm512_concat_256( hi, lo );
+   union { __m512i m512i;
+           uint64_t u64[8]; } v;
+   v.u64[0] = i0; v.u64[1] = i1;
+   v.u64[2] = i2; v.u64[3] = i3;
+   v.u64[4] = i4; v.u64[5] = i5;
+   v.u64[6] = i6; v.u64[7] = i7;
+   return v.m512i;
 }
 
-// Equivalent of set1, broadcast 64 bit constant to all 64 bit elements.
-#define m512_const1_256( v )  _mm512_broadcast_i64x4( v )
-#define m512_const1_128( v )  _mm512_broadcast_i64x2( v )
+// Equivalent of set1, broadcast the low element to all elements.
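The stack memory overlay used by m512_const_64 above fills elements low to high, which matches _mm512_set_epi64 with its arguments given high element first. A standalone check of that equivalence (the main() harness and the -mavx512f flag are assumptions):

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

int main( void )
{
   union { __m512i m512i; uint64_t u64[8]; } v;
   for ( uint64_t i = 0; i < 8; i++ ) v.u64[i] = i;          // overlay, low element first
   __m512i w = _mm512_set_epi64( 7, 6, 5, 4, 3, 2, 1, 0 );   // set, high element first
   printf( "element order matches: %s\n",
           _mm512_cmpeq_epi64_mask( v.m512i, w ) == 0xff ? "yes" : "no" );
   return 0;
}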
+static inline __m512i m512_const1_256( const __m256i v ) +{ return _mm512_inserti64x4( _mm512_castsi256_si512( v ), v, 1 ); } + +#define m512_const1_128( v ) \ + mm512_perm_128( _mm512_castsi128_si512( v ), 0 ) +// Integer input argument up to 64 bits +#define m512_const1_i128( i ) \ + mm512_perm_128( _mm512_castsi128_si512( mm128_mov64_128( i ) ), 0 ) + +//#define m512_const1_256( v ) _mm512_broadcast_i64x4( v ) +//#define m512_const1_128( v ) _mm512_broadcast_i64x2( v ) #define m512_const1_64( i ) _mm512_broadcastq_epi64( mm128_mov64_128( i ) ) #define m512_const1_32( i ) _mm512_broadcastd_epi32( mm128_mov32_128( i ) ) #define m512_const1_16( i ) _mm512_broadcastw_epi16( mm128_mov32_128( i ) ) @@ -142,23 +130,17 @@ static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6, #define m512_const2_64( i1, i0 ) \ m512_const1_128( m128_const_64( i1, i0 ) ) -#define m512_const2_32( i1, i0 ) \ - m512_const1_64( ( (uint64_t)(i1) << 32 ) | ( (uint64_t)(i0) & 0xffffffff ) ) - -// { m128_1, m128_1, m128_0, m128_0 } -#define m512_const_2x128( v1, v0 ) \ - m512_mask_blend_epi64( 0x0f, m512_const1_128( v1 ), m512_const1_128( v0 ) ) static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2, const uint64_t i1, const uint64_t i0 ) { - __m256i lo = mm256_mov64_256( i0 ); - __m128i hi = mm128_mov64_128( i2 ); - lo = _mm256_castsi128_si256( - _mm_insert_epi64( _mm256_castsi256_si128( - lo ), i1, 1 ) ); - hi = _mm_insert_epi64( hi, i3, 1 ); - return _mm512_broadcast_i64x4( _mm256_inserti128_si256( lo, hi, 1 ) ); + union { __m512i m512i; + uint64_t u64[8]; } v; + v.u64[0] = v.u64[4] = i0; + v.u64[1] = v.u64[5] = i1; + v.u64[2] = v.u64[6] = i2; + v.u64[3] = v.u64[7] = i3; + return v.m512i; } // @@ -170,14 +152,15 @@ static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2, #define m512_zero _mm512_setzero_si512() #define m512_one_512 mm512_mov64_512( 1 ) -#define m512_one_256 _mm512_broadcast_i64x4 ( mm256_mov64_256( 1 ) ) -#define m512_one_128 _mm512_broadcast_i64x2 ( mm128_mov64_128( 1 ) ) -#define m512_one_64 _mm512_broadcastq_epi64( mm128_mov64_128( 1 ) ) -#define m512_one_32 _mm512_broadcastd_epi32( mm128_mov64_128( 1 ) ) -#define m512_one_16 _mm512_broadcastw_epi16( mm128_mov64_128( 1 ) ) -#define m512_one_8 _mm512_broadcastb_epi8 ( mm128_mov64_128( 1 ) ) +#define m512_one_256 _mm512_inserti64x4( m512_one_512, m256_one_256, 1 ) +#define m512_one_128 m512_const1_i128( 1 ) +#define m512_one_64 m512_const1_64( 1 ) +#define m512_one_32 m512_const1_32( 1 ) +#define m512_one_16 m512_const1_16( 1 ) +#define m512_one_8 m512_const1_8( 1 ) -#define m512_neg1 m512_const1_64( 0xffffffffffffffff ) +//#define m512_neg1 m512_const1_64( 0xffffffffffffffff ) +#define m512_neg1 _mm512_movm_epi64( 0xff ) // // Basic operations without SIMD equivalent @@ -242,15 +225,6 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n ) _mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) ) - -// Horizontal vector testing -// Returns bit __mmask8 -#define mm512_allbits0( a ) _mm512_cmpeq_epi64_mask( a, m512_zero ) -#define mm512_allbits1( a ) _mm512_cmpeq_epi64_mask( a, m512_neg1 ) -#define mm512_anybits0( a ) _mm512_cmpneq_epi64_mask( a, m512_neg1 ) -#define mm512_anybits1( a ) _mm512_cmpneq_epi64_mask( a, m512_zero ) - - // // Bit rotations. 
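m512_neg1 above now comes from a mask register move rather than a broadcast. A quick standalone check that _mm512_movm_epi64( 0xff ) really is all ones (assumes -mavx512f -mavx512dq and a throwaway main()):

#include <stdio.h>
#include <immintrin.h>

int main( void )
{
   __m512i a = _mm512_movm_epi64( 0xff );    // body of m512_neg1
   __m512i b = _mm512_set1_epi64( -1 );
   printf( "movm(0xff) == set1(-1): %s\n",
           _mm512_cmpeq_epi64_mask( a, b ) == 0xff ? "yes" : "no" );
   return 0;
}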
@@ -262,37 +236,47 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n ) // _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32 // +// For convenience and consistency with AVX2 #define mm512_ror_64 _mm512_ror_epi64 #define mm512_rol_64 _mm512_rol_epi64 #define mm512_ror_32 _mm512_ror_epi32 #define mm512_rol_32 _mm512_rol_epi32 -#define mm512_ror_var_64( v, c ) \ - _mm512_or_si512( _mm512_srli_epi64( v, c ), \ - _mm512_slli_epi64( v, 64-(c) ) ) - -#define mm512_rol_var_64( v, c ) \ - _mm512_or_si512( _mm512_slli_epi64( v, c ), \ - _mm512_srli_epi64( v, 64-(c) ) ) - -#define mm512_ror_var_32( v, c ) \ - _mm512_or_si512( _mm512_srli_epi32( v, c ), \ - _mm512_slli_epi32( v, 32-(c) ) ) +static inline __m512i mm512_ror_var_64( const __m512i v, const int c ) +{ + return _mm512_or_si512( _mm512_srli_epi64( v, c ), + _mm512_slli_epi64( v, 64-c ) ); +} -#define mm512_rol_var_32( v, c ) \ - _mm512_or_si512( _mm512_slli_epi32( v, c ), \ - _mm512_srli_epi32( v, 32-(c) ) ) +static inline __m512i mm512_rol_var_64( const __m512i v, const int c ) +{ + return _mm512_or_si512( _mm512_slli_epi64( v, c ), + _mm512_srli_epi64( v, 64-c ) ); +} +static inline __m512i mm512_ror_var_32( const __m512i v, const int c ) +{ + return _mm512_or_si512( _mm512_srli_epi32( v, c ), + _mm512_slli_epi32( v, 32-c ) ); +} -// Here is a fixed bit rotate for 16 bit elements: -#define mm512_ror_16( v, c ) \ - _mm512_or_si512( _mm512_srli_epi16( v, c ), \ - _mm512_slli_epi16( v, 16-(c) ) -#define mm512_rol_16( v, c ) \ - _mm512_or_si512( _mm512_slli_epi16( v, c ), \ - _mm512_srli_epi16( v, 16-(c) ) +static inline __m512i mm512_rol_var_32( const __m512i v, const int c ) +{ + return _mm512_or_si512( _mm512_slli_epi32( v, c ), + _mm512_srli_epi32( v, 32-c ) ); +} +static inline __m512i mm512_ror_16( __m512i const v, const int c ) +{ + return _mm512_or_si512( _mm512_srli_epi16( v, c ), + _mm512_slli_epi16( v, 16-c ) ); +} +static inline __m512i mm512_rol_16( const __m512i v, const int c ) +{ + return _mm512_or_si512( _mm512_slli_epi16( v, c ), + _mm512_srli_epi16( v, 16-c ) ); +} // Rotations using a vector control index are very slow due to overhead // to generate the index vector. Repeated rotations using the same index @@ -363,25 +347,32 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n ) // // Rotate elements in 512 bit vector. +static inline __m512i mm512_swap_256( const __m512i v ) +{ return _mm512_alignr_epi64( v, v, 4 ); } + +static inline __m512i mm512_ror_1x128( const __m512i v ) +{ return _mm512_alignr_epi64( v, v, 2 ); } + +static inline __m512i mm512_rol_1x128( const __m512i v ) +{ return _mm512_alignr_epi64( v, v, 6 ); } -#define mm512_swap_256( v ) _mm512_alignr_epi64( v, v, 4 ) +static inline __m512i mm512_ror_1x64( const __m512i v ) +{ return _mm512_alignr_epi64( v, v, 1 ); } -// 1x64 notation used to disinguish from bit rotation. 
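The _var_ rotates above fall back to a shift-and-or when the fixed rotate instructions are unavailable. A sketch comparing the vector result of that pattern against a plain scalar rotate (the harness, helper name and compile flags are assumptions):

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

static inline uint64_t ror64( uint64_t x, int c )
{  return ( x >> c ) | ( x << ( 64 - c ) ); }

int main( void )
{
   const uint64_t x = 0x0123456789abcdefULL;
   const int c = 13;
   __m512i v = _mm512_set1_epi64( (long long)x );
   __m512i r = _mm512_or_si512( _mm512_srli_epi64( v, c ),
                                _mm512_slli_epi64( v, 64 - c ) );   // shift-and-or rotate
   uint64_t out[8];
   _mm512_storeu_si512( out, r );
   printf( "vector %016llx  scalar %016llx\n",
           (unsigned long long)out[0], (unsigned long long)ror64( x, c ) );
   return 0;
}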
-#define mm512_ror_1x128( v ) _mm512_alignr_epi64( v, v, 2 ) -#define mm512_rol_1x128( v ) _mm512_alignr_epi64( v, v, 6 ) +static inline __m512i mm512_rol_1x64( const __m512i v ) +{ return _mm512_alignr_epi64( v, v, 7 ); } -#define mm512_ror_1x64( v ) _mm512_alignr_epi64( v, v, 1 ) -#define mm512_rol_1x64( v ) _mm512_alignr_epi64( v, v, 7 ) +static inline __m512i mm512_ror_1x32( const __m512i v ) +{ return _mm512_alignr_epi32( v, v, 1 ); } -#define mm512_ror_1x32( v ) _mm512_alignr_epi32( v, v, 1 ) -#define mm512_rol_1x32( v ) _mm512_alignr_epi32( v, v, 15 ) +static inline __m512i mm512_rol_1x32( const __m512i v ) +{ return _mm512_alignr_epi32( v, v, 15 ); } -// Generic for odd rotations -#define mm512_ror_x64( v, n ) _mm512_alignr_epi64( v, v, n ) -#define mm512_rol_x64( v, n ) _mm512_alignr_epi64( v, v, 8-(n) ) +static inline __m512i mm512_ror_x64( const __m512i v, const int n ) +{ return _mm512_alignr_epi64( v, v, n ); } -#define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n ) -#define mm512_rol_x32( v, n ) _mm512_alignr_epi32( v, v, 16-(n) ) +static inline __m512i mm512_ror_x32( const __m512i v, const int n ) +{ return _mm512_alignr_epi32( v, v, n ); } #define mm512_ror_1x16( v ) \ _mm512_permutexvar_epi16( m512_const_64( \ @@ -411,38 +402,6 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n ) 0x1E1D1C1B1A191817, 0x161514131211100F, \ 0x0E0D0C0B0A090807, 0x060504030201003F ) ) - -// Invert vector: {3,2,1,0} -> {0,1,2,3} -#define mm512_invert_256( v ) \ - _mm512_permutexvar_epi64( v, m512_const_64( 3,2,1,0,7,6,5,4 ) ) - -#define mm512_invert_128( v ) \ - _mm512_permutexvar_epi64( v, m512_const_64( 1,0,3,2,5,4,7,6 ) ) - -#define mm512_invert_64( v ) \ - _mm512_permutexvar_epi64( v, m512_const_64( 0,1,2,3,4,5,6,7 ) ) - -#define mm512_invert_32( v ) \ - _mm512_permutexvar_epi32( m512_const_64( \ - 0x0000000000000001,0x0000000200000003, \ - 0x0000000400000005,0x0000000600000007, \ - 0x0000000800000009,0x0000000a0000000b, \ - 0x0000000c0000000d,0x0000000e0000000f ), v ) - -#define mm512_invert_16( v ) \ - _mm512_permutexvar_epi16( m512_const_64( \ - 0x0000000100020003, 0x0004000500060007, \ - 0x00080009000A000B, 0x000C000D000E000F, \ - 0x0010001100120013, 0x0014001500160017, \ - 0x00180019001A001B, 0x001C001D001E001F ), v ) - -#define mm512_invert_8( v ) \ - _mm512_shuffle_epi8( v, m512_const_64( \ - 0x0001020304050607, 0x08090A0B0C0D0E0F, \ - 0x1011121314151617, 0x18191A1B1C1D1E1F, \ - 0x2021222324252627, 0x28292A2B2C2D2E2F, \ - 0x3031323334353637, 0x38393A3B3C3D3E3F ) ) - // // Rotate elements within 256 bit lanes of 512 bit vector. 
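The same idea at full width: valignq with both source operands equal rotates the whole 512 bit vector by whole 64 bit elements. An illustration of mm512_ror_1x64's body (assumes -mavx512f and a test main()):

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

int main( void )
{
   __m512i v = _mm512_set_epi64( 7, 6, 5, 4, 3, 2, 1, 0 );   // elements 0..7
   __m512i r = _mm512_alignr_epi64( v, v, 1 );               // body of mm512_ror_1x64
   uint64_t out[8];
   _mm512_storeu_si512( out, r );
   for ( int i = 0; i < 8; i++ ) printf( "%llu ", (unsigned long long)out[i] );
   printf( "\n" );                                           // expect 1 2 3 4 5 6 7 0
   return 0;
}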
@@ -450,11 +409,10 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n ) #define mm512_swap256_128( v ) _mm512_permutex_epi64( v, 0x4e ) // Rotate 256 bit lanes by one 64 bit element -#define mm512_ror256_64( v ) _mm512_permutex_epi64( v, 0x39 ) -#define mm512_rol256_64( v ) _mm512_permutex_epi64( v, 0x93 ) +#define mm512_ror256_64( v ) _mm512_permutex_epi64( v, 0x39 ) +#define mm512_rol256_64( v ) _mm512_permutex_epi64( v, 0x93 ) // Rotate 256 bit lanes by one 32 bit element - #define mm512_ror256_32( v ) \ _mm512_permutexvar_epi32( m512_const_64( \ 0x000000080000000f, 0x0000000e0000000d, \ @@ -488,68 +446,41 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n ) 0x203f3e3d3c3b3a39, 0x3837363534333231, \ 0x302f2e2d2c2b2a29, 0x2827262524232221, \ 0x001f1e1d1c1b1a19, 0x1817161514131211, \ - 0x100f0e0d0c0b0a09, 0x0807060504030201 ), v ) + 0x100f0e0d0c0b0a09, 0x0807060504030201 ) ) #define mm512_rol256_8( v ) \ _mm512_shuffle_epi8( v, m512_const_64( \ 0x3e3d3c3b3a393837, 0x363534333231302f, \ 0x2e2d2c2b2a292827, 0x262524232221203f, \ 0x1e1d1c1b1a191817, 0x161514131211100f, \ - 0x0e0d0c0b0a090807, 0x060504030201001f ), v ) + 0x0e0d0c0b0a090807, 0x060504030201001f ) ) // // Rotate elements within 128 bit lanes of 512 bit vector. -// Swap hi & lo 64 bits in each 128 bit lane -#define mm512_swap128_64( v ) _mm512_shuffle_epi32( v, 0x4e ) +// Swap 64 bits in each 128 bit lane +#define mm512_swap128_64( v ) _mm512_shuffle_epi32( v, 0x4e ) // Rotate 128 bit lanes by one 32 bit element -#define mm512_ror128_32( v ) _mm512_shuffle_epi32( v, 0x39 ) -#define mm512_rol128_32( v ) _mm512_shuffle_epi32( v, 0x93 ) - -#define mm512_ror128_x8( v, c ) _mm512_alignr_epi8( v, v, c ) - -/* -// Rotate 128 bit lanes by c bytes, faster than building that monstrous -// constant above. -#define mm512_ror128_8( v, c ) \ - _mm512_or_si512( _mm512_bsrli_epi128( v, c ), \ - _mm512_bslli_epi128( v, 16-(c) ) ) -#define mm512_rol128_8( v, c ) \ - _mm512_or_si512( _mm512_bslli_epi128( v, c ), \ - _mm512_bsrli_epi128( v, 16-(c) ) ) -*/ - -// -// Rotate elements within 64 bit lanes. - -#define mm512_rol64_x8( v, c ) _mm512_rol_epi64( v, ((c)<<3) ) -#define mm512_ror64_x8( v, c ) _mm512_ror_epi64( v, ((c)<<3) ) - -// Swap 32 bit elements in each 64 bit lane -#define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 ) +#define mm512_ror128_32( v ) _mm512_shuffle_epi32( v, 0x39 ) +#define mm512_rol128_32( v ) _mm512_shuffle_epi32( v, 0x93 ) -// Rotate each 64 bit lane by one 16 bit element. -#define mm512_ror64_16( v ) _mm512_ror_epi64( v, 16 ) -#define mm512_rol64_16( v ) _mm512_rol_epi64( v, 16 ) -#define mm512_ror64_8( v ) _mm512_ror_epi64( v, 8 ) -#define mm512_rol64_8( v ) _mm512_rol_epi64( v, 8 ) - -// -// Rotate elements within 32 bit lanes. +// Rotate right 128 bit lanes by c bytes +static inline __m512i mm512_ror128_x8( const __m512i v, const int c ) +{ return _mm512_alignr_epi8( v, v, c ); } -#define mm512_rol32_x8( v, c ) _mm512_rol_epi32( v, ((c)<<2) ) -#define mm512_ror32_x8( v, c ) _mm512_ror_epi32( v, ((c)<<2) ) +// Swap 32 bits in each 64 bit lane. +#define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 ) // // Rotate elements from 2 512 bit vectors in place, source arguments // are overwritten. 
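For the 256 bit lane rotations above, _mm512_permutex_epi64 with immediate 0x39 rotates the four 64 bit elements of each 256 bit lane independently. A small demonstration (compile flags and the main() harness are assumptions):

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

int main( void )
{
   __m512i v = _mm512_set_epi64( 7, 6, 5, 4, 3, 2, 1, 0 );
   __m512i r = _mm512_permutex_epi64( v, 0x39 );   // body of mm512_ror256_64
   uint64_t out[8];
   _mm512_storeu_si512( out, r );
   for ( int i = 0; i < 8; i++ ) printf( "%llu ", (unsigned long long)out[i] );
   printf( "\n" );                                 // expect 1 2 3 0 5 6 7 4
   return 0;
}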
-#define mm512_swap1024_512(v1, v2) \
-   v1 = _mm512_xor_si512(v1, v2); \
-   v2 = _mm512_xor_si512(v1, v2); \
-   v1 = _mm512_xor_si512(v1, v2);
+#define mm512_swap1024_512( v1, v2 ) \
+   v1 = _mm512_xor_si512( v1, v2 ); \
+   v2 = _mm512_xor_si512( v1, v2 ); \
+   v1 = _mm512_xor_si512( v1, v2 );
 
 #define mm512_ror1024_256( v1, v2 ) \
 do { \
diff --git a/simd-utils/simd-64.h b/simd-utils/simd-64.h
index 2f50ec1a..e74066b6 100644
--- a/simd-utils/simd-64.h
+++ b/simd-utils/simd-64.h
@@ -1,18 +1,18 @@
 #if !defined(SIMD_64_H__)
 #define SIMD_64_H__ 1
 
-#if defined(__MMX__)
+#if defined(__MMX__) && defined(__SSE__)
 
 ////////////////////////////////////////////////////////////////
 //
 //                 64 bit MMX vectors.
 //
-// There are rumours MMX wil be removed. Although casting with int64
-// works there is likely some overhead to move the data to An MMX register
-// and back.
-
+// This code is not used anywhere and likely never will be. Its intent was
+// to support 2 way parallel hashing using SSE2 for 64 bit, and MMX for 32
+// bit hash functions, but was never implemented.
 // Pseudo constants
+
 /*
 #define m64_zero   _mm_setzero_si64()
 #define m64_one_64 _mm_set_pi32( 0UL, 1UL )
@@ -30,79 +30,67 @@
 
 #define casti_m64(p,i) (((__m64*)(p))[(i)])
 
-// cast all arguments as the're likely to be uint64_t
-
 // Bitwise not: ~(a)
 //#define mm64_not( a ) _mm_xor_si64( (__m64)a, m64_neg1 )
 #define mm64_not( a ) ( (__m64)( ~( (uint64_t)(a) ) )
 
 // Unary negate elements
-#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, (__m64)v )
-#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, (__m64)v )
-#define mm64_negate_8(  v ) _mm_sub_pi8(  m64_zero, (__m64)v )
+#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, v )
+#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, v )
+#define mm64_negate_8(  v ) _mm_sub_pi8(  m64_zero, v )
 
 // Rotate bits in packed elements of 64 bit vector
 #define mm64_rol_64( a, n ) \
-   _mm_or_si64( _mm_slli_si64( (__m64)(a), n ), \
-                _mm_srli_si64( (__m64)(a), 64-(n) ) )
+   _mm_or_si64( _mm_slli_si64( a, n ), \
+                _mm_srli_si64( a, 64-(n) ) )
 
 #define mm64_ror_64( a, n ) \
-   _mm_or_si64( _mm_srli_si64( (__m64)(a), n ), \
-                _mm_slli_si64( (__m64)(a), 64-(n) ) )
+   _mm_or_si64( _mm_srli_si64( a, n ), \
+                _mm_slli_si64( a, 64-(n) ) )
 
 #define mm64_rol_32( a, n ) \
-   _mm_or_si64( _mm_slli_pi32( (__m64)(a), n ), \
-                _mm_srli_pi32( (__m64)(a), 32-(n) ) )
+   _mm_or_si64( _mm_slli_pi32( a, n ), \
+                _mm_srli_pi32( a, 32-(n) ) )
 
 #define mm64_ror_32( a, n ) \
-   _mm_or_si64( _mm_srli_pi32( (__m64)(a), n ), \
-                _mm_slli_pi32( (__m64)(a), 32-(n) ) )
+   _mm_or_si64( _mm_srli_pi32( a, n ), \
+                _mm_slli_pi32( a, 32-(n) ) )
 
 #define mm64_rol_16( a, n ) \
-   _mm_or_si64( _mm_slli_pi16( (__m64)(a), n ), \
-                _mm_srli_pi16( (__m64)(a), 16-(n) ) )
+   _mm_or_si64( _mm_slli_pi16( a, n ), \
+                _mm_srli_pi16( a, 16-(n) ) )
 
 #define mm64_ror_16( a, n ) \
-   _mm_or_si64( _mm_srli_pi16( (__m64)(a), n ), \
-                _mm_slli_pi16( (__m64)(a), 16-(n) ) )
+   _mm_or_si64( _mm_srli_pi16( a, n ), \
+                _mm_slli_pi16( a, 16-(n) ) )
 
 // Rotate packed elements accross lanes. Useful for byte swap and byte
 // rotation.
 
-// _mm_shuffle_pi8 requires SSSE3 while _mm_shuffle_pi16 requires SSE
-// even though these are MMX instructions.
-
 // Swap hi & lo 32 bits.
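The MMX rotates above are the usual shift-and-or construction. A tiny standalone example of the mm64_ror_32 pattern (the union overlay, main() harness and reliance on default x86-64 MMX support are assumptions):

#include <stdio.h>
#include <stdint.h>
#include <mmintrin.h>

int main( void )
{
   union { __m64 m64; uint32_t u32[2]; } v, r;
   v.u32[0] = 0x80000001; v.u32[1] = 0x00000002;
   // Body of mm64_ror_32: rotate each 32 bit element right by 1.
   r.m64 = _mm_or_si64( _mm_srli_pi32( v.m64, 1 ),
                        _mm_slli_pi32( v.m64, 31 ) );
   _mm_empty();                                   // leave MMX state before returning to normal code
   printf( "%08x %08x\n", r.u32[0], r.u32[1] );   // expect c0000000 00000001
   return 0;
}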
-#define mm64_swap32( a ) _mm_shuffle_pi16( (__m64)(a), 0x4e ) +#define mm64_swap_32( a ) _mm_shuffle_pi16( a, 0x4e ) -#define mm64_ror1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x39 ) -#define mm64_rol1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x93 ) +#define mm64_ror64_1x16( a ) _mm_shuffle_pi16( a, 0x39 ) +#define mm64_rol64_1x16( a ) _mm_shuffle_pi16( a, 0x93 ) // Swap hi & lo 16 bits of each 32 bit element -#define mm64_swap16_32( a ) _mm_shuffle_pi16( (__m64)(a), 0xb1 ) +#define mm64_swap32_16( a ) _mm_shuffle_pi16( a, 0xb1 ) #if defined(__SSSE3__) // Endian byte swap packed elements -// A vectorized version of the u64 bswap, use when data already in MMX reg. -#define mm64_bswap_64( v ) \ - _mm_shuffle_pi8( (__m64)v, (__m64)0x0001020304050607 ) - #define mm64_bswap_32( v ) \ - _mm_shuffle_pi8( (__m64)v, (__m64)0x0405060700010203 ) + _mm_shuffle_pi8( v, (__m64)0x0405060700010203 ) #define mm64_bswap_16( v ) \ - _mm_shuffle_pi8( (__m64)v, (__m64)0x0607040502030001 ); + _mm_shuffle_pi8( v, (__m64)0x0607040502030001 ); -#else +// Rotate right by c bytes +static inline __m64 mm64_ror_x8( __m64 v, const int c ) +{ return _mm_alignr_pi8( v, v, c ); } -#define mm64_bswap_64( v ) \ - (__m64)__builtin_bswap64( (uint64_t)v ) +#else -// These exist only for compatibility with CPUs without SSSE3. MMX doesn't -// have extract 32 instruction so pointers are needed to access elements. -// It' more efficient for the caller to use scalar variables and call -// bswap_32 directly. #define mm64_bswap_32( v ) \ _mm_set_pi32( __builtin_bswap32( ((uint32_t*)&v)[1] ), \ __builtin_bswap32( ((uint32_t*)&v)[0] ) ) @@ -115,17 +103,6 @@ #endif -// 64 bit mem functions use integral sizes instead of bytes, data must -// be aligned to 64 bits. -static inline void memcpy_m64( __m64 *dst, const __m64 *src, int n ) -{ for ( int i = 0; i < n; i++ ) dst[i] = src[i]; } - -static inline void memset_zero_m64( __m64 *src, int n ) -{ for ( int i = 0; i < n; i++ ) src[i] = (__m64)0ULL; } - -static inline void memset_m64( __m64 *dst, const __m64 a, int n ) -{ for ( int i = 0; i < n; i++ ) dst[i] = a; } - #endif // MMX #endif // SIMD_64_H__ diff --git a/simd-utils/simd-int.h b/simd-utils/simd-int.h index 711134c8..5fff450f 100644 --- a/simd-utils/simd-int.h +++ b/simd-utils/simd-int.h @@ -1,69 +1,16 @@ #if !defined(SIMD_INT_H__) #define SIMD_INT_H__ 1 -/////////////////////////////////// -// -// Integers up to 128 bits. -// -// These utilities enhance support for integers up to 128 bits. -// All standard operations are supported on 128 bit integers except -// numeric constant representation and IO. 128 bit integers must be built -// and displayed as 2 64 bit halves, just like the old times. -// -// Some utilities are also provided for smaller integers, most notably -// bit rotation. - - - -// MMX has no extract instruction for 32 bit elements so this: -// Lo is trivial, high is a simple shift. -// Input may be uint64_t or __m64, returns uint32_t. -#define u64_extr_lo32(a) ( (uint32_t)( (uint64_t)(a) ) ) -#define u64_extr_hi32(a) ( (uint32_t)( ((uint64_t)(a)) >> 32) ) - -#define u64_extr_32( a, n ) ( (uint32_t)( (a) >> ( ( 2-(n)) <<5 ) ) ) -#define u64_extr_16( a, n ) ( (uint16_t)( (a) >> ( ( 4-(n)) <<4 ) ) ) -#define u64_extr_8( a, n ) ( (uint8_t) ( (a) >> ( ( 8-(n)) <<3 ) ) ) - -// Rotate bits in various sized integers. 
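mm64_bswap_32 above relies on a pshufb byte index held in an __m64 constant. The same index written with _mm_set_pi8, in a standalone check (assumes -mssse3; the harness is hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <tmmintrin.h>   // SSSE3: _mm_shuffle_pi8

int main( void )
{
   union { __m64 m64; uint32_t u32[2]; } v, r;
   v.u32[0] = 0x11223344; v.u32[1] = 0xaabbccdd;
   // Same byte index as the mm64_bswap_32 constant, built with set_pi8.
   __m64 idx = _mm_set_pi8( 4, 5, 6, 7, 0, 1, 2, 3 );
   r.m64 = _mm_shuffle_pi8( v.m64, idx );
   _mm_empty();
   printf( "%08x %08x\n", r.u32[0], r.u32[1] );   // expect 44332211 ddccbbaa
   return 0;
}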
-#define u64_ror_64( x, c ) \
-   (uint64_t)( ( (uint64_t)(x) >> (c) ) | ( (uint64_t)(x) << (64-(c)) ) )
-#define u64_rol_64( x, c ) \
-   (uint64_t)( ( (uint64_t)(x) << (c) ) | ( (uint64_t)(x) >> (64-(c)) ) )
-#define u32_ror_32( x, c ) \
-   (uint32_t)( ( (uint32_t)(x) >> (c) ) | ( (uint32_t)(x) << (32-(c)) ) )
-#define u32_rol_32( x, c ) \
-   (uint32_t)( ( (uint32_t)(x) << (c) ) | ( (uint32_t)(x) >> (32-(c)) ) )
-#define u16_ror_16( x, c ) \
-   (uint16_t)( ( (uint16_t)(x) >> (c) ) | ( (uint16_t)(x) << (16-(c)) ) )
-#define u16_rol_16( x, c ) \
-   (uint16_t)( ( (uint16_t)(x) << (c) ) | ( (uint16_t)(x) >> (16-(c)) ) )
-#define u8_ror_8( x, c ) \
-   (uint8_t) ( ( (uint8_t) (x) >> (c) ) | ( (uint8_t) (x) << ( 8-(c)) ) )
-#define u8_rol_8( x, c ) \
-   (uint8_t) ( ( (uint8_t) (x) << (c) ) | ( (uint8_t) (x) >> ( 8-(c)) ) )
-
 // Endian byte swap
 #define bswap_64( a ) __builtin_bswap64( a )
 #define bswap_32( a ) __builtin_bswap32( a )
 
-// 64 bit mem functions use integral sizes instead of bytes, data must
-// be aligned to 64 bits. Mostly for scaled indexing convenience.
-static inline void memcpy_64( uint64_t *dst, const uint64_t *src, int n )
-{   for ( int i = 0; i < n; i++ ) dst[i] = src[i]; }
-
-static inline void memset_zero_64( uint64_t *src, int n )
-{   for ( int i = 0; i < n; i++ ) src[i] = 0ull; }
-
-static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
-{   for ( int i = 0; i < n; i++ ) dst[i] = a; }
-
 ///////////////////////////////////////
 //
 //      128 bit integers
 //
-// 128 bit integers are inneficient and not a shortcut for __m128i.
+// 128 bit integers are inefficient and not a shortcut for __m128i.
 // Native type __int128 supported starting with GCC-4.8.
 //
 // __int128 uses two 64 bit GPRs to hold the data. The main benefits are
@@ -94,31 +41,12 @@ static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
 typedef __int128 int128_t;
 typedef unsigned __int128 uint128_t;
 
-
-
-// Maybe usefull for making constants.
-#define mk_uint128( hi, lo ) \
-   ( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) )
-
-
 // Extracting the low bits is a trivial cast.
 // These specialized functions are optimized while providing a
 // consistent interface.
 #define u128_hi64( x ) ( (uint64_t)( (uint128_t)(x) >> 64 ) )
 #define u128_lo64( x ) ( (uint64_t)(x) )
 
-// Generic extract, don't use for extracting low bits, cast instead.
-#define u128_extr_64( a, n ) ( (uint64_t)( (a) >> ( ( 2-(n)) <<6 ) ) )
-#define u128_extr_32( a, n ) ( (uint32_t)( (a) >> ( ( 4-(n)) <<5 ) ) )
-#define u128_extr_16( a, n ) ( (uint16_t)( (a) >> ( ( 8-(n)) <<4 ) ) )
-#define u128_extr_8(  a, n ) ( (uint8_t) ( (a) >> ( (16-(n)) <<3 ) ) )
-
-// Not much need for this but it fills a gap.
-#define u128_ror_128( x, c ) \
-   ( ( (uint128_t)(x) >> (c) ) | ( (uint128_t)(x) << (128-(c)) ) )
-#define u128_rol_128( x, c ) \
-   ( ( (uint128_t)(x) << (c) ) | ( (uint128_t)(x) >> (128-(c)) ) )
-
 #endif // GCC_INT128
 
 #endif // SIMD_INT_H__
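u128_hi64 and u128_lo64 above are just a shift and a cast. A minimal usage sketch, assuming a GCC or Clang 64 bit target where __int128 is available:

#include <stdio.h>
#include <stdint.h>

typedef unsigned __int128 uint128_t;

int main( void )
{
   uint128_t x = ( (uint128_t)0x0123456789abcdefULL << 64 ) | 0xfedcba9876543210ULL;
   uint64_t hi = (uint64_t)( x >> 64 );   // u128_hi64
   uint64_t lo = (uint64_t)x;             // u128_lo64
   printf( "hi %016llx lo %016llx\n",
           (unsigned long long)hi, (unsigned long long)lo );
   return 0;
}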