Merge #13386: SHA256 implementations based on Intel SHA Extensions
66b2cf1ccf Use immintrin.h everywhere for intrinsics (Pieter Wuille)
4c935e2eee Add SHA256 implementation using Intel SHA intrinsics (Pieter Wuille)
268400d318 [Refactor] CPU feature detection logic for SHA256 (Pieter Wuille)

Pull request description:

Based on #13191. This adds SHA256 implementations that use Intel's SHA Extensions instructions (via intrinsics); building them requires GCC 4.9 or Clang 3.4 or newer. In addition to #13191, two extra implementations are provided:

* (a) a variable-length SHA256 implementation using the SHA extensions;
* (b) a 2-way, 64-byte-input double-SHA256 implementation using the SHA extensions.

Benchmarks for 9001-element Merkle tree root computation on an AMD Ryzen 1800X system:

* Using generic C++ code (pre-#10821): 6.1 ms
* Using SSE4 (master, #10821): 4.6 ms
* Using 4-way SSE4 specialized for 64-byte inputs (#13191): 2.8 ms
* Using 8-way AVX2 specialized for 64-byte inputs (#13191): 2.1 ms
* Using 2-way SHA-NI specialized for 64-byte inputs (this PR): 0.56 ms

Benchmarks for 32-byte SHA256 on the same system:

* Using SSE4 (master, #10821): 190 ns
* Using SHA-NI (this PR): 53 ns

Benchmarks for 1000000-byte SHA256 on the same system:

* Using SSE4 (master, #10821): 2.5 ms
* Using SHA-NI (this PR): 0.51 ms

Tree-SHA512: 2b319e33b22579f815d91f9daf7994a5e1e799c4f73c13e15070dd54ba71f3f6438ccf77ae9cbd1ce76f972d9cbeb5f0edfea3d86f101bbc1055db70e42743b7
Commit 3a3eabef40
7 changed files with 464 additions and 32 deletions
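For orientation before the diff: the new dispatch code in src/crypto/sha256.cpp selects an implementation at run time from CPUID, using leaf 1 ECX bit 19 (SSE4.1) and leaf 7 sub-leaf 0 EBX bit 29 (SHA). A minimal standalone sketch of that probe, assuming a GCC/Clang toolchain that provides <cpuid.h> with __get_cpuid and __get_cpuid_count (this snippet is illustrative and not part of the PR):

    // Report whether the CPU advertises the bits SHA256AutoDetect() checks:
    // SSE4.1 = CPUID leaf 1, ECX bit 19; SHA-NI = CPUID leaf 7 (sub-leaf 0), EBX bit 29.
    #include <cpuid.h>
    #include <cstdio>

    int main()
    {
        unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
        bool have_sse41 = false;
        bool have_shani = false;
        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
            have_sse41 = (ecx >> 19) & 1;
        }
        if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
            have_shani = (ebx >> 29) & 1;
        }
        std::printf("sse4.1=%d sha-ni=%d\n", (int)have_sse41, (int)have_shani);
        return 0;
    }

On a CPU with the SHA extensions, the SHA256AutoDetect() added below reports "shani(1way,2way)".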
configure.ac (28 lines changed)

@@ -320,6 +320,7 @@ fi
AX_CHECK_COMPILE_FLAG([-msse4.2],[[SSE42_CXXFLAGS="-msse4.2"]],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-msse4.1],[[SSE41_CXXFLAGS="-msse4.1"]],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-mavx -mavx2],[[AVX2_CXXFLAGS="-mavx -mavx2"]],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-msse4 -msha],[[SHANI_CXXFLAGS="-msse4 -msha"]],,[[$CXXFLAG_WERROR]])

TEMP_CXXFLAGS="$CXXFLAGS"
CXXFLAGS="$CXXFLAGS $SSE42_CXXFLAGS"

@@ -348,11 +349,7 @@ CXXFLAGS="$CXXFLAGS $SSE41_CXXFLAGS"
AC_MSG_CHECKING(for SSE4.1 intrinsics)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <stdint.h>
#if defined(_MSC_VER)
#include <immintrin.h>
#elif defined(__GNUC__)
#include <x86intrin.h>
#endif
]],[[
__m128i l = _mm_set1_epi32(0);
return _mm_extract_epi32(l, 3);

@@ -367,11 +364,7 @@ CXXFLAGS="$CXXFLAGS $AVX2_CXXFLAGS"
AC_MSG_CHECKING(for AVX2 intrinsics)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <stdint.h>
#if defined(_MSC_VER)
#include <immintrin.h>
#elif defined(__GNUC__) && defined(__AVX2__)
#include <x86intrin.h>
#endif
]],[[
__m256i l = _mm256_set1_epi32(0);
return _mm256_extract_epi32(l, 7);

@@ -381,6 +374,23 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
)
CXXFLAGS="$TEMP_CXXFLAGS"

TEMP_CXXFLAGS="$CXXFLAGS"
CXXFLAGS="$CXXFLAGS $SHANI_CXXFLAGS"
AC_MSG_CHECKING(for SHA-NI intrinsics)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <stdint.h>
#include <immintrin.h>
]],[[
__m128i i = _mm_set1_epi32(0);
__m128i j = _mm_set1_epi32(1);
__m128i k = _mm_set1_epi32(2);
return _mm_extract_epi32(_mm_sha256rnds2_epu32(i, i, k), 0);
]])],
[ AC_MSG_RESULT(yes); enable_shani=yes; AC_DEFINE(ENABLE_SHANI, 1, [Define this symbol to build code that uses SHA-NI intrinsics]) ],
[ AC_MSG_RESULT(no)]
)
CXXFLAGS="$TEMP_CXXFLAGS"

CPPFLAGS="$CPPFLAGS -DHAVE_BUILD_INFO -D__STDC_FORMAT_MACROS"

AC_ARG_WITH([utils],

@@ -1309,6 +1319,7 @@ AM_CONDITIONAL([HARDEN],[test x$use_hardening = xyes])
AM_CONDITIONAL([ENABLE_HWCRC32],[test x$enable_hwcrc32 = xyes])
AM_CONDITIONAL([ENABLE_SSE41],[test x$enable_sse41 = xyes])
AM_CONDITIONAL([ENABLE_AVX2],[test x$enable_avx2 = xyes])
AM_CONDITIONAL([ENABLE_SHANI],[test x$enable_shani = xyes])
AM_CONDITIONAL([USE_ASM],[test x$use_asm = xyes])

AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version])

@@ -1353,6 +1364,7 @@ AC_SUBST(SANITIZER_LDFLAGS)
AC_SUBST(SSE42_CXXFLAGS)
AC_SUBST(SSE41_CXXFLAGS)
AC_SUBST(AVX2_CXXFLAGS)
AC_SUBST(SHANI_CXXFLAGS)
AC_SUBST(LIBTOOL_APP_LDFLAGS)
AC_SUBST(USE_UPNP)
AC_SUBST(USE_QRCODE)

src/Makefile.am
@@ -52,6 +52,10 @@ if ENABLE_AVX2
LIBBITCOIN_CRYPTO_AVX2 = crypto/libbitcoin_crypto_avx2.a
LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_AVX2)
endif
if ENABLE_SHANI
LIBBITCOIN_CRYPTO_SHANI = crypto/libbitcoin_crypto_shani.a
LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_SHANI)
endif

$(LIBSECP256K1): $(wildcard secp256k1/src/*.h) $(wildcard secp256k1/src/*.c) $(wildcard secp256k1/include/*)
	$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)

@@ -318,6 +322,12 @@ crypto_libbitcoin_crypto_avx2_a_CXXFLAGS += $(AVX2_CXXFLAGS)
crypto_libbitcoin_crypto_avx2_a_CPPFLAGS += -DENABLE_AVX2
crypto_libbitcoin_crypto_avx2_a_SOURCES = crypto/sha256_avx2.cpp

crypto_libbitcoin_crypto_shani_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crypto_libbitcoin_crypto_shani_a_CPPFLAGS = $(AM_CPPFLAGS)
crypto_libbitcoin_crypto_shani_a_CXXFLAGS += $(SHANI_CXXFLAGS)
crypto_libbitcoin_crypto_shani_a_CPPFLAGS += -DENABLE_SHANI
crypto_libbitcoin_crypto_shani_a_SOURCES = crypto/sha256_shani.cpp

# consensus: shared between all executables that validate any consensus rules.
libbitcoin_consensus_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_consensus_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)

src/Makefile.test.include
@@ -137,6 +137,7 @@ test_test_bitcoin_fuzzy_LDADD = \
  $(LIBBITCOIN_CRYPTO) \
  $(LIBBITCOIN_CRYPTO_SSE41) \
  $(LIBBITCOIN_CRYPTO_AVX2) \
  $(LIBBITCOIN_CRYPTO_SHANI) \
  $(LIBSECP256K1)

test_test_bitcoin_fuzzy_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)

src/crypto/sha256.cpp
@@ -29,6 +29,16 @@ namespace sha256d64_avx2
void Transform_8way(unsigned char* out, const unsigned char* in);
}

namespace sha256d64_shani
{
void Transform_2way(unsigned char* out, const unsigned char* in);
}

namespace sha256_shani
{
void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks);
}

// Internal implementation code.
namespace
{

@@ -448,6 +458,7 @@ void TransformD64Wrapper(unsigned char* out, const unsigned char* in)

TransformType Transform = sha256::Transform;
TransformD64Type TransformD64 = sha256::TransformD64;
TransformD64Type TransformD64_2way = nullptr;
TransformD64Type TransformD64_4way = nullptr;
TransformD64Type TransformD64_8way = nullptr;

@@ -512,6 +523,13 @@ bool SelfTest() {
    TransformD64(out, data + 1);
    if (!std::equal(out, out + 32, result_d64)) return false;

    // Test TransformD64_2way, if available.
    if (TransformD64_2way) {
        unsigned char out[64];
        TransformD64_2way(out, data + 1);
        if (!std::equal(out, out + 64, result_d64)) return false;
    }

    // Test TransformD64_4way, if available.
    if (TransformD64_4way) {
        unsigned char out[128];

@@ -556,32 +574,64 @@ std::string SHA256AutoDetect()
{
    std::string ret = "standard";
#if defined(USE_ASM) && (defined(__x86_64__) || defined(__amd64__) || defined(__i386__))
    (void)AVXEnabled; // Silence unused warning (in case ENABLE_AVX2 is not defined)
    bool have_sse4 = false;
    bool have_xsave = false;
    bool have_avx = false;
    bool have_avx2 = false;
    bool have_shani = false;
    bool enabled_avx = false;

    (void)AVXEnabled;
    (void)have_sse4;
    (void)have_avx;
    (void)have_xsave;
    (void)have_avx2;
    (void)have_shani;
    (void)enabled_avx;

    uint32_t eax, ebx, ecx, edx;
    cpuid(1, 0, eax, ebx, ecx, edx);
    if ((ecx >> 19) & 1) {
    have_sse4 = (ecx >> 19) & 1;
    have_xsave = (ecx >> 27) & 1;
    have_avx = (ecx >> 28) & 1;
    if (have_xsave && have_avx) {
        enabled_avx = AVXEnabled();
    }
    if (have_sse4) {
        cpuid(7, 0, eax, ebx, ecx, edx);
        have_avx2 = (ebx >> 5) & 1;
        have_shani = (ebx >> 29) & 1;
    }

#if defined(ENABLE_SHANI) && !defined(BUILD_BITCOIN_INTERNAL)
    if (have_shani) {
        Transform = sha256_shani::Transform;
        TransformD64 = TransformD64Wrapper<sha256_shani::Transform>;
        TransformD64_2way = sha256d64_shani::Transform_2way;
        ret = "shani(1way,2way)";
        have_sse4 = false; // Disable SSE4/AVX2;
        have_avx2 = false;
    }
#endif

    if (have_sse4) {
#if defined(__x86_64__) || defined(__amd64__)
        Transform = sha256_sse4::Transform;
        TransformD64 = TransformD64Wrapper<sha256_sse4::Transform>;
        ret = "sse4(1way)";
#endif
#if defined(ENABLE_SSE41) && !defined(BUILD_BITCOIN_INTERNAL)
        TransformD64_4way = sha256d64_sse41::Transform_4way;
        ret = "sse4(1way+4way)";
#if defined(ENABLE_AVX2) && !defined(BUILD_BITCOIN_INTERNAL)
        if (((ecx >> 27) & 1) && ((ecx >> 28) & 1)) { // XSAVE and AVX
            cpuid(7, 0, eax, ebx, ecx, edx);
            if ((ebx >> 5) & 1) { // AVX2 flag
                if (AVXEnabled()) { // OS has enabled AVX registers
                    TransformD64_8way = sha256d64_avx2::Transform_8way;
                    ret += ",avx2(8way)";
                }
            }
        }
#endif
#else
        ret = "sse4";
        ret += ",sse41(4way)";
#endif
    }

#if defined(ENABLE_AVX2) && !defined(BUILD_BITCOIN_INTERNAL)
    if (have_avx2 && have_avx && enabled_avx) {
        TransformD64_8way = sha256d64_avx2::Transform_8way;
        ret += ",avx2(8way)";
    }
#endif
#endif

    assert(SelfTest());

@@ -663,6 +713,14 @@ void SHA256D64(unsigned char* out, const unsigned char* in, size_t blocks)
            blocks -= 4;
        }
    }
    if (TransformD64_2way) {
        while (blocks >= 2) {
            TransformD64_2way(out, in);
            out += 64;
            in += 128;
            blocks -= 2;
        }
    }
    while (blocks) {
        TransformD64(out, in);
        out += 32;
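The 2-way specialization is reached through the SHA256D64 dispatcher above, which double-SHA256s consecutive 64-byte inputs, the hot operation in Merkle root computation. A minimal usage sketch against crypto/sha256.h from this tree; the input bytes are placeholders:

    // Double-SHA256 two 64-byte inputs at once; with SHA-NI detected, a call
    // with blocks == 2 is handled by a single Transform_2way invocation.
    #include <crypto/sha256.h>
    #include <cstdio>

    int main()
    {
        SHA256AutoDetect(); // installs the best Transform/TransformD64_* pointers

        unsigned char in[2 * 64] = {0};  // e.g. two pairs of concatenated 32-byte hashes
        unsigned char out[2 * 32];       // receives two 32-byte double-SHA256 results
        SHA256D64(out, in, 2);

        std::printf("%02x%02x...\n", out[0], out[1]);
        return 0;
    }

Any odd leftover block falls through to the 1-way TransformD64 loop at the end of the dispatcher.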

src/crypto/sha256_avx2.cpp

@@ -1,11 +1,7 @@
#ifdef ENABLE_AVX2

#include <stdint.h>
#if defined(_MSC_VER)
#include <immintrin.h>
#elif defined(__GNUC__)
#include <x86intrin.h>
#endif

#include <crypto/sha256.h>
#include <crypto/common.h>

src/crypto/sha256_shani.cpp (new file, 359 lines)

@@ -0,0 +1,359 @@
// Copyright (c) 2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//
// Based on https://github.com/noloader/SHA-Intrinsics/blob/master/sha256-x86.c,
// Written and placed in public domain by Jeffrey Walton.
// Based on code from Intel, and by Sean Gulley for the miTLS project.

#ifdef ENABLE_SHANI

#include <stdint.h>
#include <immintrin.h>

#include <crypto/common.h>


namespace {

const __m128i MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);
const __m128i INIT0 = _mm_set_epi64x(0x6a09e667bb67ae85ull, 0x510e527f9b05688cull);
const __m128i INIT1 = _mm_set_epi64x(0x3c6ef372a54ff53aull, 0x1f83d9ab5be0cd19ull);

void inline __attribute__((always_inline)) QuadRound(__m128i& state0, __m128i& state1, uint64_t k1, uint64_t k0)
{
    const __m128i msg = _mm_set_epi64x(k1, k0);
    state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
    state0 = _mm_sha256rnds2_epu32(state0, state1, _mm_shuffle_epi32(msg, 0x0e));
}

void inline __attribute__((always_inline)) QuadRound(__m128i& state0, __m128i& state1, __m128i m, uint64_t k1, uint64_t k0)
{
    const __m128i msg = _mm_add_epi32(m, _mm_set_epi64x(k1, k0));
    state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
    state0 = _mm_sha256rnds2_epu32(state0, state1, _mm_shuffle_epi32(msg, 0x0e));
}

void inline __attribute__((always_inline)) ShiftMessageA(__m128i& m0, __m128i m1)
{
    m0 = _mm_sha256msg1_epu32(m0, m1);
}

void inline __attribute__((always_inline)) ShiftMessageC(__m128i& m0, __m128i m1, __m128i& m2)
{
    m2 = _mm_sha256msg2_epu32(_mm_add_epi32(m2, _mm_alignr_epi8(m1, m0, 4)), m1);
}

void inline __attribute__((always_inline)) ShiftMessageB(__m128i& m0, __m128i m1, __m128i& m2)
{
    ShiftMessageC(m0, m1, m2);
    ShiftMessageA(m0, m1);
}

void inline __attribute__((always_inline)) Shuffle(__m128i& s0, __m128i& s1)
{
    const __m128i t1 = _mm_shuffle_epi32(s0, 0xB1);
    const __m128i t2 = _mm_shuffle_epi32(s1, 0x1B);
    s0 = _mm_alignr_epi8(t1, t2, 0x08);
    s1 = _mm_blend_epi16(t2, t1, 0xF0);
}

void inline __attribute__((always_inline)) Unshuffle(__m128i& s0, __m128i& s1)
{
    const __m128i t1 = _mm_shuffle_epi32(s0, 0x1B);
    const __m128i t2 = _mm_shuffle_epi32(s1, 0xB1);
    s0 = _mm_blend_epi16(t1, t2, 0xF0);
    s1 = _mm_alignr_epi8(t2, t1, 0x08);
}

__m128i inline __attribute__((always_inline)) Load(const unsigned char* in)
{
    return _mm_shuffle_epi8(_mm_loadu_si128((const __m128i*)in), MASK);
}

void inline __attribute__((always_inline)) Save(unsigned char* out, __m128i s)
{
    _mm_storeu_si128((__m128i*)out, _mm_shuffle_epi8(s, MASK));
}
}

namespace sha256_shani {
void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks)
{
    __m128i m0, m1, m2, m3, s0, s1, so0, so1;

    /* Load state */
    s0 = _mm_loadu_si128((const __m128i*)s);
    s1 = _mm_loadu_si128((const __m128i*)(s + 4));
    Shuffle(s0, s1);

    while (blocks--) {
        /* Remember old state */
        so0 = s0;
        so1 = s1;

        /* Load data and transform */
        m0 = Load(chunk);
        QuadRound(s0, s1, m0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull);
        m1 = Load(chunk + 16);
        QuadRound(s0, s1, m1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
        ShiftMessageA(m0, m1);
        m2 = Load(chunk + 32);
        QuadRound(s0, s1, m2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
        ShiftMessageA(m1, m2);
        m3 = Load(chunk + 48);
        QuadRound(s0, s1, m3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull);
        ShiftMessageB(m2, m3, m0);
        QuadRound(s0, s1, m0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull);
        ShiftMessageB(m3, m0, m1);
        QuadRound(s0, s1, m1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
        ShiftMessageB(m0, m1, m2);
        QuadRound(s0, s1, m2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
        ShiftMessageB(m1, m2, m3);
        QuadRound(s0, s1, m3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
        ShiftMessageB(m2, m3, m0);
        QuadRound(s0, s1, m0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
        ShiftMessageB(m3, m0, m1);
        QuadRound(s0, s1, m1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
        ShiftMessageB(m0, m1, m2);
        QuadRound(s0, s1, m2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull);
        ShiftMessageB(m1, m2, m3);
        QuadRound(s0, s1, m3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
        ShiftMessageB(m2, m3, m0);
        QuadRound(s0, s1, m0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
        ShiftMessageB(m3, m0, m1);
        QuadRound(s0, s1, m1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
        ShiftMessageC(m0, m1, m2);
        QuadRound(s0, s1, m2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
        ShiftMessageC(m1, m2, m3);
        QuadRound(s0, s1, m3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull);

        /* Combine with old state */
        s0 = _mm_add_epi32(s0, so0);
        s1 = _mm_add_epi32(s1, so1);

        /* Advance */
        chunk += 64;
    }

    Unshuffle(s0, s1);
    _mm_storeu_si128((__m128i*)s, s0);
    _mm_storeu_si128((__m128i*)(s + 4), s1);
}
}

namespace sha256d64_shani {

void Transform_2way(unsigned char* out, const unsigned char* in)
{
    __m128i am0, am1, am2, am3, as0, as1, aso0, aso1;
    __m128i bm0, bm1, bm2, bm3, bs0, bs1, bso0, bso1;

    /* Transform 1 */
    bs0 = as0 = INIT0;
    bs1 = as1 = INIT1;
    am0 = Load(in);
    bm0 = Load(in + 64);
    QuadRound(as0, as1, am0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull);
    QuadRound(bs0, bs1, bm0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull);
    am1 = Load(in + 16);
    bm1 = Load(in + 80);
    QuadRound(as0, as1, am1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    QuadRound(bs0, bs1, bm1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    ShiftMessageA(am0, am1);
    ShiftMessageA(bm0, bm1);
    am2 = Load(in + 32);
    bm2 = Load(in + 96);
    QuadRound(as0, as1, am2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
    QuadRound(bs0, bs1, bm2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
    ShiftMessageA(am1, am2);
    ShiftMessageA(bm1, bm2);
    am3 = Load(in + 48);
    bm3 = Load(in + 112);
    QuadRound(as0, as1, am3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull);
    QuadRound(bs0, bs1, bm3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull);
    QuadRound(bs0, bs1, bm0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
    QuadRound(bs0, bs1, bm1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
    ShiftMessageB(am0, am1, am2);
    ShiftMessageB(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
    QuadRound(bs0, bs1, bm2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
    ShiftMessageB(am1, am2, am3);
    ShiftMessageB(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
    QuadRound(bs0, bs1, bm3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
    QuadRound(bs0, bs1, bm0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
    QuadRound(bs0, bs1, bm1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
    ShiftMessageB(am0, am1, am2);
    ShiftMessageB(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull);
    QuadRound(bs0, bs1, bm2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull);
    ShiftMessageB(am1, am2, am3);
    ShiftMessageB(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
    QuadRound(bs0, bs1, bm3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
    QuadRound(bs0, bs1, bm0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
    QuadRound(bs0, bs1, bm1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
    ShiftMessageC(am0, am1, am2);
    ShiftMessageC(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
    QuadRound(bs0, bs1, bm2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
    ShiftMessageC(am1, am2, am3);
    ShiftMessageC(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull);
    QuadRound(bs0, bs1, bm3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull);
    as0 = _mm_add_epi32(as0, INIT0);
    bs0 = _mm_add_epi32(bs0, INIT0);
    as1 = _mm_add_epi32(as1, INIT1);
    bs1 = _mm_add_epi32(bs1, INIT1);

    /* Transform 2 */
    aso0 = as0;
    bso0 = bs0;
    aso1 = as1;
    bso1 = bs1;
    QuadRound(as0, as1, 0xe9b5dba5b5c0fbcfull, 0x71374491c28a2f98ull);
    QuadRound(bs0, bs1, 0xe9b5dba5b5c0fbcfull, 0x71374491c28a2f98ull);
    QuadRound(as0, as1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    QuadRound(bs0, bs1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    QuadRound(as0, as1, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
    QuadRound(bs0, bs1, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
    QuadRound(as0, as1, 0xc19bf3749bdc06a7ull, 0x80deb1fe72be5d74ull);
    QuadRound(bs0, bs1, 0xc19bf3749bdc06a7ull, 0x80deb1fe72be5d74ull);
    QuadRound(as0, as1, 0x240cf2540fe1edc6ull, 0xf0fe4786649b69c1ull);
    QuadRound(bs0, bs1, 0x240cf2540fe1edc6ull, 0xf0fe4786649b69c1ull);
    QuadRound(as0, as1, 0x16f988fa61b9411eull, 0x6cc984be4fe9346full);
    QuadRound(bs0, bs1, 0x16f988fa61b9411eull, 0x6cc984be4fe9346full);
    QuadRound(as0, as1, 0xb9d99ec7b019fc65ull, 0xa88e5a6df2c65152ull);
    QuadRound(bs0, bs1, 0xb9d99ec7b019fc65ull, 0xa88e5a6df2c65152ull);
    QuadRound(as0, as1, 0xc7353eb0fdb1232bull, 0xe70eeaa09a1231c3ull);
    QuadRound(bs0, bs1, 0xc7353eb0fdb1232bull, 0xe70eeaa09a1231c3ull);
    QuadRound(as0, as1, 0xdc1eeefd5a0f118full, 0xcb976d5f3069bad5ull);
    QuadRound(bs0, bs1, 0xdc1eeefd5a0f118full, 0xcb976d5f3069bad5ull);
    QuadRound(as0, as1, 0xe15d5b1658f4ca9dull, 0xde0b7a040a35b689ull);
    QuadRound(bs0, bs1, 0xe15d5b1658f4ca9dull, 0xde0b7a040a35b689ull);
    QuadRound(as0, as1, 0x6fab9537a507ea32ull, 0x37088980007f3e86ull);
    QuadRound(bs0, bs1, 0x6fab9537a507ea32ull, 0x37088980007f3e86ull);
    QuadRound(as0, as1, 0xc0bbbe37cdaa3b6dull, 0x0d8cd6f117406110ull);
    QuadRound(bs0, bs1, 0xc0bbbe37cdaa3b6dull, 0x0d8cd6f117406110ull);
    QuadRound(as0, as1, 0x6fd15ca70b02e931ull, 0xdb48a36383613bdaull);
    QuadRound(bs0, bs1, 0x6fd15ca70b02e931ull, 0xdb48a36383613bdaull);
    QuadRound(as0, as1, 0x6d4378906ed41a95ull, 0x31338431521afacaull);
    QuadRound(bs0, bs1, 0x6d4378906ed41a95ull, 0x31338431521afacaull);
    QuadRound(as0, as1, 0x532fb63cb5c9a0e6ull, 0x9eccabbdc39c91f2ull);
    QuadRound(bs0, bs1, 0x532fb63cb5c9a0e6ull, 0x9eccabbdc39c91f2ull);
    QuadRound(as0, as1, 0x4c191d76a4954b68ull, 0x07237ea3d2c741c6ull);
    QuadRound(bs0, bs1, 0x4c191d76a4954b68ull, 0x07237ea3d2c741c6ull);
    as0 = _mm_add_epi32(as0, aso0);
    bs0 = _mm_add_epi32(bs0, bso0);
    as1 = _mm_add_epi32(as1, aso1);
    bs1 = _mm_add_epi32(bs1, bso1);

    /* Extract hash */
    Unshuffle(as0, as1);
    Unshuffle(bs0, bs1);
    am0 = as0;
    bm0 = bs0;
    am1 = as1;
    bm1 = bs1;

    /* Transform 3 */
    bs0 = as0 = INIT0;
    bs1 = as1 = INIT1;
    QuadRound(as0, as1, am0, 0xe9b5dba5B5c0fbcfull, 0x71374491428a2f98ull);
    QuadRound(bs0, bs1, bm0, 0xe9b5dba5B5c0fbcfull, 0x71374491428a2f98ull);
    QuadRound(as0, as1, am1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    QuadRound(bs0, bs1, bm1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    ShiftMessageA(am0, am1);
    ShiftMessageA(bm0, bm1);
    bm2 = am2 = _mm_set_epi64x(0x0ull, 0x80000000ull);
    QuadRound(as0, as1, 0x550c7dc3243185beull, 0x12835b015807aa98ull);
    QuadRound(bs0, bs1, 0x550c7dc3243185beull, 0x12835b015807aa98ull);
    ShiftMessageA(am1, am2);
    ShiftMessageA(bm1, bm2);
    bm3 = am3 = _mm_set_epi64x(0x10000000000ull, 0x0ull);
    QuadRound(as0, as1, 0xc19bf2749bdc06a7ull, 0x80deb1fe72be5d74ull);
    QuadRound(bs0, bs1, 0xc19bf2749bdc06a7ull, 0x80deb1fe72be5d74ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x240ca1cc0fc19dc6ull, 0xefbe4786e49b69c1ull);
    QuadRound(bs0, bs1, bm0, 0x240ca1cc0fc19dc6ull, 0xefbe4786e49b69c1ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
    QuadRound(bs0, bs1, bm1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
    ShiftMessageB(am0, am1, am2);
    ShiftMessageB(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
    QuadRound(bs0, bs1, bm2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
    ShiftMessageB(am1, am2, am3);
    ShiftMessageB(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
    QuadRound(bs0, bs1, bm3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
    QuadRound(bs0, bs1, bm0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
    QuadRound(bs0, bs1, bm1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
    ShiftMessageB(am0, am1, am2);
    ShiftMessageB(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0xc76c51a3c24b8b70ull, 0xa81a664ba2bfe8A1ull);
    QuadRound(bs0, bs1, bm2, 0xc76c51a3c24b8b70ull, 0xa81a664ba2bfe8A1ull);
    ShiftMessageB(am1, am2, am3);
    ShiftMessageB(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
    QuadRound(bs0, bs1, bm3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
    QuadRound(bs0, bs1, bm0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
    QuadRound(bs0, bs1, bm1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
    ShiftMessageC(am0, am1, am2);
    ShiftMessageC(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
    QuadRound(bs0, bs1, bm2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
    ShiftMessageC(am1, am2, am3);
    ShiftMessageC(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0xc67178f2bef9a3f7ull, 0xa4506ceb90befffaull);
    QuadRound(bs0, bs1, bm3, 0xc67178f2bef9a3f7ull, 0xa4506ceb90befffaull);
    as0 = _mm_add_epi32(as0, INIT0);
    bs0 = _mm_add_epi32(bs0, INIT0);
    as1 = _mm_add_epi32(as1, INIT1);
    bs1 = _mm_add_epi32(bs1, INIT1);

    /* Extract hash into out */
    Unshuffle(as0, as1);
    Unshuffle(bs0, bs1);
    Save(out, as0);
    Save(out + 16, as1);
    Save(out + 32, bs0);
    Save(out + 48, bs1);
}

}

#endif
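A note for readers of the constants above (commentary, not part of the change): _mm_sha256rnds2_epu32 performs two SHA-256 rounds, so each QuadRound call covers four rounds and the 16 calls per block cover all 64. The two uint64_t arguments are the standard round constants K[i..i+3] packed pairwise; in Transforms 2 and 3 the calls without a message argument use constants pre-added with the known message-schedule words of the fixed padding block. A small sketch of the packing, using the first four standard K values:

    // k0 = (K[i+1] << 32) | K[i], k1 = (K[i+3] << 32) | K[i+2];
    // reproduces the first QuadRound arguments in Transform above.
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t K[4] = {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5};
        const uint64_t k0 = (uint64_t{K[1]} << 32) | K[0];
        const uint64_t k1 = (uint64_t{K[3]} << 32) | K[2];
        std::printf("k1=0x%016llx k0=0x%016llx\n",
                    (unsigned long long)k1, (unsigned long long)k0);
        // Prints k1=0xe9b5dba5b5c0fbcf k0=0x71374491428a2f98
        return 0;
    }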

src/crypto/sha256_sse41.cpp

@@ -1,11 +1,7 @@
#ifdef ENABLE_SSE41

#include <stdint.h>
#if defined(_MSC_VER)
#include <immintrin.h>
#elif defined(__GNUC__)
#include <x86intrin.h>
#endif

#include <crypto/sha256.h>
#include <crypto/common.h>