Horribly mangle scrypt.c

amd64 linux speedup from 2.02 to 2.67 kH/s with default options, and from 2.59 to 3.24 kH/s with -O3
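The gain comes from specializing everything to scrypt's fixed parameters (N=1024, r=1, p=1, 80-byte input, 128-byte intermediate block): run-time lengths and generic helpers become compile-time constants and inlined code, so loops can be unrolled and state kept in registers. A minimal sketch of that pattern, illustrative only with hypothetical names, not code from this commit:

/* Illustrative only -- not part of scrypt.c.  Same idea as the commit:
 * a run-time length becomes a compile-time constant, so the compiler can
 * unroll the loop and keep the data in registers. */
#include <stdint.h>
#include <stddef.h>

/* generic: length known only at run time */
static void
xor_blocks(uint32_t *d, const uint32_t *s, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		d[i] ^= s[i];
}

/* specialized for the fixed 64-byte (16-word) salsa20/8 block used when
 * r = 1: constant trip count, fully unrollable */
static inline void
xor_block_64(uint32_t *d, const uint32_t *s)
{
	int i;

	for (i = 0; i < 16; i++)
		d[i] ^= s[i];
}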
Art Forz 2011-10-01 08:08:11 +02:00
parent cd0b57640c
commit 2a05bdd14c

scrypt.c: 450 changed lines

@@ -55,26 +55,6 @@ be32enc(void *pp, uint32_t x)
 	p[0] = (x >> 24) & 0xff;
 }
-
-static inline uint32_t
-le32dec(const void *pp)
-{
-	const uint8_t *p = (uint8_t const *)pp;
-
-	return ((uint32_t)(p[0]) + ((uint32_t)(p[1]) << 8) +
-	    ((uint32_t)(p[2]) << 16) + ((uint32_t)(p[3]) << 24));
-}
-
-static inline void
-le32enc(void *pp, uint32_t x)
-{
-	uint8_t * p = (uint8_t *)pp;
-
-	p[0] = x & 0xff;
-	p[1] = (x >> 8) & 0xff;
-	p[2] = (x >> 16) & 0xff;
-	p[3] = (x >> 24) & 0xff;
-}
 
 typedef struct SHA256Context {
 	uint32_t state[8];
@@ -91,7 +71,7 @@ typedef struct HMAC_SHA256Context {
  * Encode a length len/4 vector of (uint32_t) into a length len vector of
  * (unsigned char) in big-endian form. Assumes len is a multiple of 4.
  */
-static void
+static inline void
 be32enc_vect(unsigned char *dst, const uint32_t *src, size_t len)
 {
 	size_t i;
@@ -104,7 +84,7 @@ be32enc_vect(unsigned char *dst, const uint32_t *src, size_t len)
  * Decode a big-endian length len vector of (unsigned char) into a length
  * len/4 vector of (uint32_t). Assumes len is a multiple of 4.
  */
-static void
+static inline void
 be32dec_vect(uint32_t *dst, const unsigned char *src, size_t len)
 {
 	size_t i;
@@ -227,11 +207,6 @@ SHA256_Transform(uint32_t * state, const unsigned char block[64])
 	/* 4. Mix local working variables into global state */
 	for (i = 0; i < 8; i++)
 		state[i] += S[i];
-
-	/* Clean the stack. */
-	memset(W, 0, 256);
-	memset(S, 0, 32);
-	t0 = t1 = 0;
 }
 
 static unsigned char PAD[64] = {
@@ -242,7 +217,7 @@ static unsigned char PAD[64] = {
 };
 
 /* SHA-256 initialization. Begins a SHA-256 operation. */
-static void
+static inline void
 SHA256_Init(SHA256_CTX * ctx)
 {
@@ -261,7 +236,7 @@ SHA256_Init(SHA256_CTX * ctx)
 }
 
 /* Add bytes into the hash */
-static void
+static inline void
 SHA256_Update(SHA256_CTX * ctx, const void *in, size_t len)
 {
 	uint32_t bitlen[2];
@@ -304,7 +279,7 @@ SHA256_Update(SHA256_CTX * ctx, const void *in, size_t len)
 }
 
 /* Add padding and terminating bit-count. */
-static void
+static inline void
 SHA256_Pad(SHA256_CTX * ctx)
 {
 	unsigned char len[8];
@@ -329,7 +304,7 @@ SHA256_Pad(SHA256_CTX * ctx)
  * SHA-256 finalization. Pads the input data, exports the hash value,
  * and clears the context state.
  */
-static void
+static inline void
 SHA256_Final(unsigned char digest[32], SHA256_CTX * ctx)
 {
@@ -338,73 +313,6 @@ SHA256_Final(unsigned char digest[32], SHA256_CTX * ctx)
 	/* Write the hash */
 	be32enc_vect(digest, ctx->state, 32);
-
-	/* Clear the context state */
-	memset((void *)ctx, 0, sizeof(*ctx));
-}
-
-/* Initialize an HMAC-SHA256 operation with the given key. */
-static void
-HMAC_SHA256_Init(HMAC_SHA256_CTX * ctx, const void * _K, size_t Klen)
-{
-	unsigned char pad[64];
-	unsigned char khash[32];
-	const unsigned char * K = _K;
-	size_t i;
-
-	/* If Klen > 64, the key is really SHA256(K). */
-	if (Klen > 64) {
-		SHA256_Init(&ctx->ictx);
-		SHA256_Update(&ctx->ictx, K, Klen);
-		SHA256_Final(khash, &ctx->ictx);
-		K = khash;
-		Klen = 32;
-	}
-
-	/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
-	SHA256_Init(&ctx->ictx);
-	memset(pad, 0x36, 64);
-	for (i = 0; i < Klen; i++)
-		pad[i] ^= K[i];
-	SHA256_Update(&ctx->ictx, pad, 64);
-
-	/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
-	SHA256_Init(&ctx->octx);
-	memset(pad, 0x5c, 64);
-	for (i = 0; i < Klen; i++)
-		pad[i] ^= K[i];
-	SHA256_Update(&ctx->octx, pad, 64);
-
-	/* Clean the stack. */
-	memset(khash, 0, 32);
-}
-
-/* Add bytes to the HMAC-SHA256 operation. */
-static void
-HMAC_SHA256_Update(HMAC_SHA256_CTX * ctx, const void *in, size_t len)
-{
-	/* Feed data to the inner SHA256 operation. */
-	SHA256_Update(&ctx->ictx, in, len);
-}
-
-/* Finish an HMAC-SHA256 operation. */
-static void
-HMAC_SHA256_Final(unsigned char digest[32], HMAC_SHA256_CTX * ctx)
-{
-	unsigned char ihash[32];
-
-	/* Finish the inner SHA256 operation. */
-	SHA256_Final(ihash, &ctx->ictx);
-
-	/* Feed the inner hash to the outer SHA256 operation. */
-	SHA256_Update(&ctx->octx, ihash, 32);
-
-	/* Finish the outer SHA256 operation. */
-	SHA256_Final(digest, &ctx->octx);
-
-	/* Clean the stack. */
-	memset(ihash, 0, 32);
 }
 
 /**
@@ -412,67 +320,101 @@ HMAC_SHA256_Final(digest[32], HMAC_SHA256_CTX * ctx)
  * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
  * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
  */
-static void
-PBKDF2_SHA256(const uint8_t * passwd, size_t passwdlen, const uint8_t * salt,
-    size_t saltlen, uint64_t c, uint8_t * buf, size_t dkLen)
+static inline void
+PBKDF2_SHA256_80_128(const uint8_t * passwd, uint8_t * buf)
 {
 	HMAC_SHA256_CTX PShctx, hctx;
 	size_t i;
 	uint8_t ivec[4];
-	uint8_t U[32];
-	uint8_t T[32];
-	uint64_t j;
-	int k;
-	size_t clen;
+	unsigned char ihash[32];
 
 	/* Compute HMAC state after processing P and S. */
-	HMAC_SHA256_Init(&PShctx, passwd, passwdlen);
-	HMAC_SHA256_Update(&PShctx, salt, saltlen);
+	unsigned char pad[64];
+	unsigned char khash[32];
+
+	/* If Klen > 64, the key is really SHA256(K). */
+	SHA256_Init(&PShctx.ictx);
+	SHA256_Update(&PShctx.ictx, passwd, 80);
+	SHA256_Final(khash, &PShctx.ictx);
+
+	SHA256_Init(&PShctx.ictx);
+	memset(pad, 0x36, 64);
+	for (i = 0; i < 32; i++)
+		pad[i] ^= khash[i];
+	SHA256_Update(&PShctx.ictx, pad, 64);
+
+	SHA256_Init(&PShctx.octx);
+	memset(pad, 0x5c, 64);
+	for (i = 0; i < 32; i++)
+		pad[i] ^= khash[i];
+	SHA256_Update(&PShctx.octx, pad, 64);
+
+	SHA256_Update(&PShctx.ictx, passwd, 80);
 
 	/* Iterate through the blocks. */
-	for (i = 0; i * 32 < dkLen; i++) {
+	for (i = 0; i * 32 < 128; i++) {
 		/* Generate INT(i + 1). */
 		be32enc(ivec, (uint32_t)(i + 1));
 
 		/* Compute U_1 = PRF(P, S || INT(i)). */
 		memcpy(&hctx, &PShctx, sizeof(HMAC_SHA256_CTX));
-		HMAC_SHA256_Update(&hctx, ivec, 4);
-		HMAC_SHA256_Final(U, &hctx);
-
-		/* T_i = U_1 ... */
-		memcpy(T, U, 32);
-
-		for (j = 2; j <= c; j++) {
-			/* Compute U_j. */
-			HMAC_SHA256_Init(&hctx, passwd, passwdlen);
-			HMAC_SHA256_Update(&hctx, U, 32);
-			HMAC_SHA256_Final(U, &hctx);
-
-			/* ... xor U_j ... */
-			for (k = 0; k < 32; k++)
-				T[k] ^= U[k];
-		}
-
-		/* Copy as many bytes as necessary into buf. */
-		clen = dkLen - i * 32;
-		if (clen > 32)
-			clen = 32;
-		memcpy(&buf[i * 32], T, clen);
+		SHA256_Update(&hctx.ictx, ivec, 4);
+		SHA256_Final(ihash, &hctx.ictx);
+		/* Feed the inner hash to the outer SHA256 operation. */
+		SHA256_Update(&hctx.octx, ihash, 32);
+		/* Finish the outer SHA256 operation. */
+		SHA256_Final(&buf[i*32], &hctx.octx);
 	}
-
-	/* Clean PShctx, since we never called _Final on it. */
-	memset(&PShctx, 0, sizeof(HMAC_SHA256_CTX));
 }
 
-static void blkcpy(void *, void *, size_t);
-static void blkxor(void *, void *, size_t);
-static void salsa20_8(uint32_t[16]);
-static void blockmix_salsa8(uint32_t *, uint32_t *, uint32_t *, size_t);
-static uint64_t integerify(void *, size_t);
-static void smix(uint8_t *, size_t, uint64_t, uint32_t *, uint32_t *);
-
-static void
+static inline void
+PBKDF2_SHA256_80_128_32(const uint8_t * passwd, const uint8_t * salt, uint8_t * buf)
+{
+	HMAC_SHA256_CTX PShctx;
+	size_t i;
+	uint8_t ivec[4];
+	unsigned char ihash[32];
+
+	/* Compute HMAC state after processing P and S. */
+	unsigned char pad[64];
+	unsigned char khash[32];
+
+	/* If Klen > 64, the key is really SHA256(K). */
+	SHA256_Init(&PShctx.ictx);
+	SHA256_Update(&PShctx.ictx, passwd, 80);
+	SHA256_Final(khash, &PShctx.ictx);
+
+	SHA256_Init(&PShctx.ictx);
+	memset(pad, 0x36, 64);
+	for (i = 0; i < 32; i++)
+		pad[i] ^= khash[i];
+	SHA256_Update(&PShctx.ictx, pad, 64);
+
+	SHA256_Init(&PShctx.octx);
+	memset(pad, 0x5c, 64);
+	for (i = 0; i < 32; i++)
+		pad[i] ^= khash[i];
+	SHA256_Update(&PShctx.octx, pad, 64);
+
+	SHA256_Update(&PShctx.ictx, salt, 128);
+
+	/* Generate INT(i + 1). */
+	be32enc(ivec, (uint32_t)(1));
+
+	/* Compute U_1 = PRF(P, S || INT(i)). */
+	SHA256_Update(&PShctx.ictx, ivec, 4);
+	SHA256_Final(ihash, &PShctx.ictx);
+	/* Feed the inner hash to the outer SHA256 operation. */
+	SHA256_Update(&PShctx.octx, ihash, 32);
+	/* Finish the outer SHA256 operation. */
+	SHA256_Final(&buf[0], &PShctx.octx);
+}
+
+static inline void
 blkcpy(void * dest, void * src, size_t len)
 {
 	size_t * D = dest;
@@ -484,7 +426,7 @@ blkcpy(void * dest, void * src, size_t len)
 		D[i] = S[i];
 }
 
-static void
+static inline void
 blkxor(void * dest, void * src, size_t len)
 {
 	size_t * D = dest;
@@ -500,149 +442,59 @@ blkxor(void * dest, void * src, size_t len)
  * salsa20_8(B):
  * Apply the salsa20/8 core to the provided block.
  */
-static void
+static inline void
 salsa20_8(uint32_t B[16])
 {
-	uint32_t x[16];
+	uint32_t x00,x01,x02,x03,x04,x05,x06,x07,x08,x09,x10,x11,x12,x13,x14,x15;
 	size_t i;
 
-	blkcpy(x, B, 64);
+	x00 = B[ 0];
+	x01 = B[ 1];
+	x02 = B[ 2];
+	x03 = B[ 3];
+	x04 = B[ 4];
+	x05 = B[ 5];
+	x06 = B[ 6];
+	x07 = B[ 7];
+	x08 = B[ 8];
+	x09 = B[ 9];
+	x10 = B[10];
+	x11 = B[11];
+	x12 = B[12];
+	x13 = B[13];
+	x14 = B[14];
+	x15 = B[15];
 	for (i = 0; i < 8; i += 2) {
 #define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
 		/* Operate on columns. */
-		x[ 4] ^= R(x[ 0]+x[12], 7);	x[ 8] ^= R(x[ 4]+x[ 0], 9);
-		x[12] ^= R(x[ 8]+x[ 4],13);	x[ 0] ^= R(x[12]+x[ 8],18);
-
-		x[ 9] ^= R(x[ 5]+x[ 1], 7);	x[13] ^= R(x[ 9]+x[ 5], 9);
-		x[ 1] ^= R(x[13]+x[ 9],13);	x[ 5] ^= R(x[ 1]+x[13],18);
-
-		x[14] ^= R(x[10]+x[ 6], 7);	x[ 2] ^= R(x[14]+x[10], 9);
-		x[ 6] ^= R(x[ 2]+x[14],13);	x[10] ^= R(x[ 6]+x[ 2],18);
-
-		x[ 3] ^= R(x[15]+x[11], 7);	x[ 7] ^= R(x[ 3]+x[15], 9);
-		x[11] ^= R(x[ 7]+x[ 3],13);	x[15] ^= R(x[11]+x[ 7],18);
+		x04 ^= R(x00+x12, 7);  x08 ^= R(x04+x00, 9);  x12 ^= R(x08+x04,13);  x00 ^= R(x12+x08,18);
+		x09 ^= R(x05+x01, 7);  x13 ^= R(x09+x05, 9);  x01 ^= R(x13+x09,13);  x05 ^= R(x01+x13,18);
+		x14 ^= R(x10+x06, 7);  x02 ^= R(x14+x10, 9);  x06 ^= R(x02+x14,13);  x10 ^= R(x06+x02,18);
+		x03 ^= R(x15+x11, 7);  x07 ^= R(x03+x15, 9);  x11 ^= R(x07+x03,13);  x15 ^= R(x11+x07,18);
 
 		/* Operate on rows. */
-		x[ 1] ^= R(x[ 0]+x[ 3], 7);	x[ 2] ^= R(x[ 1]+x[ 0], 9);
-		x[ 3] ^= R(x[ 2]+x[ 1],13);	x[ 0] ^= R(x[ 3]+x[ 2],18);
-
-		x[ 6] ^= R(x[ 5]+x[ 4], 7);	x[ 7] ^= R(x[ 6]+x[ 5], 9);
-		x[ 4] ^= R(x[ 7]+x[ 6],13);	x[ 5] ^= R(x[ 4]+x[ 7],18);
-
-		x[11] ^= R(x[10]+x[ 9], 7);	x[ 8] ^= R(x[11]+x[10], 9);
-		x[ 9] ^= R(x[ 8]+x[11],13);	x[10] ^= R(x[ 9]+x[ 8],18);
-
-		x[12] ^= R(x[15]+x[14], 7);	x[13] ^= R(x[12]+x[15], 9);
-		x[14] ^= R(x[13]+x[12],13);	x[15] ^= R(x[14]+x[13],18);
+		x01 ^= R(x00+x03, 7);  x02 ^= R(x01+x00, 9);  x03 ^= R(x02+x01,13);  x00 ^= R(x03+x02,18);
+		x06 ^= R(x05+x04, 7);  x07 ^= R(x06+x05, 9);  x04 ^= R(x07+x06,13);  x05 ^= R(x04+x07,18);
+		x11 ^= R(x10+x09, 7);  x08 ^= R(x11+x10, 9);  x09 ^= R(x08+x11,13);  x10 ^= R(x09+x08,18);
+		x12 ^= R(x15+x14, 7);  x13 ^= R(x12+x15, 9);  x14 ^= R(x13+x12,13);  x15 ^= R(x14+x13,18);
 #undef R
 	}
-	for (i = 0; i < 16; i++)
-		B[i] += x[i];
-}
-
-/**
- * blockmix_salsa8(Bin, Bout, X, r):
- * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
- * bytes in length; the output Bout must also be the same size.  The
- * temporary space X must be 64 bytes.
- */
-static void
-blockmix_salsa8(uint32_t * Bin, uint32_t * Bout, uint32_t * X, size_t r)
-{
-	size_t i;
-
-	/* 1: X <-- B_{2r - 1} */
-	blkcpy(X, &Bin[(2 * r - 1) * 16], 64);
-
-	/* 2: for i = 0 to 2r - 1 do */
-	for (i = 0; i < 2 * r; i += 2) {
-		/* 3: X <-- H(X \xor B_i) */
-		blkxor(X, &Bin[i * 16], 64);
-		salsa20_8(X);
-
-		/* 4: Y_i <-- X */
-		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
-		blkcpy(&Bout[i * 8], X, 64);
-
-		/* 3: X <-- H(X \xor B_i) */
-		blkxor(X, &Bin[i * 16 + 16], 64);
-		salsa20_8(X);
-
-		/* 4: Y_i <-- X */
-		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
-		blkcpy(&Bout[i * 8 + r * 16], X, 64);
-	}
-}
-
-/**
- * integerify(B, r):
- * Return the result of parsing B_{2r-1} as a little-endian integer.
- */
-static uint64_t
-integerify(void * B, size_t r)
-{
-	uint32_t * X = (void *)((uintptr_t)(B) + (2 * r - 1) * 64);
-
-	return (((uint64_t)(X[1]) << 32) + X[0]);
-}
-
-/**
- * smix(B, r, N, V, XY):
- * Compute B = SMix_r(B, N).  The input B must be 128r bytes in length;
- * the temporary storage V must be 128rN bytes in length; the temporary
- * storage XY must be 256r + 64 bytes in length.  The value N must be a
- * power of 2 greater than 1.  The arrays B, V, and XY must be aligned to a
- * multiple of 64 bytes.
- */
-static void
-smix(uint8_t * B, size_t r, uint64_t N, uint32_t * V, uint32_t * XY)
-{
-	uint32_t * X = XY;
-	uint32_t * Y = &XY[32 * r];
-	uint32_t * Z = &XY[64 * r];
-	uint64_t i;
-	uint64_t j;
-	size_t k;
-
-	/* 1: X <-- B */
-	for (k = 0; k < 32 * r; k++)
-		X[k] = le32dec(&B[4 * k]);
-
-	/* 2: for i = 0 to N - 1 do */
-	for (i = 0; i < N; i += 2) {
-		/* 3: V_i <-- X */
-		blkcpy(&V[i * (32 * r)], X, 128 * r);
-
-		/* 4: X <-- H(X) */
-		blockmix_salsa8(X, Y, Z, r);
-
-		/* 3: V_i <-- X */
-		blkcpy(&V[(i + 1) * (32 * r)], Y, 128 * r);
-
-		/* 4: X <-- H(X) */
-		blockmix_salsa8(Y, X, Z, r);
-	}
-
-	/* 6: for i = 0 to N - 1 do */
-	for (i = 0; i < N; i += 2) {
-		/* 7: j <-- Integerify(X) mod N */
-		j = integerify(X, r) & (N - 1);
-
-		/* 8: X <-- H(X \xor V_j) */
-		blkxor(X, &V[j * (32 * r)], 128 * r);
-		blockmix_salsa8(X, Y, Z, r);
-
-		/* 7: j <-- Integerify(X) mod N */
-		j = integerify(Y, r) & (N - 1);
-
-		/* 8: X <-- H(X \xor V_j) */
-		blkxor(Y, &V[j * (32 * r)], 128 * r);
-		blockmix_salsa8(Y, X, Z, r);
-	}
-
-	/* 10: B' <-- X */
-	for (k = 0; k < 32 * r; k++)
-		le32enc(&B[4 * k], X[k]);
+	B[ 0] += x00;
+	B[ 1] += x01;
+	B[ 2] += x02;
+	B[ 3] += x03;
+	B[ 4] += x04;
+	B[ 5] += x05;
+	B[ 6] += x06;
+	B[ 7] += x07;
+	B[ 8] += x08;
+	B[ 9] += x09;
+	B[10] += x10;
+	B[11] += x11;
+	B[12] += x12;
+	B[13] += x13;
+	B[14] += x14;
+	B[15] += x15;
 }
@@ -650,30 +502,50 @@ smix(uint8_t * B, size_t r, uint64_t N, uint32_t * V, uint32_t * XY)
 /* cpu and memory intensive function to transform a 80 byte buffer into a 32 byte output
  */
 static void scrypt_1024_1_1_256_sp(const char* input, char* output, char* scratchpad)
 {
-	uint8_t * B;
 	uint32_t * V;
-	uint32_t * XY;
+	uint32_t * X;
 	uint32_t i;
+	uint32_t j;
 
-	const uint32_t N = 1024;
-	const uint32_t r = 1;
-	const uint32_t p = 1;
-
-	B = (uint8_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));
-	XY = (uint32_t *)(B + (128 * r * p));
-	V = (uint32_t *)(B + (128 * r * p) + (256 * r + 64));
+	X = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));
+	V = &X[32];
 
-	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
-	PBKDF2_SHA256((const uint8_t*)input, 80, (const uint8_t*)input, 80, 1, B, p * 128 * r);
+	PBKDF2_SHA256_80_128((const uint8_t*)input, (uint8_t *)X);
 
-	/* 2: for i = 0 to p - 1 do */
-	for (i = 0; i < p; i++) {
-		/* 3: B_i <-- MF(B_i, N) */
-		smix(&B[i * 128 * r], r, N, V, XY);
+	for (i = 0; i < 1024; i += 2) {
+		blkcpy(&V[i * 32], X, 128);
+		blkxor(&X[0], &X[16], 64);
+		salsa20_8(&X[0]);
+		blkxor(&X[16], &X[0], 64);
+		salsa20_8(&X[16]);
+		blkcpy(&V[(i + 1) * 32], X, 128);
+		blkxor(&X[0], &X[16], 64);
+		salsa20_8(&X[0]);
+		blkxor(&X[16], &X[0], 64);
+		salsa20_8(&X[16]);
+	}
+	for (i = 0; i < 1024; i += 2) {
+		j = X[16] & 1023;
+		blkxor(X, &V[j * 32], 128);
+		blkxor(&X[0], &X[16], 64);
+		salsa20_8(&X[0]);
+		blkxor(&X[16], &X[0], 64);
+		salsa20_8(&X[16]);
+		j = X[16] & 1023;
+		blkxor(X, &V[j * 32], 128);
+		blkxor(&X[0], &X[16], 64);
+		salsa20_8(&X[0]);
+		blkxor(&X[16], &X[0], 64);
+		salsa20_8(&X[16]);
 	}
 
-	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
-	PBKDF2_SHA256((const uint8_t*)input, 80, B, p * 128 * r, 1, (uint8_t*)output, 32);
+	PBKDF2_SHA256_80_128_32((const uint8_t*)input, (const uint8_t *)X, (uint8_t*)output);
 }
 
 int scanhash_scrypt(int thr_id, unsigned char *pdata, unsigned char *scratchbuf,