Reorder a few instructions

pooler 2011-12-28 19:37:34 +01:00
parent 36225b4206
commit 703d58cd82
3 changed files with 212 additions and 195 deletions
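The reordering is easiest to read off one quarter-round step of the Salsa20/8 core, x3 ^= ROTL32(x0 + x1, 7). Below is a minimal illustrative sketch in SSE2 intrinsics of the instruction schedule before and after the change; the helper names (quarter_step_old, quarter_step_new) and the next_sum out-parameter are hypothetical and not part of the commit, which edits the hand-written assembly that follows (Salsa state in xmm0-xmm3, plus xmm8-xmm11 for the dual path).

/* Illustrative sketch only (not part of the commit): the same rotate step
 * scheduled the old way and the new way, using SSE2 intrinsics. */
#include <emmintrin.h>

/* One Salsa20 quarter-round step: x3 ^= ROTL32(x0 + x1, 7). */

/* Old schedule: besides the sum, keep a copy of x0 for the next addition and
 * a second copy of the sum for the right shift -- three live temporaries
 * (the assembly used %xmm4, %xmm5 and %xmm6 for these). */
static inline __m128i quarter_step_old(__m128i x0, __m128i x1, __m128i x3,
                                       __m128i *next_sum)
{
    __m128i t = _mm_add_epi32(x0, x1);   /* t = x0 + x1                  */
    __m128i u = t;                       /* extra temp for the rotate    */
    *next_sum = x0;                      /* pre-copied for the next step */
    t = _mm_slli_epi32(t, 7);
    u = _mm_srli_epi32(u, 25);
    x3 = _mm_xor_si128(x3, t);
    return _mm_xor_si128(x3, u);
}

/* New schedule: rotate out of a single copy of the sum and reload x0 only
 * when the next quarter-round actually needs it -- one temporary fewer. */
static inline __m128i quarter_step_new(__m128i x0, __m128i x1, __m128i x3)
{
    __m128i t = _mm_add_epi32(x0, x1);
    __m128i u = _mm_srli_epi32(t, 25);
    t = _mm_slli_epi32(t, 7);
    x3 = _mm_xor_si128(x3, t);
    return _mm_xor_si128(x3, u);
}

Freeing that third temporary is what lets the dual-stream macros below keep the second hash in xmm8-xmm11 with only xmm6/xmm7 as scratch, instead of reaching for xmm12-xmm14.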

@@ -22,6 +22,10 @@
 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 # SUCH DAMAGE.
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
 #if defined(__x86_64__)
 .macro gen_salsa8_core_doubleround
@@ -430,73 +434,78 @@ gen_scrypt_core_loop2:
 .macro xmm_salsa8_core_doubleround
+movdqa %xmm1, %xmm4
 paddd %xmm0, %xmm4
-movdqa %xmm0, %xmm5
-movdqa %xmm4, %xmm6
+movdqa %xmm4, %xmm5
 pslld $7, %xmm4
-psrld $25, %xmm6
+psrld $25, %xmm5
 pxor %xmm4, %xmm3
-pxor %xmm6, %xmm3
-paddd %xmm3, %xmm5
+pxor %xmm5, %xmm3
+movdqa %xmm0, %xmm4
+paddd %xmm3, %xmm4
+movdqa %xmm4, %xmm5
+pslld $9, %xmm4
+psrld $23, %xmm5
+pxor %xmm4, %xmm2
 movdqa %xmm3, %xmm4
-movdqa %xmm5, %xmm6
-pslld $9, %xmm5
-psrld $23, %xmm6
-pxor %xmm5, %xmm2
 pshufd $0x93, %xmm3, %xmm3
-pxor %xmm6, %xmm2
+pxor %xmm5, %xmm2
 paddd %xmm2, %xmm4
-movdqa %xmm2, %xmm5
-movdqa %xmm4, %xmm6
+movdqa %xmm4, %xmm5
 pslld $13, %xmm4
-psrld $19, %xmm6
+psrld $19, %xmm5
 pxor %xmm4, %xmm1
+movdqa %xmm2, %xmm4
 pshufd $0x4e, %xmm2, %xmm2
-pxor %xmm6, %xmm1
-paddd %xmm1, %xmm5
-movdqa %xmm3, %xmm4
-movdqa %xmm5, %xmm6
-pslld $18, %xmm5
-psrld $14, %xmm6
-pxor %xmm5, %xmm0
+pxor %xmm5, %xmm1
+paddd %xmm1, %xmm4
+movdqa %xmm4, %xmm5
+pslld $18, %xmm4
+psrld $14, %xmm5
+pxor %xmm4, %xmm0
 pshufd $0x39, %xmm1, %xmm1
-pxor %xmm6, %xmm0
+pxor %xmm5, %xmm0
+movdqa %xmm3, %xmm4
 paddd %xmm0, %xmm4
-movdqa %xmm0, %xmm5
-movdqa %xmm4, %xmm6
+movdqa %xmm4, %xmm5
 pslld $7, %xmm4
-psrld $25, %xmm6
+psrld $25, %xmm5
 pxor %xmm4, %xmm1
-pxor %xmm6, %xmm1
-paddd %xmm1, %xmm5
+pxor %xmm5, %xmm1
+movdqa %xmm0, %xmm4
+paddd %xmm1, %xmm4
+movdqa %xmm4, %xmm5
+pslld $9, %xmm4
+psrld $23, %xmm5
+pxor %xmm4, %xmm2
 movdqa %xmm1, %xmm4
-movdqa %xmm5, %xmm6
-pslld $9, %xmm5
-psrld $23, %xmm6
-pxor %xmm5, %xmm2
 pshufd $0x93, %xmm1, %xmm1
-pxor %xmm6, %xmm2
+pxor %xmm5, %xmm2
 paddd %xmm2, %xmm4
-movdqa %xmm2, %xmm5
-movdqa %xmm4, %xmm6
+movdqa %xmm4, %xmm5
 pslld $13, %xmm4
-psrld $19, %xmm6
+psrld $19, %xmm5
 pxor %xmm4, %xmm3
+movdqa %xmm2, %xmm4
 pshufd $0x4e, %xmm2, %xmm2
-pxor %xmm6, %xmm3
-paddd %xmm3, %xmm5
-movdqa %xmm1, %xmm4
-movdqa %xmm5, %xmm6
-pslld $18, %xmm5
-psrld $14, %xmm6
-pxor %xmm5, %xmm0
+pxor %xmm5, %xmm3
+paddd %xmm3, %xmm4
+movdqa %xmm4, %xmm5
+pslld $18, %xmm4
+psrld $14, %xmm5
+pxor %xmm4, %xmm0
 pshufd $0x39, %xmm3, %xmm3
-pxor %xmm6, %xmm0
+pxor %xmm5, %xmm0
 .endm
 .macro xmm_salsa8_core
-movdqa %xmm1, %xmm4
 xmm_salsa8_core_doubleround
 xmm_salsa8_core_doubleround
 xmm_salsa8_core_doubleround
@@ -837,136 +846,140 @@ prefer_dual_scrypt_false:
 .macro xmm_dual_salsa8_core_doubleround
+movdqa %xmm1, %xmm4
+movdqa %xmm9, %xmm6
 paddd %xmm0, %xmm4
-paddd %xmm8, %xmm12
-movdqa %xmm0, %xmm5
-movdqa %xmm8, %xmm13
-movdqa %xmm4, %xmm6
-movdqa %xmm12, %xmm14
+paddd %xmm8, %xmm6
+movdqa %xmm4, %xmm5
+movdqa %xmm6, %xmm7
 pslld $7, %xmm4
-pslld $7, %xmm12
-psrld $25, %xmm6
-psrld $25, %xmm14
+pslld $7, %xmm6
+psrld $25, %xmm5
+psrld $25, %xmm7
 pxor %xmm4, %xmm3
-pxor %xmm12, %xmm11
-pxor %xmm6, %xmm3
-pxor %xmm14, %xmm11
-paddd %xmm3, %xmm5
-paddd %xmm11, %xmm13
+pxor %xmm6, %xmm11
+pxor %xmm5, %xmm3
+pxor %xmm7, %xmm11
+movdqa %xmm0, %xmm4
+movdqa %xmm8, %xmm6
+paddd %xmm3, %xmm4
+paddd %xmm11, %xmm6
+movdqa %xmm4, %xmm5
+movdqa %xmm6, %xmm7
+pslld $9, %xmm4
+pslld $9, %xmm6
+psrld $23, %xmm5
+psrld $23, %xmm7
+pxor %xmm4, %xmm2
+pxor %xmm6, %xmm10
 movdqa %xmm3, %xmm4
-movdqa %xmm11, %xmm12
-movdqa %xmm5, %xmm6
-movdqa %xmm13, %xmm14
-pslld $9, %xmm5
-pslld $9, %xmm13
-psrld $23, %xmm6
-psrld $23, %xmm14
-pxor %xmm5, %xmm2
-pxor %xmm13, %xmm10
+movdqa %xmm11, %xmm6
 pshufd $0x93, %xmm3, %xmm3
 pshufd $0x93, %xmm11, %xmm11
-pxor %xmm6, %xmm2
-pxor %xmm14, %xmm10
+pxor %xmm5, %xmm2
+pxor %xmm7, %xmm10
 paddd %xmm2, %xmm4
-paddd %xmm10, %xmm12
-movdqa %xmm2, %xmm5
-movdqa %xmm10, %xmm13
-movdqa %xmm4, %xmm6
-movdqa %xmm12, %xmm14
+paddd %xmm10, %xmm6
+movdqa %xmm4, %xmm5
+movdqa %xmm6, %xmm7
 pslld $13, %xmm4
-pslld $13, %xmm12
-psrld $19, %xmm6
-psrld $19, %xmm14
+pslld $13, %xmm6
+psrld $19, %xmm5
+psrld $19, %xmm7
 pxor %xmm4, %xmm1
-pxor %xmm12, %xmm9
+pxor %xmm6, %xmm9
+movdqa %xmm2, %xmm4
+movdqa %xmm10, %xmm6
 pshufd $0x4e, %xmm2, %xmm2
 pshufd $0x4e, %xmm10, %xmm10
-pxor %xmm6, %xmm1
-pxor %xmm14, %xmm9
-paddd %xmm1, %xmm5
-paddd %xmm9, %xmm13
-movdqa %xmm3, %xmm4
-movdqa %xmm11, %xmm12
-movdqa %xmm5, %xmm6
-movdqa %xmm13, %xmm14
-pslld $18, %xmm5
-pslld $18, %xmm13
-psrld $14, %xmm6
-psrld $14, %xmm14
-pxor %xmm5, %xmm0
-pxor %xmm13, %xmm8
+pxor %xmm5, %xmm1
+pxor %xmm7, %xmm9
+paddd %xmm1, %xmm4
+paddd %xmm9, %xmm6
+movdqa %xmm4, %xmm5
+movdqa %xmm6, %xmm7
+pslld $18, %xmm4
+pslld $18, %xmm6
+psrld $14, %xmm5
+psrld $14, %xmm7
+pxor %xmm4, %xmm0
+pxor %xmm6, %xmm8
 pshufd $0x39, %xmm1, %xmm1
 pshufd $0x39, %xmm9, %xmm9
-pxor %xmm6, %xmm0
-pxor %xmm14, %xmm8
+pxor %xmm5, %xmm0
+pxor %xmm7, %xmm8
+movdqa %xmm3, %xmm4
+movdqa %xmm11, %xmm6
 paddd %xmm0, %xmm4
-paddd %xmm8, %xmm12
-movdqa %xmm0, %xmm5
-movdqa %xmm8, %xmm13
-movdqa %xmm4, %xmm6
-movdqa %xmm12, %xmm14
+paddd %xmm8, %xmm6
+movdqa %xmm4, %xmm5
+movdqa %xmm6, %xmm7
 pslld $7, %xmm4
-pslld $7, %xmm12
-psrld $25, %xmm6
-psrld $25, %xmm14
+pslld $7, %xmm6
+psrld $25, %xmm5
+psrld $25, %xmm7
 pxor %xmm4, %xmm1
-pxor %xmm12, %xmm9
-pxor %xmm6, %xmm1
-pxor %xmm14, %xmm9
-paddd %xmm1, %xmm5
-paddd %xmm9, %xmm13
+pxor %xmm6, %xmm9
+pxor %xmm5, %xmm1
+pxor %xmm7, %xmm9
+movdqa %xmm0, %xmm4
+movdqa %xmm8, %xmm6
+paddd %xmm1, %xmm4
+paddd %xmm9, %xmm6
+movdqa %xmm4, %xmm5
+movdqa %xmm6, %xmm7
+pslld $9, %xmm4
+pslld $9, %xmm6
+psrld $23, %xmm5
+psrld $23, %xmm7
+pxor %xmm4, %xmm2
+pxor %xmm6, %xmm10
 movdqa %xmm1, %xmm4
-movdqa %xmm9, %xmm12
-movdqa %xmm5, %xmm6
-movdqa %xmm13, %xmm14
-pslld $9, %xmm5
-pslld $9, %xmm13
-psrld $23, %xmm6
-psrld $23, %xmm14
-pxor %xmm5, %xmm2
-pxor %xmm13, %xmm10
+movdqa %xmm9, %xmm6
 pshufd $0x93, %xmm1, %xmm1
 pshufd $0x93, %xmm9, %xmm9
-pxor %xmm6, %xmm2
-pxor %xmm14, %xmm10
+pxor %xmm5, %xmm2
+pxor %xmm7, %xmm10
 paddd %xmm2, %xmm4
-paddd %xmm10, %xmm12
-movdqa %xmm2, %xmm5
-movdqa %xmm10, %xmm13
-movdqa %xmm4, %xmm6
-movdqa %xmm12, %xmm14
+paddd %xmm10, %xmm6
+movdqa %xmm4, %xmm5
+movdqa %xmm6, %xmm7
 pslld $13, %xmm4
-pslld $13, %xmm12
-psrld $19, %xmm6
-psrld $19, %xmm14
+pslld $13, %xmm6
+psrld $19, %xmm5
+psrld $19, %xmm7
 pxor %xmm4, %xmm3
-pxor %xmm12, %xmm11
+pxor %xmm6, %xmm11
+movdqa %xmm2, %xmm4
+movdqa %xmm10, %xmm6
 pshufd $0x4e, %xmm2, %xmm2
 pshufd $0x4e, %xmm10, %xmm10
-pxor %xmm6, %xmm3
-pxor %xmm14, %xmm11
-paddd %xmm3, %xmm5
-paddd %xmm11, %xmm13
-movdqa %xmm1, %xmm4
-movdqa %xmm9, %xmm12
-movdqa %xmm5, %xmm6
-movdqa %xmm13, %xmm14
-pslld $18, %xmm5
-pslld $18, %xmm13
-psrld $14, %xmm6
-psrld $14, %xmm14
-pxor %xmm5, %xmm0
-pxor %xmm13, %xmm8
+pxor %xmm5, %xmm3
+pxor %xmm7, %xmm11
+paddd %xmm3, %xmm4
+paddd %xmm11, %xmm6
+movdqa %xmm4, %xmm5
+movdqa %xmm6, %xmm7
+pslld $18, %xmm4
+pslld $18, %xmm6
+psrld $14, %xmm5
+psrld $14, %xmm7
+pxor %xmm4, %xmm0
+pxor %xmm6, %xmm8
 pshufd $0x39, %xmm3, %xmm3
 pshufd $0x39, %xmm11, %xmm11
-pxor %xmm6, %xmm0
-pxor %xmm14, %xmm8
+pxor %xmm5, %xmm0
+pxor %xmm7, %xmm8
 .endm
 .macro xmm_dual_salsa8_core
-movdqa %xmm1, %xmm4
-movdqa %xmm9, %xmm12
 xmm_dual_salsa8_core_doubleround
 xmm_dual_salsa8_core_doubleround
 xmm_dual_salsa8_core_doubleround

@@ -558,74 +558,78 @@ gen_scrypt_core_loop2:
 .macro xmm_salsa8_core_doubleround
+movdqa %xmm1, %xmm4
 paddd %xmm0, %xmm4
-movdqa %xmm0, %xmm5
-movdqa %xmm4, %xmm6
+movdqa %xmm4, %xmm5
 pslld $7, %xmm4
-psrld $25, %xmm6
+psrld $25, %xmm5
 pxor %xmm4, %xmm3
-pxor %xmm6, %xmm3
-paddd %xmm3, %xmm5
+pxor %xmm5, %xmm3
+movdqa %xmm0, %xmm4
+paddd %xmm3, %xmm4
+movdqa %xmm4, %xmm5
+pslld $9, %xmm4
+psrld $23, %xmm5
+pxor %xmm4, %xmm2
 movdqa %xmm3, %xmm4
-movdqa %xmm5, %xmm6
-pslld $9, %xmm5
-psrld $23, %xmm6
-pxor %xmm5, %xmm2
 pshufd $0x93, %xmm3, %xmm3
-pxor %xmm6, %xmm2
+pxor %xmm5, %xmm2
 paddd %xmm2, %xmm4
-movdqa %xmm2, %xmm5
-movdqa %xmm4, %xmm6
+movdqa %xmm4, %xmm5
 pslld $13, %xmm4
-psrld $19, %xmm6
+psrld $19, %xmm5
 pxor %xmm4, %xmm1
+movdqa %xmm2, %xmm4
 pshufd $0x4e, %xmm2, %xmm2
-pxor %xmm6, %xmm1
-paddd %xmm1, %xmm5
-movdqa %xmm3, %xmm4
-movdqa %xmm5, %xmm6
-pslld $18, %xmm5
-psrld $14, %xmm6
-pxor %xmm5, %xmm0
+pxor %xmm5, %xmm1
+paddd %xmm1, %xmm4
+movdqa %xmm4, %xmm5
+pslld $18, %xmm4
+psrld $14, %xmm5
+pxor %xmm4, %xmm0
 pshufd $0x39, %xmm1, %xmm1
-pxor %xmm6, %xmm0
+pxor %xmm5, %xmm0
+movdqa %xmm3, %xmm4
 paddd %xmm0, %xmm4
-movdqa %xmm0, %xmm5
-movdqa %xmm4, %xmm6
+movdqa %xmm4, %xmm5
 pslld $7, %xmm4
-psrld $25, %xmm6
+psrld $25, %xmm5
 pxor %xmm4, %xmm1
-pxor %xmm6, %xmm1
-paddd %xmm1, %xmm5
+pxor %xmm5, %xmm1
+movdqa %xmm0, %xmm4
+paddd %xmm1, %xmm4
+movdqa %xmm4, %xmm5
+pslld $9, %xmm4
+psrld $23, %xmm5
+pxor %xmm4, %xmm2
 movdqa %xmm1, %xmm4
-movdqa %xmm5, %xmm6
-pslld $9, %xmm5
-psrld $23, %xmm6
-pxor %xmm5, %xmm2
 pshufd $0x93, %xmm1, %xmm1
-pxor %xmm6, %xmm2
+pxor %xmm5, %xmm2
 paddd %xmm2, %xmm4
-movdqa %xmm2, %xmm5
-movdqa %xmm4, %xmm6
+movdqa %xmm4, %xmm5
 pslld $13, %xmm4
-psrld $19, %xmm6
+psrld $19, %xmm5
 pxor %xmm4, %xmm3
+movdqa %xmm2, %xmm4
 pshufd $0x4e, %xmm2, %xmm2
-pxor %xmm6, %xmm3
-subl $2, %eax
-paddd %xmm3, %xmm5
-movdqa %xmm1, %xmm4
-movdqa %xmm5, %xmm6
-pslld $18, %xmm5
-psrld $14, %xmm6
-pxor %xmm5, %xmm0
+pxor %xmm5, %xmm3
+paddd %xmm3, %xmm4
+movdqa %xmm4, %xmm5
+pslld $18, %xmm4
+psrld $14, %xmm5
+pxor %xmm4, %xmm0
 pshufd $0x39, %xmm3, %xmm3
-pxor %xmm6, %xmm0
+pxor %xmm5, %xmm0
 .endm
 .macro xmm_salsa8_core
-movdqa %xmm1, %xmm4
 xmm_salsa8_core_doubleround
 xmm_salsa8_core_doubleround
 xmm_salsa8_core_doubleround

@@ -42,8 +42,8 @@ typedef struct SHA256Context {
 } SHA256_CTX;
 /*
- * Encode a length len/4 vector of (uint32_t) into a length len vector of
- * (unsigned char) in big-endian form. Assumes len is a multiple of 4.
+ * Encode a length len vector of (uint32_t) into a length len vector of
+ * (unsigned char) in big-endian form.
  */
 static inline void
 be32enc_vect(uint32_t *dst, const uint32_t *src, uint32_t len)
@@ -225,7 +225,7 @@ PBKDF2_SHA256_80_128_init(const uint32_t *passwd, uint32_t tstate[8], uint32_t o
 /**
  * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
  * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
- * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
+ * write the output to buf.
  */
 static inline void
 PBKDF2_SHA256_80_128(const uint32_t *tstate, const uint32_t *ostate, const uint32_t *passwd, uint32_t *buf)
@@ -406,7 +406,7 @@ unsigned char *scrypt_buffer_alloc() {
 scratchpad size needs to be at least 63 + (128 * r * p) + (256 * r + 64) + (128 * r * N) bytes
 r = 1, p = 1, N = 1024
 */
-static void scrypt_1024_1_1_256_sp(const uint32_t* input, unsigned char *scratchpad, uint32_t *res)
+static void scrypt_1024_1_1_256_sp(const uint32_t* input, uint32_t *res, unsigned char *scratchpad)
 {
 uint32_t tstate[8], ostate[8];
 uint32_t *V;
@@ -422,7 +422,7 @@ static void scrypt_1024_1_1_256_sp(const uint32_t* input, unsigned char *scratch
 }
 #ifdef DUAL_SCRYPT
-static void dual_scrypt_1024_1_1_256_sp(const uint32_t *input1, const uint32_t *input2, unsigned char *scratchpad, uint32_t *res1, uint32_t *res2)
+static void dual_scrypt_1024_1_1_256_sp(const uint32_t *input1, const uint32_t *input2, uint32_t *res1, uint32_t *res2, unsigned char *scratchpad)
 {
 uint32_t tstate1[8], tstate2[8], ostate1[8], ostate2[8];
 uint32_t *V;
@@ -466,17 +466,17 @@ int scanhash_scrypt(int thr_id, unsigned char *pdata, unsigned char *scratchbuf,
 #ifdef DUAL_SCRYPT
 if (use_dual) {
 data2[19] = n++;
-dual_scrypt_1024_1_1_256_sp(data, data2, scratchbuf, hash, hash2);
+dual_scrypt_1024_1_1_256_sp(data, data2, hash, hash2, scratchbuf);
 if (hash2[7] <= Htarg) {
 ((uint32_t *)pdata)[19] = byteswap(data2[19]);
 *hashes_done = n;
 return true;
 }
 } else {
-scrypt_1024_1_1_256_sp(data, scratchbuf, hash);
+scrypt_1024_1_1_256_sp(data, hash, scratchbuf);
 }
 #else
-scrypt_1024_1_1_256_sp(data, scratchbuf, hash);
+scrypt_1024_1_1_256_sp(data, hash, scratchbuf);
 #endif
 if (hash[7] <= Htarg) {