Enable W191 and W291 flake8 checks.
Remove trailing whitespace from Python files. Convert tabs to spaces.
parent 1c58250350
commit 0d31ef4762

13 changed files with 367 additions and 365 deletions
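For reference, W191 fires on indentation that contains a tab character and W291 on whitespace left at the end of a line. A minimal sketch of what this commit mechanically changes (hypothetical file name; the flake8 report lines quoted in the comments are approximate):

# lint_example.py -- hypothetical file violating the two newly enabled checks.
def answer():
	return 42	

# flake8 --select=W191,W291 lint_example.py reports something like:
#   lint_example.py:3:1: W191 indentation contains tabs
#   lint_example.py:3:11: W291 trailing whitespace

# The fix applied throughout this commit: re-indent with spaces and strip
# the trailing whitespace.
def answer_fixed():
    return 42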
@@ -506,7 +506,7 @@ def file_has_hashbang(file_lines):

def insert_python_header(filename, file_lines, start_year, end_year):
    if file_has_hashbang(file_lines):
        insert_idx = 1
    else:
        insert_idx = 0
    header_lines = get_python_header_lines_to_insert(start_year, end_year)
@@ -571,7 +571,7 @@ def insert_cmd(argv):
    if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
        sys.exit("*** cannot insert for file extension %s" % extension)

    if extension == '.py':
        style = 'python'
    else:
        style = 'cpp'
@@ -61,6 +61,8 @@
# F823 local variable name … referenced before assignment
# F831 duplicate argument name in function definition
# F841 local variable 'foo' is assigned to but never used
+# W191 indentation contains tabs
+# W291 trailing whitespace
# W292 no newline at end of file
# W293 blank line contains whitespace
# W504 line break after binary operator

@@ -71,4 +73,4 @@
# W605 invalid escape sequence "x"
# W606 'async' and 'await' are reserved keywords starting with Python 3.7

-flake8 --ignore=B,C,E,F,I,N,W --select=E112,E113,E115,E116,E125,E131,E133,E223,E224,E242,E266,E271,E272,E273,E274,E275,E304,E306,E401,E402,E502,E701,E702,E703,E714,E721,E741,E742,E743,F401,E901,E902,F402,F404,F406,F407,F601,F602,F621,F622,F631,F701,F702,F703,F704,F705,F706,F707,F811,F812,F821,F822,F823,F831,F841,W292,W293,W504,W601,W602,W603,W604,W605,W606 .
+flake8 --ignore=B,C,E,F,I,N,W --select=E112,E113,E115,E116,E125,E131,E133,E223,E224,E242,E266,E271,E272,E273,E274,E275,E304,E306,E401,E402,E502,E701,E702,E703,E714,E721,E741,E742,E743,F401,E901,E902,F402,F404,F406,F407,F601,F602,F621,F622,F631,F701,F702,F703,F704,F705,F706,F707,F811,F812,F821,F822,F823,F831,F841,W191,W291,W292,W293,W504,W601,W602,W603,W604,W605,W606 .
@@ -150,7 +150,7 @@ def check_PE_DYNAMIC_BASE(executable):

def check_PE_HIGH_ENTROPY_VA(executable):
    '''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
    (arch,bits) = get_PE_dll_characteristics(executable)
    if arch == 'i386:x86-64':
        reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
    else: # Unnecessary on 32-bit
        assert(arch == 'i386')
@@ -32,15 +32,15 @@ class TestSecurityChecks(unittest.TestCase):
        cc = 'gcc'
        write_testcode(source)

        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro']),
                (1, executable+': failed PIE NX RELRO Canary'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro']),
                (1, executable+': failed PIE RELRO Canary'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro']),
                (1, executable+': failed PIE RELRO'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']),
                (1, executable+': failed RELRO'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']),
                (0, ''))

    def test_32bit_PE(self):
@@ -49,11 +49,11 @@ class TestSecurityChecks(unittest.TestCase):
        cc = 'i686-w64-mingw32-gcc'
        write_testcode(source)

        self.assertEqual(call_security_check(cc, source, executable, []),
                (1, executable+': failed DYNAMIC_BASE NX'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat']),
                (1, executable+': failed DYNAMIC_BASE'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase']),
                (0, ''))

    def test_64bit_PE(self):
        source = 'test1.c'
@@ -22,300 +22,300 @@ from binascii import hexlify, unhexlify
settings = {}

def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
    return b''.join(pairList[::-1]).decode()

def uint32(x):
    return x & 0xffffffff

def bytereverse(x):
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))

def bufreverse(in_buf):
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return b''.join(out_words)

def wordreverse(in_buf):
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return b''.join(out_words)

def calc_hdr_hash(blk_hdr):
    hash1 = hashlib.sha256()
    hash1.update(blk_hdr)
    hash1_o = hash1.digest()

    hash2 = hashlib.sha256()
    hash2.update(hash1_o)
    hash2_o = hash2.digest()

    return hash2_o

def calc_hash_str(blk_hdr):
    hash = calc_hdr_hash(blk_hdr)
    hash = bufreverse(hash)
    hash = wordreverse(hash)
    hash_str = hexlify(hash).decode('utf-8')
    return hash_str

def get_blk_dt(blk_hdr):
    members = struct.unpack("<I", blk_hdr[68:68+4])
    nTime = members[0]
    dt = datetime.datetime.fromtimestamp(nTime)
    dt_ym = datetime.datetime(dt.year, dt.month, 1)
    return (dt_ym, nTime)

# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
    blkindex = []
    f = open(settings['hashlist'], "r")
    for line in f:
        line = line.rstrip()
        if settings['rev_hash_bytes'] == 'true':
            line = hex_switchEndian(line)
        blkindex.append(line)

    print("Read " + str(len(blkindex)) + " hashes")

    return blkindex

# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
    blkmap = {}
    for height,hash in enumerate(blkindex):
        blkmap[hash] = height
    return blkmap

# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])

class BlockDataCopier:
    def __init__(self, settings, blkindex, blkmap):
        self.settings = settings
        self.blkindex = blkindex
        self.blkmap = blkmap

        self.inFn = 0
        self.inF = None
        self.outFn = 0
        self.outsz = 0
        self.outF = None
        self.outFname = None
        self.blkCountIn = 0
        self.blkCountOut = 0

        self.lastDate = datetime.datetime(2000, 1, 1)
        self.highTS = 1408893517 - 315360000
        self.timestampSplit = False
        self.fileOutput = True
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0 # running total size for items in outOfOrderData

    def writeBlock(self, inhdr, blk_hdr, rawblock):
        blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
        if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
            self.outF.close()
            if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
            self.outF = None
            self.outFname = None
            self.outFn = self.outFn + 1
            self.outsz = 0

        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
            self.lastDate = blkDate
            if self.outF:
                self.outF.close()
                if self.setFileTime:
                    os.utime(self.outFname, (int(time.time()), self.highTS))
                self.outF = None
                self.outFname = None
                self.outFn = self.outFn + 1
                self.outsz = 0

        if not self.outF:
            if self.fileOutput:
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")

        self.outF.write(inhdr)
        self.outF.write(blk_hdr)
        self.outF.write(rawblock)
        self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)

        self.blkCountOut = self.blkCountOut + 1
        if blkTS > self.highTS:
            self.highTS = blkTS

        if (self.blkCountOut % 1000) == 0:
            print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
                (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

    def inFileName(self, fn):
        return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

    def fetchBlock(self, extent):
        '''Fetch block contents from disk given extents'''
        with open(self.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)

    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else: # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)

        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

    def run(self):
        while self.blkCountOut < len(self.blkindex):
            if not self.inF:
                fname = self.inFileName(self.inFn)
                print("Input file " + fname)
                try:
                    self.inF = open(fname, "rb")
                except IOError:
                    print("Premature end of block data")
                    return

            inhdr = self.inF.read(8)
            if (not inhdr or (inhdr[0] == "\0")):
                self.inF.close()
                self.inF = None
                self.inFn = self.inFn + 1
                continue

            inMagic = inhdr[:4]
            if (inMagic != self.settings['netmagic']):
                print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
                return
            inLenLE = inhdr[4:]
            su = struct.unpack("<I", inLenLE)
            inLen = su[0] - 80 # length without header
            blk_hdr = self.inF.read(80)
            inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

            self.hash_str = calc_hash_str(blk_hdr)
            if not self.hash_str in blkmap:
                # Because blocks can be written to files out-of-order as of 0.10, the script
                # may encounter blocks it doesn't know about. Treat as debug output.
                if settings['debug_output'] == 'true':
                    print("Skipping unknown block " + self.hash_str)
                self.inF.seek(inLen, os.SEEK_CUR)
                continue

            blkHeight = self.blkmap[self.hash_str]
            self.blkCountIn += 1

            if self.blkCountOut == blkHeight:
                # If in-order block, just copy
                rawblock = self.inF.read(inLen)
                self.writeBlock(inhdr, blk_hdr, rawblock)

                # See if we can catch up to prior out-of-order blocks
                while self.blkCountOut in self.blockExtents:
                    self.copyOneBlock()

            else: # If out-of-order, skip over block data for now
                self.blockExtents[blkHeight] = inExtent
                if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
                    # If there is space in the cache, read the data
                    # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                    # but we don't want to fill up memory
                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
                    self.outOfOrderSize += inLen
                else: # If no space in cache, seek forward
                    self.inF.seek(inLen, os.SEEK_CUR)

        print("Done (%i blocks written)" % (self.blkCountOut))

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-data.py CONFIG-FILE")
        sys.exit(1)

    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Force hash byte format setting to be lowercase to make comparisons easier.
    # Also place upfront in case any settings need to know about it.
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    if 'netmagic' not in settings:
        settings['netmagic'] = 'f9beb4d9'
    if 'genesis' not in settings:
        settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
    if 'input' not in settings:
        settings['input'] = 'input'
    if 'hashlist' not in settings:
        settings['hashlist'] = 'hashlist.txt'
    if 'file_timestamp' not in settings:
        settings['file_timestamp'] = 0
    if 'split_timestamp' not in settings:
        settings['split_timestamp'] = 0
    if 'max_out_sz' not in settings:
        settings['max_out_sz'] = 1000 * 1000 * 1000
    if 'out_of_order_cache_sz' not in settings:
        settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
    if 'debug_output' not in settings:
        settings['debug_output'] = 'false'

    settings['max_out_sz'] = int(settings['max_out_sz'])
    settings['split_timestamp'] = int(settings['split_timestamp'])
    settings['file_timestamp'] = int(settings['file_timestamp'])
    settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
    settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
    settings['debug_output'] = settings['debug_output'].lower()

    if 'output_file' not in settings and 'output' not in settings:
        print("Missing output file / directory")
        sys.exit(1)

    blkindex = get_block_hashes(settings)
    blkmap = mkblockmap(blkindex)

    # Block hash map won't be byte-reversed. Neither should the genesis hash.
    if not settings['genesis'] in blkmap:
        print("Genesis block not found in hashlist")
    else:
        BlockDataCopier(settings, blkindex, blkmap).run()
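As a quick check of the hash-list handling above: hex_switchEndian reverses a hex string one byte-pair at a time. Applied to the script's default netmagic value, for example:

# Standalone copy of hex_switchEndian from the diff above, plus a usage example.
def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
    return b''.join(pairList[::-1]).decode()

print(hex_switchEndian('f9beb4d9'))  # prints 'd9b4bef9'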
@@ -22,135 +22,135 @@ import os.path
settings = {}

def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
    return b''.join(pairList[::-1]).decode()

class BitcoinRPC:
    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        authpair = authpair.encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(authpair)
        self.conn = httplib.HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        try:
            self.conn.request('POST', '/', json.dumps(obj),
                { 'Authorization' : self.authhdr,
                  'Content-type' : 'application/json' })
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        body = resp.read().decode('utf-8')
        resp_obj = json.loads(body)
        return resp_obj

    @staticmethod
    def build_request(idx, method, params):
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : idx }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        return obj

    @staticmethod
    def response_is_error(resp_obj):
        return 'error' in resp_obj and resp_obj['error'] is not None

def get_block_hashes(settings, max_blocks_per_call=10000):
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    while height < settings['max_height']+1:
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))

        reply = rpc.execute(batch)
        if reply is None:
            print('Cannot continue. Program will halt.')
            return None

        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                sys.exit(1)
            assert(resp_obj['id'] == x) # assume replies are in-sequence
            if settings['rev_hash_bytes'] == 'true':
                resp_obj['result'] = hex_switchEndian(resp_obj['result'])
            print(resp_obj['result'])

        height += num_blocks

def get_rpc_cookie():
    # Open the cookie file
    with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r') as f:
        combined = f.readline()
        combined_split = combined.split(":")
        settings['rpcuser'] = combined_split[0]
        settings['rpcpassword'] = combined_split[1]

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'

    use_userpass = True
    use_datadir = False
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        use_userpass = False
    if 'datadir' in settings and not use_userpass:
        use_datadir = True
    if not use_userpass and not use_datadir:
        print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    # Get the rpc user and pass from the cookie if the datadir is set
    if use_datadir:
        get_rpc_cookie()

    get_block_hashes(settings)
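The batching loop in get_block_hashes above issues one JSON-RPC object per height. A small sketch of the payload shape it builds (heights chosen only for illustration):

# Standalone copy of BitcoinRPC.build_request from the diff, applied to a
# two-call getblockhash batch (example heights only).
def build_request(idx, method, params):
    obj = {'version': '1.1', 'method': method, 'id': idx}
    obj['params'] = [] if params is None else params
    return obj

batch = [build_request(x, 'getblockhash', [500000 + x]) for x in range(2)]
# batch == [{'version': '1.1', 'method': 'getblockhash', 'id': 0, 'params': [500000]},
#           {'version': '1.1', 'method': 'getblockhash', 'id': 1, 'params': [500001]}]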
@@ -11,7 +11,7 @@ argument:
    nodes_main.txt
    nodes_test.txt

These files must consist of lines in the format

    <ip>
    <ip>:<port>
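Concretely, a hypothetical excerpt from one of these files (addresses invented for illustration):

1.2.3.4
5.6.7.8:8333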
@@ -67,7 +67,7 @@ class BIP68Test(BitcoinTestFramework):
        # If sequence locks were used, this would require 1 block for the
        # input to mature.
        sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
        tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
        tx1.vout = [CTxOut(value, CScript([b'a']))]

        tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"]
@@ -100,7 +100,7 @@ class MaxUploadTest(BitcoinTestFramework):
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            p2p_conns[0].send_message(getdata_request)
@@ -79,9 +79,9 @@ class ProxyTest(BitcoinTestFramework):
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
@@ -69,7 +69,7 @@ class FeeFilterTest(BitcoinTestFramework):
        # Change tx fee rate to 10 sat/byte and test they are no longer received
        node1.settxfee(Decimal("0.00010000"))
        [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
        sync_mempools(self.nodes) # must be sure node 0 has received all txs

        # Send one transaction from node0 that should be received, so that we
        # we can sync the test on receipt (if node1's txs were relayed, they'd
@@ -1659,7 +1659,7 @@ class SegWitTest(BitcoinTestFramework):
            tx2.wit.vtxinwit.append(CTxInWitness())
            tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
            total_value += tx.vout[i].nValue
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
        tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
        tx2.rehash()
@@ -136,7 +136,7 @@ class WalletDumpTest(BitcoinTestFramework):
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_script_addr, 2)
        assert_equal(found_addr_chg, 90*2 + 50) # old reserve keys are marked as change now
        assert_equal(found_addr_rsv, 90*2)
        assert_equal(witness_addr_ret, witness_addr)

        # Overwriting should fail