Fix typos

Dimitris Apostolou 2018-03-18 16:26:45 +02:00 committed by Dimitris Apostolou
parent 00d1680498
commit 4d9b4256d8
44 changed files with 55 additions and 55 deletions


@@ -209,6 +209,6 @@ void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry,
 entry.pushKV("blockhash", hashBlock.GetHex());
 if (include_hex) {
-entry.pushKV("hex", EncodeHexTx(tx, serialize_flags)); // the hex-encoded transaction. used the name "hex" to be consistent with the verbose output of "getrawtransaction".
+entry.pushKV("hex", EncodeHexTx(tx, serialize_flags)); // The hex-encoded transaction. Used the name "hex" to be consistent with the verbose output of "getrawtransaction".
 }
 }


@@ -224,7 +224,7 @@ private:
 *
 * Instead we treat the 32-bit random number as a Q32 fixed-point number in the range
 * [0,1) and simply multiply it by the size. Then we just shift the result down by
-* 32-bits to get our bucket number. The results has non-uniformity the same as a
+* 32-bits to get our bucket number. The result has non-uniformity the same as a
 * mod, but it is much faster to compute. More about this technique can be found at
 * http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
 *
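
The comment above describes Lemire's multiply-and-shift alternative to a modulo reduction. As a minimal sketch (a free function with an assumed name, not the class's actual member), mapping a uniform 32-bit value onto size buckets looks roughly like this:

#include <cstdint>

// Map a uniform 32-bit value x onto [0, size) without a modulo: treat x as a
// Q32 fixed-point number in [0, 1), multiply by size in 64-bit arithmetic,
// then keep the high 32 bits, i.e. floor((x / 2^32) * size).
inline uint32_t fast_range32(uint32_t x, uint32_t size)
{
    return static_cast<uint32_t>((static_cast<uint64_t>(x) * size) >> 32);
}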


@@ -1954,7 +1954,7 @@ void CConnman::ThreadOpenAddedConnections()
 for (const AddedNodeInfo& info : vInfo) {
 if (!info.fConnected) {
 if (!grant.TryAcquire()) {
-// If we've used up our semaphore and need a new one, lets not wait here since while we are waiting
+// If we've used up our semaphore and need a new one, let's not wait here since while we are waiting
 // the addednodeinfo state might change.
 break;
 }


@@ -339,7 +339,7 @@ bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex*
 CNodeState *state = State(nodeid);
 assert(state != nullptr);
-// Short-circuit most stuff in case its from the same node
+// Short-circuit most stuff in case it is from the same node
 std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
 if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
 if (pit) {

@@ -518,7 +518,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<con
 }
 // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
-// are not yet downloaded and not in flight to vBlocks. In the mean time, update
+// are not yet downloaded and not in flight to vBlocks. In the meantime, update
 // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
 // already part of our chain (and therefore don't need it even if pruned).
 for (const CBlockIndex* pindex : vToFetch) {


@@ -206,7 +206,7 @@ void CoinControlDialog::showMenu(const QPoint &point)
 contextMenuItem = item;
 // disable some items (like Copy Transaction ID, lock, unlock) for tree roots in context menu
-if (item->text(COLUMN_TXHASH).length() == 64) // transaction hash is 64 characters (this means its a child node, so its not a parent node in tree mode)
+if (item->text(COLUMN_TXHASH).length() == 64) // transaction hash is 64 characters (this means it is a child node, so it is not a parent node in tree mode)
 {
 copyTransactionHashAction->setEnabled(true);
 if (model->isLockedCoin(uint256S(item->text(COLUMN_TXHASH).toStdString()), item->text(COLUMN_VOUT_INDEX).toUInt()))

@@ -374,7 +374,7 @@ void CoinControlDialog::radioListMode(bool checked)
 // checkbox clicked by user
 void CoinControlDialog::viewItemChanged(QTreeWidgetItem* item, int column)
 {
-if (column == COLUMN_CHECKBOX && item->text(COLUMN_TXHASH).length() == 64) // transaction hash is 64 characters (this means its a child node, so its not a parent node in tree mode)
+if (column == COLUMN_CHECKBOX && item->text(COLUMN_TXHASH).length() == 64) // transaction hash is 64 characters (this means it is a child node, so it is not a parent node in tree mode)
 {
 COutPoint outpt(uint256S(item->text(COLUMN_TXHASH).toStdString()), item->text(COLUMN_VOUT_INDEX).toUInt());


@@ -141,7 +141,7 @@ namespace GUIUtil
 * Makes a QTableView last column feel as if it was being resized from its left border.
 * Also makes sure the column widths are never larger than the table's viewport.
 * In Qt, all columns are resizable from the right, but it's not intuitive resizing the last column from the right.
-* Usually our second to last columns behave as if stretched, and when on strech mode, columns aren't resizable
+* Usually our second to last columns behave as if stretched, and when on stretch mode, columns aren't resizable
 * interactively or programmatically.
 *
 * This helper object takes care of this issue.


@@ -81,7 +81,7 @@ void ModalOverlay::tipUpdate(int count, const QDateTime& blockDate, double nVeri
 // keep a vector of samples of verification progress at height
 blockProcessTime.push_front(qMakePair(currentDate.toMSecsSinceEpoch(), nVerificationProgress));
-// show progress speed if we have more then one sample
+// show progress speed if we have more than one sample
 if (blockProcessTime.size() >= 2) {
 double progressDelta = 0;
 double progressPerHour = 0;
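
The hunk above collects (timestamp, verification progress) samples and derives a progress rate from them. A minimal sketch of that arithmetic, with assumed names and a plain pair in place of the Qt types (not the dialog's actual code):

#include <cstdint>
#include <utility>

// Estimate verification progress gained per hour from the newest and an
// older (milliseconds-since-epoch, progress) sample.
double ProgressPerHour(const std::pair<int64_t, double>& newest,
                       const std::pair<int64_t, double>& older)
{
    const double progressDelta = newest.second - older.second;
    const double hours = (newest.first - older.first) / (1000.0 * 3600.0);
    return hours > 0 ? progressDelta / hours : 0.0;
}

// Hours of syncing left, given the progress still missing.
double RemainingHours(double progressPerHour, double currentProgress)
{
    return progressPerHour > 0 ? (1.0 - currentProgress) / progressPerHour : 0.0;
}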


@@ -91,7 +91,7 @@ SplashScreen::SplashScreen(Qt::WindowFlags f, const NetworkStyle *networkStyle)
 pixPaint.setFont(QFont(font, 15*fontFactor));
-// if the version string is to long, reduce size
+// if the version string is too long, reduce size
 fm = pixPaint.fontMetrics();
 int versionTextWidth = fm.width(versionText);
 if(versionTextWidth > titleTextWidth+paddingRight-10) {


@@ -698,7 +698,7 @@ bool WalletModel::bumpFee(uint256 hash)
 confirmationDialog.exec();
 QMessageBox::StandardButton retval = static_cast<QMessageBox::StandardButton>(confirmationDialog.result());
-// cancel sign&broadcast if users doesn't want to bump the fee
+// cancel sign&broadcast if user doesn't want to bump the fee
 if (retval != QMessageBox::Yes) {
 return false;
 }


@@ -32,7 +32,7 @@ void RandAddSeedSleep();
 /**
 * Function to gather random data from multiple sources, failing whenever any
-* of those source fail to provide a result.
+* of those sources fail to provide a result.
 */
 void GetStrongRandBytes(unsigned char* buf, int num);


@@ -110,7 +110,7 @@ bool static IsValidSignatureEncoding(const std::vector<unsigned char> &sig) {
 // excluding the sighash byte.
 // * R-length: 1-byte length descriptor of the R value that follows.
 // * R: arbitrary-length big-endian encoded R value. It must use the shortest
-// possible encoding for a positive integers (which means no null bytes at
+// possible encoding for a positive integer (which means no null bytes at
 // the start, except a single one when the next byte has its highest bit set).
 // * S-length: 1-byte length descriptor of the S value that follows.
 // * S: arbitrary-length big-endian encoded S value. The same rules apply.
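
The comment spells out the "shortest possible encoding" rule for the R and S integers. A condensed sketch of just that rule, applied to one extracted integer (an illustrative helper with an assumed name, not the full validation function, which also checks the outer DER structure):

#include <vector>

// A DER positive integer is minimally encoded when it is non-empty, its first
// byte does not have the high bit set (that would make it negative), and it
// carries no leading zero byte unless the zero is needed because the next
// byte's high bit is set.
bool IsMinimalPositiveInteger(const std::vector<unsigned char>& val)
{
    if (val.empty()) return false;
    if (val[0] & 0x80) return false;                          // would be negative
    if (val.size() > 1 && val[0] == 0x00 && !(val[1] & 0x80)) // superfluous padding
        return false;
    return true;
}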


@@ -184,7 +184,7 @@ void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
 size_t Win32LockedPageAllocator::GetLimit()
 {
-// TODO is there a limit on windows, how to get it?
+// TODO is there a limit on Windows, how to get it?
 return std::numeric_limits<size_t>::max();
 }
 #endif


@@ -491,7 +491,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
 // this test could be a security issue.
 BOOST_CHECK(info1.GetNewBucket(nKey1) != info1.GetNewBucket(nKey2));
-// Test: Ports should not effect bucket placement in the addr
+// Test: Ports should not affect bucket placement in the addr
 CAddrInfo info2 = CAddrInfo(addr2, source1);
 BOOST_CHECK(info1.GetKey() != info2.GetKey());
 BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1), info2.GetNewBucket(nKey1));


@@ -313,7 +313,7 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
 auto utxod = FindRandomFrom(coinbase_coins);
 // Reuse the exact same coinbase
 tx = std::get<0>(utxod->second);
-// shouldn't be available for reconnection if its been duplicated
+// shouldn't be available for reconnection if it's been duplicated
 disconnected_coins.erase(utxod->first);
 duplicate_coins.insert(utxod->first);


@@ -163,7 +163,7 @@ void test_cache_erase(size_t megabytes)
 for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
 set.insert(hashes_insert_copy[i]);
-/** elements that we marked erased but that are still there */
+/** elements that we marked as erased but are still there */
 size_t count_erased_but_contained = 0;
 /** elements that we did not erase but are older */
 size_t count_stale = 0;

@@ -303,7 +303,7 @@ void test_cache_generations()
 local_rand_ctx = FastRandomContext(true);
 // block_activity models a chunk of network activity. n_insert elements are
-// adde to the cache. The first and last n/4 are stored for removal later
+// added to the cache. The first and last n/4 are stored for removal later
 // and the middle n/2 are not stored. This models a network which uses half
 // the signatures of recently (since the last block) added transactions
 // immediately and never uses the other half.


@@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(iterator_ordering)
 }
 struct StringContentsSerializer {
-// Used to make two serialized objects the same while letting them have a different lengths
+// Used to make two serialized objects the same while letting them have different lengths
 // This is a terrible idea
 std::string str;
 StringContentsSerializer() {}


@@ -523,7 +523,7 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
 pool.addUnchecked(tx6.GetHash(), entry.Fee(1100LL).FromTx(tx6));
 pool.addUnchecked(tx7.GetHash(), entry.Fee(9000LL).FromTx(tx7));
-// we only require this remove, at max, 2 txn, because its not clear what we're really optimizing for aside from that
+// we only require this to remove, at max, 2 txn, because it's not clear what we're really optimizing for aside from that
 pool.TrimToSize(pool.DynamicMemoryUsage() - 1);
 BOOST_CHECK(pool.exists(tx4.GetHash()));
 BOOST_CHECK(pool.exists(tx6.GetHash()));


@@ -35,7 +35,7 @@ ToMemPool(CMutableTransaction& tx)
 BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
 {
-// Make sure skipping validation of transctions that were
+// Make sure skipping validation of transactions that were
 // validated going into the memory pool does not allow
 // double-spends in blocks to pass validation when they should not.


@@ -155,7 +155,7 @@ namespace tfm = tinyformat;
 #endif
 #ifdef __APPLE__
-// Workaround OSX linker warning: xcode uses different default symbol
+// Workaround OSX linker warning: Xcode uses different default symbol
 // visibilities for static libs vs executables (see issue #25)
 # define TINYFORMAT_HIDDEN __attribute__((visibility("hidden")))
 #else

@@ -592,7 +592,7 @@ inline const char* printFormatStringLiteral(std::ostream& out, const char* fmt)
 // Formatting options which can't be natively represented using the ostream
 // state are returned in spacePadPositive (for space padded positive numbers)
 // and ntrunc (for truncating conversions). argIndex is incremented if
-// necessary to pull out variable width and precision . The function returns a
+// necessary to pull out variable width and precision. The function returns a
 // pointer to the character after the end of the current format spec.
 inline const char* streamStateFromFormat(std::ostream& out, bool& spacePadPositive,
 int& ntrunc, const char* fmtStart,


@@ -689,7 +689,7 @@ private:
 };
 /**
-* CCoinsView that brings transactions from a memorypool into view.
+* CCoinsView that brings transactions from a mempool into view.
 * It does not check for spendings by memory pool transactions.
 * Instead, it provides access to all Coins which are either unspent in the
 * base CCoinsView, or are outputs from any mempool transaction!
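
A hedged illustration of the lookup behaviour this comment describes, using simplified stand-in types and names rather than the class's actual implementation: a coin query first consults the outputs of unconfirmed transactions and only then falls back to the base view.

#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-ins: txid -> output values of an unconfirmed transaction,
// and (txid, index) -> value of a coin known to the base (confirmed) view.
using MempoolOutputs = std::map<std::string, std::vector<long long>>;
using BaseCoins = std::map<std::pair<std::string, size_t>, long long>;

// Look up an output: mempool outputs take precedence, then the base view.
std::optional<long long> GetCoin(const MempoolOutputs& pool, const BaseCoins& base,
                                 const std::string& txid, size_t n)
{
    auto it = pool.find(txid);
    if (it != pool.end()) {
        if (n < it->second.size()) return it->second[n];
        return std::nullopt;  // the mempool tx has no such output
    }
    auto b = base.find({txid, n});
    if (b != base.end()) return b->second;
    return std::nullopt;
}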


@@ -15,7 +15,7 @@
 *
 * Contains the prevout's CTxOut being spent, and its metadata as well
 * (coinbase or not, height). The serialization contains a dummy value of
-* zero. This is be compatible with older versions which expect to see
+* zero. This is compatible with older versions which expect to see
 * the transaction version there.
 */
 class TxInUndoSerializer
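
To make the "dummy value of zero" remark concrete, here is a loose sketch of the write order the comment describes: metadata first, a zero byte where older versions stored the transaction version, then the spent output. The names, the byte-wide packing and the plain byte buffer are assumptions for illustration; the real serializer uses the project's stream and VARINT machinery.

#include <cstdint>
#include <vector>

// Append the undo record for one spent output to a byte buffer.
void SerializeSpentOutput(std::vector<uint8_t>& out, uint32_t height, bool is_coinbase,
                          const std::vector<uint8_t>& compressed_txout)
{
    // Height and the coinbase flag packed into one small code value.
    const uint32_t code = height * 2 + (is_coinbase ? 1 : 0);
    out.push_back(static_cast<uint8_t>(code));  // assumption: a single byte stands in for a VARINT
    if (height > 0) {
        out.push_back(0);  // dummy: older versions expected the tx version here
    }
    out.insert(out.end(), compressed_txout.begin(), compressed_txout.end());
}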


@@ -136,7 +136,7 @@ template<typename T, typename... Args> static inline void MarkUsed(const T& t, c
 // Be conservative when using LogPrintf/error or other things which
 // unconditionally log to debug.log! It should not be the case that an inbound
-// peer can fill up a users disk with debug.log entries.
+// peer can fill up a user's disk with debug.log entries.
 #ifdef USE_COVERAGE
 #define LogPrintf(...) do { MarkUsed(__VA_ARGS__); } while(0)


@@ -949,7 +949,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
 // This transaction should only count for fee estimation if:
 // - it isn't a BIP 125 replacement transaction (may not be widely supported)
-// - it's not being readded during a reorg which bypasses typical mempool fee limits
+// - it's not being re-added during a reorg which bypasses typical mempool fee limits
 // - the node is not behind
 // - the transaction is not dependent on any other transactions in the mempool
 bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);

@@ -1852,7 +1852,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
 // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
 // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
 // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
-// before the first had been spent. Since those coinbases are sufficiently buried its no longer possible to create further
+// before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
 // duplicate transactions descending from the known pairs either.
 // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.


@@ -2015,7 +2015,7 @@ UniValue listsinceblock(const JSONRPCRequest& request)
 " ],\n"
 " \"removed\": [\n"
 " <structure is the same as \"transactions\" above, only present if include_removed=true>\n"
-" Note: transactions that were readded in the active chain will appear as-is in this array, and may thus have a positive confirmation count.\n"
+" Note: transactions that were re-added in the active chain will appear as-is in this array, and may thus have a positive confirmation count.\n"
 " ],\n"
 " \"lastblock\": \"lastblockhash\" (string) The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones\n"
 "}\n"

@@ -3578,7 +3578,7 @@ UniValue rescanblockchain(const JSONRPCRequest& request)
 throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid stop_height");
 }
 else if (pindexStop->nHeight < pindexStart->nHeight) {
-throw JSONRPCError(RPC_INVALID_PARAMETER, "stop_height must be greater then start_height");
+throw JSONRPCError(RPC_INVALID_PARAMETER, "stop_height must be greater than start_height");
 }
 }
 }


@@ -1269,7 +1269,7 @@ void CWallet::BlockUntilSyncedToCurrentChain() {
 // chainActive.Tip()...
 // We could also take cs_wallet here, and call m_last_block_processed
 // protected by cs_wallet instead of cs_main, but as long as we need
-// cs_main here anyway, its easier to just call it cs_main-protected.
+// cs_main here anyway, it's easier to just call it cs_main-protected.
 LOCK(cs_main);
 const CBlockIndex* initialChainTip = chainActive.Tip();

@@ -4186,8 +4186,8 @@ bool CWalletTx::AcceptToMemoryPool(const CAmount& nAbsurdFee, CValidationState&
 // We must set fInMempool here - while it will be re-set to true by the
 // entered-mempool callback, if we did not there would be a race where a
 // user could call sendmoney in a loop and hit spurious out of funds errors
-// because we think that the transaction they just generated's change is
-// unavailable as we're not yet aware its in mempool.
+// because we think that this newly generated transaction's change is
+// unavailable as we're not yet aware that it is in the mempool.
 bool ret = ::AcceptToMemoryPool(mempool, state, tx, nullptr /* pfMissingInputs */,
 nullptr /* plTxnReplaced */, false /* bypass_limits */, nAbsurdFee);
 fInMempool |= ret;


@@ -38,7 +38,7 @@ class BaseNode(P2PInterface):
 def __init__(self):
 """Initialize the P2PInterface
-Used to inialize custom properties for the Node that aren't
+Used to initialize custom properties for the Node that aren't
 included by default in the base class. Be aware that the P2PInterface
 base class already stores a counter for each P2P message type and the
 last received message of each type, which should be sufficient for the


@@ -6,7 +6,7 @@
 * Verify that getdata requests for old blocks (>1week) are dropped
 if uploadtarget has been reached.
-* Verify that getdata requests for recent blocks are respecteved even
+* Verify that getdata requests for recent blocks are respected even
 if uploadtarget has been reached.
 * Verify that the upload counters are reset after 24 hours.
 """


@@ -190,8 +190,8 @@ class PruneTest(BitcoinTestFramework):
 # Verify that we have enough history to reorg back to the fork point
 # Although this is more than 288 blocks, because this chain was written more recently
-# and only its other 299 small and 220 large block are in the block files after it,
-# its expected to still be retained
+# and only its other 299 small and 220 large blocks are in the block files after it,
+# it is expected to still be retained
 self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
 first_reorg_height = self.nodes[2].getblockcount()


@@ -107,7 +107,7 @@ class RESTTest (BitcoinTestFramework):
 #check chainTip response
 assert_equal(json_obj['chaintipHash'], bb_hash)
-#make sure there is no utox in the response because this oupoint has been spent
+#make sure there is no utxo in the response because this oupoint has been spent
 assert_equal(len(json_obj['utxos']), 0)
 #check bitmap


@@ -29,7 +29,7 @@ Test is as follows:
 transactions in its mempool. This tests that -persistmempool=0
 does not overwrite a previously valid mempool stored on disk.
 - Remove node0 mempool.dat and verify savemempool RPC recreates it
-and verify that node1 can load it and has 5 transaction in its
+and verify that node1 can load it and has 5 transactions in its
 mempool.
 - Verify that savemempool throws when the RPC is called if
 node1 can't write to disk.


@@ -124,7 +124,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
 assert(tx_id not in self.nodes[0].getrawmempool())
 # This is a less than 1000-byte transaction, so just set the fee
-# to be the minimum for a 1000 byte transaction and check that it is
+# to be the minimum for a 1000-byte transaction and check that it is
 # accepted.
 self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))


@@ -548,7 +548,7 @@ class CompactBlocksTest(BitcoinTestFramework):
 # Note that it's possible for bitcoind to be smart enough to know we're
 # lying, since it could check to see if the shortid matches what we're
 # sending, and eg disconnect us for misbehavior. If that behavior
-# change were made, we could just modify this test by having a
+# change was made, we could just modify this test by having a
 # different peer provide the block further down, so that we're still
 # verifying that the block isn't marked bad permanently. This is good
 # enough for now.


@@ -7,7 +7,7 @@
 A node should never send anything other than VERSION/VERACK/REJECT until it's
 received a VERACK.
-This test connects to a node and sends it a few messages, trying to intice it
+This test connects to a node and sends it a few messages, trying to entice it
 into sending us something it shouldn't.
 Also test that nodes that send unsupported service bits to bitcoind are disconnected


@@ -64,7 +64,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
 blocks = self.nodes[1].generate(292)
 sync_blocks([self.nodes[0], self.nodes[1]])
-self.log.info("Make sure we can max retrive block at tip-288.")
+self.log.info("Make sure we can max retrieve block at tip-288.")
 node.send_getdata_for_block(blocks[1]) # last block in valid range
 node.wait_for_block(int(blocks[1], 16), timeout=3)


@@ -1511,7 +1511,7 @@ class SegWitTest(BitcoinTestFramework):
 # Make sure that this peer thinks segwit has activated.
 assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
-# Make sure this peers blocks match those of node0.
+# Make sure this peer's blocks match those of node0.
 height = self.nodes[node_id].getblockcount()
 while height >= 0:
 block_hash = self.nodes[node_id].getblockhash(height)


@@ -166,7 +166,7 @@ class AcceptBlockTest(BitcoinTestFramework):
 self.log.info("Unrequested more-work block accepted")
 # 4c. Now mine 288 more blocks and deliver; all should be processed but
-# the last (height-too-high) on node (as long as its not missing any headers)
+# the last (height-too-high) on node (as long as it is not missing any headers)
 tip = block_h3
 all_blocks = []
 for i in range(288):


@@ -55,7 +55,7 @@ class RPCBindTest(BitcoinTestFramework):
 def run_test(self):
 # due to OS-specific network stats queries, this test works only on Linux
 if not sys.platform.startswith('linux'):
-raise SkipTest("This test can only be run on linux.")
+raise SkipTest("This test can only be run on Linux.")
 # find the first non-loopback interface for testing
 non_loopback_ip = None
 for name,ip in all_interfaces():


@@ -4,7 +4,7 @@
 # Copyright (c) 2010-2017 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Bitcoin test framework primitive and message strcutures
+"""Bitcoin test framework primitive and message structures
 CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
 data structures that should map to corresponding structures in


@@ -405,7 +405,7 @@ class P2PInterface(P2PConnection):
 # Keep our own socket map for asyncore, so that we can track disconnects
-# ourselves (to workaround an issue with closing an asyncore socket when
+# ourselves (to work around an issue with closing an asyncore socket when
 # using select)
 mininode_socket_map = dict()

@@ -424,7 +424,7 @@ class NetworkThread(threading.Thread):
 def run(self):
 while mininode_socket_map:
 # We check for whether to disconnect outside of the asyncore
-# loop to workaround the behavior of asyncore when using
+# loop to work around the behavior of asyncore when using
 # select
 disconnected = []
 for fd, obj in mininode_socket_map.items():


@@ -335,7 +335,7 @@ class BitcoinTestFramework():
 blockchain. If the cached version of the blockchain is used without
 mocktime then the mempools will not sync due to IBD.
-For backwared compatibility of the python scripts with previous
+For backward compatibility of the python scripts with previous
 versions of the cache, this helper function sets mocktime to Jan 1,
 2014 + (201 * 10 * 60)"""
 self.mocktime = 1388534400 + (201 * 10 * 60)


@@ -61,7 +61,7 @@ class TestNode():
 if extra_conf != None:
 append_config(dirname, i, extra_conf)
 # Most callers will just need to add extra args to the standard list below.
-# For those callers that need more flexibity, they can just set the args property directly.
+# For those callers that need more flexibility, they can just set the args property directly.
 # Note that common args are set in the config file (see initialize_datadir)
 self.extra_args = extra_args
 self.args = [self.binary, "-datadir=" + self.datadir, "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]


@@ -109,7 +109,7 @@ class AbandonConflictTest(BitcoinTestFramework):
 assert_equal(len(self.nodes[0].getrawmempool()), 0)
 assert_equal(self.nodes[0].getbalance(), balance)
-# But if its received again then it is unabandoned
+# But if it is received again then it is unabandoned
 # And since now in mempool, the change is available
 # But its child tx remains abandoned
 self.nodes[0].sendrawtransaction(signed["hex"])

@@ -117,7 +117,7 @@ class AbandonConflictTest(BitcoinTestFramework):
 assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
 balance = newbalance
-# Send child tx again so its unabandoned
+# Send child tx again so it is unabandoned
 self.nodes[0].sendrawtransaction(signed2["hex"])
 newbalance = self.nodes[0].getbalance()
 assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))


@@ -283,7 +283,7 @@ class WalletTest(BitcoinTestFramework):
 sync_blocks(self.nodes[0:3])
 node_2_bal += 2
-#tx should be added to balance because after restarting the nodes tx should be broadcastet
+#tx should be added to balance because after restarting the nodes tx should be broadcast
 assert_equal(self.nodes[2].getbalance(), node_2_bal)
 #send a tx with value in a string (PR#6380 +)


@@ -211,7 +211,7 @@ class ListSinceBlockTest (BitcoinTestFramework):
 1. tx1 is listed in listsinceblock.
 2. It is included in 'removed' as it was removed, even though it is now
 present in a different block.
-3. It is listed with a confirmations count of 2 (bb3, bb4), not
+3. It is listed with a confirmation count of 2 (bb3, bb4), not
 3 (aa1, aa2, aa3).
 '''