restored compatible undo functionality

This commit is contained in:
Brannon King 2020-02-27 09:55:47 -07:00 committed by Anthony Fieroni
parent af42072578
commit 541b26e920
13 changed files with 95 additions and 132 deletions

View file

@ -32,7 +32,7 @@ jobs:
- cp -l dist/lbrycrd-${NAME}.zip packaging/docker-for-binary/lbrycrd-${NAME}.zip
- sha256sum dist/lbrycrd-${NAME}.zip
- sha256sum dist/lbrycrd-${NAME}-test.zip
- cp test/config.ini dist/config.ini
- cp test/config.ini dist/config-${NAME}.ini
deploy:
- provider: s3
access_key_id: AKIAICKFHNTR5RITASAQ
@ -101,7 +101,7 @@ jobs:
clone: true
depth: 3
install:
- curl http://build.lbry.io/lbrycrd/${TRAVIS_BRANCH}/config.ini -o test/config.ini
- curl http://build.lbry.io/lbrycrd/${TRAVIS_BRANCH}/config-${NAME}.ini -o test/config.ini
- sed -i "s|/lbrycrd|$(pwd)|" test/config.ini
- curl http://build.lbry.io/lbrycrd/${TRAVIS_BRANCH}/lbrycrd-${NAME}.zip -o src/temp.zip
- cd src && unzip temp.zip && cd ..

View file

@ -269,7 +269,7 @@ static GCSFilter::ElementSet BasicFilterElements(const CBlock& block,
}
for (const CTxUndo& tx_undo : block_undo.vtxundo) {
for (const Coin& prevout : tx_undo.vprevout) {
for (const auto& prevout : tx_undo.vprevout) {
const CScript& script = prevout.out.scriptPubKey;
if (script.empty()) continue;
elements.emplace(script.begin(), script.end());

View file

@ -181,7 +181,6 @@ public:
vSeeds.emplace_back("dnsseed1.lbry.io"); // lbry.io
vSeeds.emplace_back("dnsseed2.lbry.io"); // lbry.io
vSeeds.emplace_back("dnsseed3.lbry.io"); // lbry.io
vSeeds.emplace_back("dnsseed.emzy.de"); // Stephan Oeste
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1, 0x55);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1, 0x7a);

View file

@ -50,7 +50,7 @@ BlockFilterIndex::BlockFilterIndex(BlockFilterType filter_type,
const std::string& filter_name = BlockFilterTypeName(filter_type);
if (filter_name.empty()) throw std::invalid_argument("unknown filter_type");
fs::path path = GetBlocksDir() / "filter" / filter_name;
fs::path path = GetDataDir() / "filter" / filter_name;
fs::create_directories(path);
m_name = filter_name + " block filter index";
@ -181,11 +181,13 @@ bool BlockFilterIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex
if (bytes_written == 0)
return false;
const auto filterHash = filter.GetHash(); // trying to avoid temps
const auto filterHeader = filter.ComputeHeader(prev_header);
(*m_db) << "INSERT INTO block VALUES(?, ?, ?, ?, ?, ?)"
<< pindex->nHeight
<< pindex->GetBlockHash()
<< filter.GetHash()
<< filter.ComputeHeader(prev_header)
<< pindex->hash
<< filterHash
<< filterHeader
<< m_next_filter_pos.nFile
<< m_next_filter_pos.nPos;

View file

@ -1574,21 +1574,22 @@ bool AppInitMain(InitInterfaces& interfaces)
break;
}
auto tip = ::ChainActive().Tip();
if (tip && !::ClaimtrieCache().validateDb(tip->nHeight, tip->hashClaimTrie)) {
strLoadError = _("Error validating the stored claim trie").translated;
auto tip = ::ChainActive().Tip();
LogPrintf("Checking existing claim trie consistency...\n");
if (tip && !::ClaimtrieCache().validateDb(tip->nHeight, tip->hashClaimTrie)) {
strLoadError = _("Error validating the stored claim trie").translated;
break;
}
if (!fReset) {
// Note that RewindBlockIndex MUST run even if we're about to -reindex-chainstate.
// It both disconnects blocks based on ::ChainActive(), and drops block data in
// BlockIndex() based on lack of available witness data.
uiInterface.InitMessage(_("Rewinding blocks...").translated);
if (!RewindBlockIndex(chainparams)) {
strLoadError = _("Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain").translated;
break;
}
if (!fReset) {
// Note that RewindBlockIndex MUST run even if we're about to -reindex-chainstate.
// It both disconnects blocks based on ::ChainActive(), and drops block data in
// BlockIndex() based on lack of available witness data.
uiInterface.InitMessage(_("Rewinding blocks...").translated);
if (!RewindBlockIndex(chainparams)) {
strLoadError = _("Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain").translated;
break;
}
}
try {

View file

@ -1878,7 +1878,7 @@ static UniValue getblockstats(const JSONRPCRequest& request)
if (loop_inputs) {
CAmount tx_total_in = 0;
const auto& txundo = blockUndo.vtxundo.at(i - 1);
for (const Coin& coin: txundo.vprevout) {
for (const auto& coin: txundo.vprevout) {
const CTxOut& prevoutput = coin.out;
tx_total_in += prevoutput.nValue;

View file

@ -156,7 +156,7 @@ size_t CCoinsViewDB::EstimateSize() const
}
CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe)
: db(fMemory ? ":memory:" : (GetBlocksDir() / "index.sqlite").string(), sharedConfig)
: db(fMemory ? ":memory:" : (GetDataDir() / "block_index.sqlite").string(), sharedConfig)
{
applyPragmas(db, nCacheSize >> 10); // in -KB

View file

@ -1,103 +1,72 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2018 The Bitcoin Core developers
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_UNDO_H
#define BITCOIN_UNDO_H
#include <coins.h>
#include <compressor.h>
#include <consensus/consensus.h>
#include <primitives/transaction.h>
#include <serialize.h>
#include <version.h>
/** Undo information for a CTxIn
*
* Contains the prevout's CTxOut being spent, and its metadata as well
* (coinbase or not, height). The serialization contains a dummy value of
* zero. This is compatible with older versions which expect to see
* the transaction version there.
* Contains the prevout's CTxOut being spent, and if this was the
* last output of the affected transaction, its metadata as well
* (coinbase or not, height, transaction version)
*/
class TxInUndoSerializer
class CTxInUndo
{
const Coin* txout;
uint32_t nDeprecated1 = 0; // if the outpoint was the last unspent: its version
bool fDeprecated2 = false; // whether the outpoint was the last unspent
public:
template<typename Stream>
void Serialize(Stream &s) const {
::Serialize(s, VARINT(txout->nHeight * 2 + (txout->fCoinBase ? 1u : 0u)));
if (txout->nHeight > 0) {
// Required to maintain compatibility with older undo format.
::Serialize(s, (unsigned char)0);
CTxOut out; // the out data before being spent
bool fCoinBase; // if the outpoint was the last unspent: whether it belonged to a coinbase
uint32_t nHeight; // if the outpoint was the last unspent: its height
uint32_t nClaimValidHeight; // If the outpoint was a claim or support, the height at which the claim or support should be inserted into the trie
bool fIsClaim; // if the outpoint was a claim or support
uint32_t nClaimOriginalHeight; // If we are a claim, we need to track the original insertion height for that claim
CTxInUndo() : out(), fCoinBase(false), nHeight(0), nClaimValidHeight(0), fIsClaim(false), nClaimOriginalHeight(0) {}
CTxInUndo(const CTxOut &txoutIn, bool fCoinBaseIn = false, uint32_t nHeightIn = 0) :
out(txoutIn), fCoinBase(fCoinBaseIn), nHeight(nHeightIn), nClaimValidHeight(0), fIsClaim(false), nClaimOriginalHeight(0) {}
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action) {
if (ser_action.ForRead()) {
uint32_t nCode = 0;
READWRITE(VARINT(nCode)); // VARINT in this method is for legacy compatibility
nHeight = nCode >> 2U;
fCoinBase = nCode & 2U;
fDeprecated2 = nCode & 1U;
} else {
uint32_t nCode = (nHeight << 2U) | (fCoinBase ? 2U : 0U) | (fDeprecated2 ? 1U: 0U);
READWRITE(VARINT(nCode));
}
::Serialize(s, CTxOutCompressor(REF(txout->out)));
if (fDeprecated2)
READWRITE(VARINT(nDeprecated1));
READWRITE(REF(CTxOutCompressor(REF(out))));
READWRITE(VARINT(nClaimValidHeight));
READWRITE(fIsClaim);
READWRITE(VARINT(nClaimOriginalHeight));
}
explicit TxInUndoSerializer(const Coin* coin) : txout(coin) {}
};
class TxInUndoDeserializer
{
Coin* txout;
public:
template<typename Stream>
void Unserialize(Stream &s) {
unsigned int nCode = 0;
::Unserialize(s, VARINT(nCode));
txout->nHeight = nCode / 2;
txout->fCoinBase = nCode & 1;
if (txout->nHeight > 0) {
// Old versions stored the version number for the last spend of
// a transaction's outputs. Non-final spends were indicated with
// height = 0.
unsigned int nVersionDummy;
::Unserialize(s, VARINT(nVersionDummy));
}
::Unserialize(s, CTxOutCompressor(REF(txout->out)));
}
explicit TxInUndoDeserializer(Coin* coin) : txout(coin) {}
};
static const size_t MIN_TRANSACTION_INPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxIn(), PROTOCOL_VERSION);
static const size_t MAX_INPUTS_PER_BLOCK = MAX_BLOCK_WEIGHT / MIN_TRANSACTION_INPUT_WEIGHT;
/** Undo information for a CTransaction */
class CTxUndo
{
public:
// undo information for all txins
std::vector<Coin> vprevout;
// tx index to claim original and valid height
std::map<int, std::pair<int, int>> claimHeights;
std::vector<CTxInUndo> vprevout;
template <typename Stream>
void Serialize(Stream& s) const {
// TODO: avoid reimplementing vector serializer
uint64_t count = vprevout.size();
::Serialize(s, COMPACTSIZE(REF(count)));
for (const auto& prevout : vprevout) {
::Serialize(s, TxInUndoSerializer(&prevout));
}
::Serialize(s, claimHeights);
}
ADD_SERIALIZE_METHODS;
template <typename Stream>
void Unserialize(Stream& s) {
// TODO: avoid reimplementing vector deserializer
uint64_t count = 0;
::Unserialize(s, COMPACTSIZE(count));
if (count > MAX_INPUTS_PER_BLOCK) {
throw std::ios_base::failure("Too many input undo records");
}
vprevout.resize(count);
for (auto& prevout : vprevout) {
::Unserialize(s, TxInUndoDeserializer(&prevout));
}
::Unserialize(s, claimHeights);
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(vprevout);
}
};

View file

@ -1542,10 +1542,11 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txund
// mark inputs spent
if (!tx.IsCoinBase()) {
txundo.vprevout.reserve(tx.vin.size());
Coin coin;
for (const CTxIn &txin : tx.vin) {
txundo.vprevout.emplace_back();
bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
bool is_spent = inputs.SpendCoin(txin.prevout, &coin);
assert(is_spent);
txundo.vprevout.emplace_back(coin.out, coin.IsCoinBase(), int(coin.nHeight));
}
}
// add outputs
@ -1782,23 +1783,21 @@ int ApplyTxInUndo(unsigned int index, CTxUndo& txUndo, CCoinsViewCache& view, CC
}
}
if (!undo.out.scriptPubKey.empty()) {
auto it = txUndo.claimHeights.find(index);
// restore claim if applicable
if (it != txUndo.claimHeights.end()) {
auto nValidHeight = it->second.first;
auto nOriginalHeight = it->second.second;
// assert(nValidHeight > 0 && nOriginalHeight > 0); // fails unit tests
CClaimScriptUndoSpendOp undoSpend(out, undo.out.nValue, undo.nHeight, nValidHeight, nOriginalHeight);
ProcessClaim(undoSpend, trieCache, undo.out.scriptPubKey);
}
// restore claim if applicable
if (undo.fIsClaim && !undo.out.scriptPubKey.empty()) {
auto nValidHeight = int(undo.nClaimValidHeight);
auto nOriginalHeight = int(undo.nClaimOriginalHeight);
// assert(nValidHeight > 0 && nOriginalHeight > 0); // fails unit tests
CClaimScriptUndoSpendOp undoSpend(COutPoint(out.hash, out.n), undo.out.nValue, undo.nHeight,
nValidHeight, nOriginalHeight);
ProcessClaim(undoSpend, trieCache, undo.out.scriptPubKey);
}
// The potential_overwrite parameter to AddCoin is only allowed to be false if we know for
// sure that the coin did not already exist in the cache. As we have queried for that above
// using HaveCoin, we don't need to guess. When fClean is false, a coin already existed and
// it is an overwrite.
view.AddCoin(out, std::move(undo), !fClean);
Coin coin(undo.out, int(undo.nHeight), undo.fCoinBase);
view.AddCoin(out, std::move(coin), !fClean);
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
@ -2343,9 +2342,13 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
}
UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
if (i > 0 && !mClaimUndoHeights.empty()) {
auto& claimsHeights = blockundo.vtxundo.back().claimHeights;
auto& txinUndos = blockundo.vtxundo.back().vprevout;
for (auto itHeight = mClaimUndoHeights.begin(); itHeight != mClaimUndoHeights.end(); ++itHeight)
claimsHeights[itHeight->first] = itHeight->second;
{
txinUndos[itHeight->first].nClaimValidHeight = itHeight->second.first;
txinUndos[itHeight->first].nClaimOriginalHeight = itHeight->second.second;
txinUndos[itHeight->first].fIsClaim = true;
}
}
// The CTxUndo vector contains the heights at which claims should be put into the trie.

View file

@ -32,7 +32,6 @@ class BlocksdirTest(BitcoinTestFramework):
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "blk00000.dat"))
assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "rev00000.dat"))
assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "index.sqlite"))
if __name__ == '__main__':

View file

@ -312,8 +312,11 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
while n.getblockchaininfo()["blocks"] < 199:
hits = 0
while n.getblockchaininfo()["blocks"] < 199 and hits < 20:
time.sleep(1)
hits += 1
assert n.getblockchaininfo()["blocks"] >= 199
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
@ -540,7 +543,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
os.rmdir(cache_path('wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path()):
if entry not in ['blocks']: # Only keep blocks folder
if entry != 'blocks' and not entry.endswith('.sqlite'):
os.remove(cache_path(entry))
for i in range(self.num_nodes):
@ -579,23 +582,6 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
if not self.is_cli_compiled():
raise SkipTest("lbrycrd-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether lbrycrd-cli was compiled."""
return config["components"].getboolean("ENABLE_UTILS")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
return self.config["components"].getboolean("ENABLE_CLI")

View file

@ -163,6 +163,7 @@ class WalletBackupTest(BitcoinTestFramework):
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'coins.sqlite'))
os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'block_index.sqlite'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
@ -184,6 +185,7 @@ class WalletBackupTest(BitcoinTestFramework):
#start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'coins.sqlite'))
os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'block_index.sqlite'))
self.start_three()

View file

@ -73,6 +73,7 @@ class WalletHDTest(BitcoinTestFramework):
# otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'coins.sqlite'))
os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'block_index.sqlite'))
shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat"))
self.start_node(1)
@ -96,6 +97,7 @@ class WalletHDTest(BitcoinTestFramework):
self.stop_node(1)
shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'coins.sqlite'))
os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'block_index.sqlite'))
shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat"))
self.start_node(1, extra_args=self.extra_args[1])
connect_nodes(self.nodes[0], 1)