Fix db directory
Don't use recursive hash computation

Signed-off-by: Anthony Fieroni <bvbfan@abv.bg>
parent a50bc97e83
commit 3bc4e6f9f0
6 changed files with 57 additions and 76 deletions
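For orientation before the diff: the recursive per-node hashing (recursiveComputeMerkleHash, which descended from the root and wrote hashes as it went) is replaced by a plain computeNodeHash, and getMerkleHash now fills in missing hashes in a single pass over nodes whose hash column is NULL, ordered by descending name length, so every child is hashed before the parent that consumes it. The standalone sketch below only illustrates that ordering idea; the Node struct, the toy hash function, and the one-character-longer child test are illustrative assumptions, not code from this repository.

#include <algorithm>
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy trie node: an empty hash string stands in for a NULL hash column.
struct Node {
    std::string hash;
    std::string value;
};

// Stand-in for the real node hash (illustrative only).
static std::string toyHash(const std::string& s) {
    return std::to_string(std::hash<std::string>{}(s));
}

int main() {
    // Tiny trie keyed by full name; "" is the root.
    std::map<std::string, Node> nodes = {
        {"", {"", ""}},
        {"a", {"", "claimA"}},
        {"ab", {"", "claimAB"}},
        {"b", {"", "claimB"}},
    };

    // Equivalent of: SELECT name FROM nodes WHERE hash IS NULL ORDER BY SIZE(name) DESC
    std::vector<std::string> pending;
    for (const auto& kv : nodes)
        if (kv.second.hash.empty())
            pending.push_back(kv.first);
    std::sort(pending.begin(), pending.end(),
              [](const std::string& l, const std::string& r) { return l.size() > r.size(); });

    // Longest names first, so every child hash already exists when its parent needs it.
    for (const auto& name : pending) {
        std::string material = nodes[name].value;
        for (const auto& kv : nodes) {
            const std::string& child = kv.first;
            // Toy notion of "direct child": exactly one character longer and sharing the prefix.
            if (child.size() == name.size() + 1 && child.compare(0, name.size(), name) == 0)
                material += kv.second.hash;
        }
        nodes[name].hash = toyHash(material);
    }

    std::cout << "root hash: " << nodes[""].hash << "\n";
    return 0;
}

Hashing the deepest pending names first removes the need for recursion (and for re-entering the child-hash query from inside itself), which appears to be the motivation for the childHashQuery simplifications in the diff below.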
@@ -260,34 +260,23 @@ CUint256 ComputeMerkleRoot(std::vector<CUint256> hashes)
     return hashes.empty() ? CUint256{} : hashes[0];
 }
 
-CUint256 CClaimTrieCacheHashFork::recursiveComputeMerkleHash(const std::string& name, int takeoverHeight, bool checkOnly)
+CUint256 CClaimTrieCacheHashFork::computeNodeHash(const std::string& name, int takeoverHeight)
 {
     if (nNextHeight < base->nAllClaimsInMerkleForkHeight)
-        return CClaimTrieCacheNormalizationFork::recursiveComputeMerkleHash(name, takeoverHeight, checkOnly);
+        return CClaimTrieCacheNormalizationFork::computeNodeHash(name, takeoverHeight);
 
-    // it may be that using RAM for this is more expensive than preparing a new query statement in each recursive call
-    std::vector<std::tuple<std::string, std::unique_ptr<CUint256>, int>> children;
-    childHashQuery << name >> [&children](std::string name, std::unique_ptr<CUint256> hash, int takeoverHeight) {
-        children.push_back(std::make_tuple(std::move(name), std::move(hash), takeoverHeight));
+    std::vector<CUint256> childHashes;
+    childHashQuery << name >> [&childHashes](std::string name, CUint256 hash) {
+        childHashes.push_back(std::move(hash));
     };
     childHashQuery++;
-    std::vector<CUint256> childHashes;
-    for (auto& child: children) {
-        auto& name = std::get<0>(child);
-        auto& hash = std::get<1>(child);
-        if (!hash || hash->IsNull())
-            childHashes.push_back(recursiveComputeMerkleHash(name, std::get<2>(child), checkOnly));
-        else
-            childHashes.push_back(*hash);
-    }
 
     std::vector<CUint256> claimHashes;
     //if (takeoverHeight > 0) {
         for (auto &&row: claimHashQuery << nNextHeight << name) {
             CTxOutPoint p;
             row >> p.hash >> p.n;
-            auto claimHash = getValueHash(p, takeoverHeight);
-            claimHashes.push_back(claimHash);
+            claimHashes.push_back(getValueHash(p, takeoverHeight));
         }
         claimHashQuery++;
     //}
@@ -295,10 +284,7 @@ CUint256 CClaimTrieCacheHashFork::recursiveComputeMerkleHash(const std::string&
     auto left = childHashes.empty() ? leafHash : ComputeMerkleRoot(std::move(childHashes));
     auto right = claimHashes.empty() ? emptyHash : ComputeMerkleRoot(std::move(claimHashes));
 
-    auto computedHash = Hash(left.begin(), left.end(), right.begin(), right.end());
-    if (!checkOnly)
-        db << "UPDATE nodes SET hash = ? WHERE name = ?" << computedHash << name;
-    return computedHash;
+    return Hash(left.begin(), left.end(), right.begin(), right.end());
 }
 
 std::vector<CUint256> ComputeMerklePath(const std::vector<CUint256>& hashes, uint32_t idx)
@@ -79,7 +79,7 @@ public:
     bool allowSupportMetadata() const;
 
 protected:
-    CUint256 recursiveComputeMerkleHash(const std::string& name, int takeoverHeight, bool checkOnly) override;
+    CUint256 computeNodeHash(const std::string& name, int takeoverHeight) override;
 };
 
 typedef CClaimTrieCacheHashFork CClaimTrieCache;
@@ -404,38 +404,25 @@ void completeHash(CUint256& partialHash, const std::string& key, int to)
     partialHash = Hash(it, it + 1, partialHash.begin(), partialHash.end());
 }
 
-CUint256 CClaimTrieCacheBase::recursiveComputeMerkleHash(const std::string& name, int takeoverHeight, bool checkOnly)
+CUint256 CClaimTrieCacheBase::computeNodeHash(const std::string& name, int takeoverHeight)
 {
-    std::vector<uint8_t> vchToHash;
     const auto pos = name.size();
-    // we have to free up the hash query so it can be reused by a child
-    std::vector<std::tuple<std::string, std::unique_ptr<CUint256>, int>> children;
-    childHashQuery << name >> [&children](std::string name, std::unique_ptr<CUint256> hash, int takeoverHeight) {
-        children.push_back(std::make_tuple(std::move(name), std::move(hash), takeoverHeight));
+    std::vector<uint8_t> vchToHash;
+    childHashQuery << name >> [&vchToHash, pos](std::string name, CUint256 hash) {
+        completeHash(hash, name, pos);
+        vchToHash.push_back(name[pos]);
+        vchToHash.insert(vchToHash.end(), hash.begin(), hash.end());
     };
     childHashQuery++;
 
-    for (auto& child: children) {
-        auto& name = std::get<0>(child);
-        auto& hash = std::get<1>(child);
-        if (!hash) hash = std::make_unique<CUint256>();
-        if (hash->IsNull())
-            *hash = recursiveComputeMerkleHash(name, std::get<2>(child), checkOnly);
-        completeHash(*hash, name, pos);
-        vchToHash.push_back(name[pos]);
-        vchToHash.insert(vchToHash.end(), hash->begin(), hash->end());
-    }
 
     CClaimValue claim;
     if (getInfoForName(name, claim)) {
-        CUint256 valueHash = getValueHash(claim.outPoint, takeoverHeight);
+        auto valueHash = getValueHash(claim.outPoint, takeoverHeight);
         vchToHash.insert(vchToHash.end(), valueHash.begin(), valueHash.end());
     }
 
-    auto computedHash = vchToHash.empty() ? one : Hash(vchToHash.begin(), vchToHash.end());
-    if (!checkOnly)
-        db << "UPDATE nodes SET hash = ? WHERE name = ?" << computedHash << name;
-    return computedHash;
+    return vchToHash.empty() ? one : Hash(vchToHash.begin(), vchToHash.end());
 }
 
 bool CClaimTrieCacheBase::checkConsistency()
@@ -448,15 +435,17 @@ bool CClaimTrieCacheBase::checkConsistency()
         CUint256 hash;
         int takeoverHeight;
         row >> name >> hash >> takeoverHeight;
-        auto computedHash = recursiveComputeMerkleHash(name, takeoverHeight, true);
+        auto computedHash = computeNodeHash(name, takeoverHeight);
         if (computedHash != hash)
             return false;
     }
     return true;
 }
 
-bool CClaimTrieCacheBase::validateDb(const CUint256& rootHash)
+bool CClaimTrieCacheBase::validateDb(int height, const CUint256& rootHash)
 {
+    base->nNextHeight = nNextHeight = height + 1;
+
     logPrint << "Checking claim trie consistency... " << Clog::flush;
     if (checkConsistency()) {
         logPrint << "consistent" << Clog::endl;
@@ -520,6 +509,7 @@ CClaimTrieCacheBase::CClaimTrieCacheBase(CClaimTrie* base)
     db << "PRAGMA temp_store=MEMORY";
     db << "PRAGMA case_sensitive_like=true";
 
+    db.define("SIZE", [](const std::string& s) -> int { return s.size(); });
     db.define("POPS", [](std::string s) -> std::string { if (!s.empty()) s.pop_back(); return s; });
 }
 
@@ -539,15 +529,21 @@ int CClaimTrieCacheBase::expirationTime() const
 CUint256 CClaimTrieCacheBase::getMerkleHash()
 {
     ensureTreeStructureIsUpToDate();
-    std::unique_ptr<CUint256> hash;
-    int takeoverHeight;
-    // can't use childHashQuery here because "IS NULL" must be used instead of parent = NULL
-    db << "SELECT hash, IFNULL(takeoverHeight, 0) FROM nodes WHERE name = ''" >> std::tie(hash, takeoverHeight);
-    if (hash == nullptr || hash->IsNull()) {
-        assert(transacting); // no data changed but we didn't have the root hash there already?
-        return recursiveComputeMerkleHash("", takeoverHeight, false);
-    }
-    return *hash;
+    CUint256 hash;
+    db << "SELECT hash FROM nodes WHERE name = ''"
+        >> [&hash](std::unique_ptr<CUint256> rootHash) {
+            if (rootHash)
+                hash = std::move(*rootHash);
+        };
+    if (!hash.IsNull())
+        return hash;
+
+    db << "SELECT name, IFNULL(takeoverHeight, 0) FROM nodes WHERE hash IS NULL ORDER BY SIZE(name) DESC"
+        >> [this, &hash](const std::string& name, int takeoverHeight) {
+            hash = computeNodeHash(name, takeoverHeight);
+            db << "UPDATE nodes SET hash = ? WHERE name = ?" << hash << name;
+        };
+    return hash;
 }
 
 bool CClaimTrieCacheBase::getLastTakeoverForName(const std::string& name, CUint160& claimId, int& takeoverHeight) const
@@ -83,7 +83,7 @@ public:
     bool flush();
     bool checkConsistency();
     CUint256 getMerkleHash();
-    bool validateDb(const CUint256& rootHash);
+    bool validateDb(int height, const CUint256& rootHash);
 
     std::size_t getTotalNamesInTrie() const;
    std::size_t getTotalClaimsInTrie() const;
@@ -138,7 +138,7 @@ protected:
 
     mutable sqlite::database_binder claimHashQuery, childHashQuery;
 
-    virtual CUint256 recursiveComputeMerkleHash(const std::string& name, int takeoverHeight, bool checkOnly);
+    virtual CUint256 computeNodeHash(const std::string& name, int takeoverHeight);
     supportEntryType getSupportsForName(const std::string& name) const;
 
     virtual int getDelayForName(const std::string& name, const CUint160& claimId) const;
35 src/init.cpp
@@ -1483,20 +1483,6 @@ bool AppInitMain(InitInterfaces& interfaces)
                 // fails if it's still open from the previous loop. Close it first:
                 pblocktree.reset();
                 pblocktree.reset(new CBlockTreeDB(nBlockTreeDBCache, false, fReset));
-                delete pclaimTrie;
-                auto& consensus = chainparams.GetConsensus();
-                if (g_logger->Enabled() && LogAcceptCategory(BCLog::CLAIMS))
-                    CLogPrint::global().setLogger(g_logger);
-                auto dataDir = GetDataDir() / "claimtrie";
-                TryCreateDirectories(dataDir);
-                pclaimTrie = new CClaimTrie(fReindex || fReindexChainState, 0,
-                        dataDir.string(),
-                        consensus.nNormalizedNameForkHeight,
-                        consensus.nOriginalClaimExpirationTime,
-                        consensus.nExtendedClaimExpirationTime,
-                        consensus.nExtendedClaimExpirationForkHeight,
-                        consensus.nAllClaimsInMerkleForkHeight,
-                        32);
 
                 if (fReset) {
                     pblocktree->WriteReindexing(true);
@@ -1561,6 +1547,20 @@ bool AppInitMain(InitInterfaces& interfaces)
                     break;
                 }
 
+                if (g_logger->Enabled() && LogAcceptCategory(BCLog::CLAIMS))
+                    CLogPrint::global().setLogger(g_logger);
+
+                delete pclaimTrie;
+                auto& consensus = chainparams.GetConsensus();
+                pclaimTrie = new CClaimTrie(fReindex || fReindexChainState, 0,
+                        GetDataDir().string(),
+                        consensus.nNormalizedNameForkHeight,
+                        consensus.nOriginalClaimExpirationTime,
+                        consensus.nExtendedClaimExpirationTime,
+                        consensus.nExtendedClaimExpirationForkHeight,
+                        consensus.nAllClaimsInMerkleForkHeight,
+                        32);
+
                 // ReplayBlocks is a no-op if we cleared the coinsviewdb with -reindex or -reindex-chainstate
                 if (!::ChainstateActive().ReplayBlocks(chainparams)) {
                     strLoadError = _("Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.").translated;
@@ -1586,12 +1586,13 @@ bool AppInitMain(InitInterfaces& interfaces)
                 strLoadError = _("Error opening block database").translated;
                 break;
             }
 
             auto tip = chainActive.Tip();
-            assert(tip);
-            if (!CClaimTrieCache(pclaimTrie).validateDb(tip->hashClaimTrie)) {
-                strLoadError = _("Error loading the claim trie from disk");
+            if (tip && !CClaimTrieCache(pclaimTrie).validateDb(tip->nHeight, tip->hashClaimTrie)) {
+                strLoadError = _("Error validating the claim trie from disk");
                 break;
             }
 
             if (!fReset) {
                 // Note that RewindBlockIndex MUST run even if we're about to -reindex-chainstate.
                 // It both disconnects blocks based on ::ChainActive(), and drops block data in
@@ -143,9 +143,7 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
     ::ChainstateActive().InitCoinsDB(
         /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
     auto& consensus = chainparams.GetConsensus();
-    auto dataDir = GetDataDir() / "claimtrie";
-    TryCreateDirectories(dataDir);
-    pclaimTrie = new CClaimTrie(true, 0, dataDir.string(),
+    pclaimTrie = new CClaimTrie(true, 0, GetDataDir().string(),
         consensus.nNormalizedNameForkHeight,
         consensus.nOriginalClaimExpirationTime,
         consensus.nExtendedClaimExpirationTime,