Fix db directory
Don't use recursive hash computation

Signed-off-by: Anthony Fieroni <bvbfan@abv.bg>

This commit is contained in:
parent 0dad6a7c10
commit 106a4c26da

6 changed files with 57 additions and 76 deletions
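In short: the claim trie database now lives directly under GetDataDir() instead of a separate "claimtrie" subdirectory, and node hashes are no longer computed by recursive descent. computeNodeHash() hashes a single node from its children's stored hashes, and getMerkleHash() walks the dirty nodes ordered by SIZE(name) DESC, so every child is hashed and written back before its shorter-named parent is processed. The standalone sketch below is not lbrycrd code (the toy hash and the prefix-based child lookup are stand-ins); it only illustrates why that ordering removes the need for recursion:

#include <algorithm>
#include <cstddef>
#include <functional>
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

int main()
{
    // Toy "nodes" table: name -> cached hash; std::nullopt marks a dirty node.
    std::map<std::string, std::optional<std::size_t>> nodes{
        {"", std::nullopt}, {"a", std::nullopt}, {"ab", std::nullopt},
        {"ac", std::nullopt}, {"b", std::nullopt}};

    std::vector<std::string> dirty;
    for (const auto& [name, hash] : nodes)
        if (!hash)
            dirty.push_back(name);

    // The equivalent of "ORDER BY SIZE(name) DESC": deepest (longest) names first.
    std::sort(dirty.begin(), dirty.end(),
              [](const std::string& a, const std::string& b) { return a.size() > b.size(); });

    for (const auto& name : dirty) {
        std::size_t h = std::hash<std::string>{}(name);   // stand-in for the real node hash
        for (const auto& [other, otherHash] : nodes)      // descendants were processed first
            if (other.size() > name.size() && other.compare(0, name.size(), name) == 0)
                h ^= *otherHash;                          // toy combine; the real code hashes bytes
        nodes[name] = h;
    }
    std::cout << "toy root hash: " << *nodes[""] << '\n';
}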
@@ -260,34 +260,23 @@ CUint256 ComputeMerkleRoot(std::vector<CUint256> hashes)
     return hashes.empty() ? CUint256{} : hashes[0];
 }
 
-CUint256 CClaimTrieCacheHashFork::recursiveComputeMerkleHash(const std::string& name, int takeoverHeight, bool checkOnly)
+CUint256 CClaimTrieCacheHashFork::computeNodeHash(const std::string& name, int takeoverHeight)
 {
     if (nNextHeight < base->nAllClaimsInMerkleForkHeight)
-        return CClaimTrieCacheNormalizationFork::recursiveComputeMerkleHash(name, takeoverHeight, checkOnly);
+        return CClaimTrieCacheNormalizationFork::computeNodeHash(name, takeoverHeight);
 
-    // it may be that using RAM for this is more expensive than preparing a new query statement in each recursive call
-    std::vector<std::tuple<std::string, std::unique_ptr<CUint256>, int>> children;
-    childHashQuery << name >> [&children](std::string name, std::unique_ptr<CUint256> hash, int takeoverHeight) {
-        children.push_back(std::make_tuple(std::move(name), std::move(hash), takeoverHeight));
+    std::vector<CUint256> childHashes;
+    childHashQuery << name >> [&childHashes](std::string name, CUint256 hash) {
+        childHashes.push_back(std::move(hash));
     };
     childHashQuery++;
-    std::vector<CUint256> childHashes;
-    for (auto& child: children) {
-        auto& name = std::get<0>(child);
-        auto& hash = std::get<1>(child);
-        if (!hash || hash->IsNull())
-            childHashes.push_back(recursiveComputeMerkleHash(name, std::get<2>(child), checkOnly));
-        else
-            childHashes.push_back(*hash);
-    }
 
     std::vector<CUint256> claimHashes;
     //if (takeoverHeight > 0) {
         for (auto &&row: claimHashQuery << nNextHeight << name) {
             CTxOutPoint p;
             row >> p.hash >> p.n;
-            auto claimHash = getValueHash(p, takeoverHeight);
-            claimHashes.push_back(claimHash);
+            claimHashes.push_back(getValueHash(p, takeoverHeight));
         }
         claimHashQuery++;
     //}

@@ -295,10 +284,7 @@ CUint256 CClaimTrieCacheHashFork::recursiveComputeMerkleHash(const std::string&
     auto left = childHashes.empty() ? leafHash : ComputeMerkleRoot(std::move(childHashes));
     auto right = claimHashes.empty() ? emptyHash : ComputeMerkleRoot(std::move(claimHashes));
 
-    auto computedHash = Hash(left.begin(), left.end(), right.begin(), right.end());
-    if (!checkOnly)
-        db << "UPDATE nodes SET hash = ? WHERE name = ?" << computedHash << name;
-    return computedHash;
+    return Hash(left.begin(), left.end(), right.begin(), right.end());
 }
 
 std::vector<CUint256> ComputeMerklePath(const std::vector<CUint256>& hashes, uint32_t idx)
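The new computeNodeHash above reduces the child hashes and the claim hashes with ComputeMerkleRoot and then hashes the two roots together. Only the tail of ComputeMerkleRoot is visible in this hunk; a typical Bitcoin-style pairwise reduction consistent with that tail would look roughly like the sketch below (not the verbatim function body; it reuses the CUint256 and Hash helpers from the code above):

// Sketch only: a pairwise reduction consistent with the visible tail of ComputeMerkleRoot.
CUint256 ComputeMerkleRootSketch(std::vector<CUint256> hashes)
{
    while (hashes.size() > 1) {
        if (hashes.size() & 1)                     // odd count: pair the last element with itself
            hashes.push_back(hashes.back());
        std::vector<CUint256> row;
        for (std::size_t i = 0; i < hashes.size(); i += 2)
            row.push_back(Hash(hashes[i].begin(), hashes[i].end(),
                               hashes[i + 1].begin(), hashes[i + 1].end()));
        hashes = std::move(row);
    }
    return hashes.empty() ? CUint256{} : hashes[0];
}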
@@ -79,7 +79,7 @@ public:
     bool allowSupportMetadata() const;
 
 protected:
-    CUint256 recursiveComputeMerkleHash(const std::string& name, int takeoverHeight, bool checkOnly) override;
+    CUint256 computeNodeHash(const std::string& name, int takeoverHeight) override;
 };
 
 typedef CClaimTrieCacheHashFork CClaimTrieCache;
@@ -404,38 +404,25 @@ void completeHash(CUint256& partialHash, const std::string& key, int to)
         partialHash = Hash(it, it + 1, partialHash.begin(), partialHash.end());
 }
 
-CUint256 CClaimTrieCacheBase::recursiveComputeMerkleHash(const std::string& name, int takeoverHeight, bool checkOnly)
+CUint256 CClaimTrieCacheBase::computeNodeHash(const std::string& name, int takeoverHeight)
 {
-    std::vector<uint8_t> vchToHash;
     const auto pos = name.size();
+    std::vector<uint8_t> vchToHash;
     // we have to free up the hash query so it can be reused by a child
-    std::vector<std::tuple<std::string, std::unique_ptr<CUint256>, int>> children;
-    childHashQuery << name >> [&children](std::string name, std::unique_ptr<CUint256> hash, int takeoverHeight) {
-        children.push_back(std::make_tuple(std::move(name), std::move(hash), takeoverHeight));
+    childHashQuery << name >> [&vchToHash, pos](std::string name, CUint256 hash) {
+        completeHash(hash, name, pos);
+        vchToHash.push_back(name[pos]);
+        vchToHash.insert(vchToHash.end(), hash.begin(), hash.end());
     };
     childHashQuery++;
 
-    for (auto& child: children) {
-        auto& name = std::get<0>(child);
-        auto& hash = std::get<1>(child);
-        if (!hash) hash = std::make_unique<CUint256>();
-        if (hash->IsNull())
-            *hash = recursiveComputeMerkleHash(name, std::get<2>(child), checkOnly);
-        completeHash(*hash, name, pos);
-        vchToHash.push_back(name[pos]);
-        vchToHash.insert(vchToHash.end(), hash->begin(), hash->end());
-    }
-
     CClaimValue claim;
     if (getInfoForName(name, claim)) {
-        CUint256 valueHash = getValueHash(claim.outPoint, takeoverHeight);
+        auto valueHash = getValueHash(claim.outPoint, takeoverHeight);
         vchToHash.insert(vchToHash.end(), valueHash.begin(), valueHash.end());
     }
 
-    auto computedHash = vchToHash.empty() ? one : Hash(vchToHash.begin(), vchToHash.end());
-    if (!checkOnly)
-        db << "UPDATE nodes SET hash = ? WHERE name = ?" << computedHash << name;
-    return computedHash;
+    return vchToHash.empty() ? one : Hash(vchToHash.begin(), vchToHash.end());
 }
 
 bool CClaimTrieCacheBase::checkConsistency()
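For reference, the buffer that CClaimTrieCacheBase::computeNodeHash feeds to Hash() is laid out as: for each child, the first character past the parent's name, followed by the child's hash with the remainder of its name folded in by completeHash; then the value hash of the node's own claim, if any. A minimal sketch of that layout follows (buildNodeBuffer and byte_vec are hypothetical, not part of the codebase):

// Illustration only: shows the byte layout described above, assuming each child hash
// already has the rest of the child's name folded in via completeHash.
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

using byte_vec = std::vector<uint8_t>;   // stand-in for the 32-byte digests used above

byte_vec buildNodeBuffer(std::size_t pos,
                         const std::vector<std::pair<std::string, byte_vec>>& children,
                         const byte_vec& valueHash)
{
    byte_vec vchToHash;
    for (const auto& [childName, childHash] : children) {
        vchToHash.push_back(childName[pos]);                  // first byte past the parent name
        vchToHash.insert(vchToHash.end(), childHash.begin(), childHash.end());
    }
    // appended only when the node carries a claim (i.e. getInfoForName succeeds)
    vchToHash.insert(vchToHash.end(), valueHash.begin(), valueHash.end());
    return vchToHash;                                         // node hash = Hash(vchToHash)
}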
@@ -448,15 +435,17 @@ bool CClaimTrieCacheBase::checkConsistency()
         CUint256 hash;
         int takeoverHeight;
         row >> name >> hash >> takeoverHeight;
-        auto computedHash = recursiveComputeMerkleHash(name, takeoverHeight, true);
+        auto computedHash = computeNodeHash(name, takeoverHeight);
         if (computedHash != hash)
             return false;
     }
     return true;
 }
 
-bool CClaimTrieCacheBase::validateDb(const CUint256& rootHash)
+bool CClaimTrieCacheBase::validateDb(int height, const CUint256& rootHash)
 {
+    base->nNextHeight = nNextHeight = height + 1;
+
     logPrint << "Checking claim trie consistency... " << Clog::flush;
     if (checkConsistency()) {
         logPrint << "consistent" << Clog::endl;
@@ -520,6 +509,7 @@ CClaimTrieCacheBase::CClaimTrieCacheBase(CClaimTrie* base)
     db << "PRAGMA temp_store=MEMORY";
     db << "PRAGMA case_sensitive_like=true";
 
+    db.define("SIZE", [](const std::string& s) -> int { return s.size(); });
     db.define("POPS", [](std::string s) -> std::string { if (!s.empty()) s.pop_back(); return s; });
 }
 
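The constructor now registers a SIZE() scalar function alongside POPS(); the new getMerkleHash below relies on it to order dirty nodes by name length. The commit does this through the C++ wrapper's db.define() shown above; for reference, the equivalent registration with the raw SQLite C API would look roughly like this sketch (dbHandle is a hypothetical open sqlite3* handle, not something the codebase exposes):

// Sketch under assumptions: same semantics as the db.define("SIZE", ...) lambda above.
#include <sqlite3.h>

static void sizeFunc(sqlite3_context* ctx, int argc, sqlite3_value** argv)
{
    // byte length of the argument, mirroring the lambda's `return s.size();`
    sqlite3_result_int(ctx, argc > 0 ? sqlite3_value_bytes(argv[0]) : 0);
}

static int registerSizeFunction(sqlite3* dbHandle)
{
    return sqlite3_create_function(dbHandle, "SIZE", 1,
                                   SQLITE_UTF8 | SQLITE_DETERMINISTIC,
                                   nullptr, sizeFunc, nullptr, nullptr);
}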
@@ -539,15 +529,21 @@ int CClaimTrieCacheBase::expirationTime() const
 CUint256 CClaimTrieCacheBase::getMerkleHash()
 {
     ensureTreeStructureIsUpToDate();
-    std::unique_ptr<CUint256> hash;
-    int takeoverHeight;
-    // can't use childHashQuery here because "IS NULL" must be used instead of parent = NULL
-    db << "SELECT hash, IFNULL(takeoverHeight, 0) FROM nodes WHERE name = ''" >> std::tie(hash, takeoverHeight);
-    if (hash == nullptr || hash->IsNull()) {
-        assert(transacting); // no data changed but we didn't have the root hash there already?
-        return recursiveComputeMerkleHash("", takeoverHeight, false);
-    }
-    return *hash;
+    CUint256 hash;
+    db << "SELECT hash FROM nodes WHERE name = ''"
+        >> [&hash](std::unique_ptr<CUint256> rootHash) {
+            if (rootHash)
+                hash = std::move(*rootHash);
+        };
+    if (!hash.IsNull())
+        return hash;
+    assert(transacting); // no data changed but we didn't have the root hash there already?
+    db << "SELECT name, IFNULL(takeoverHeight, 0) FROM nodes WHERE hash IS NULL ORDER BY SIZE(name) DESC"
+        >> [this, &hash](const std::string& name, int takeoverHeight) {
+            hash = computeNodeHash(name, takeoverHeight);
+            db << "UPDATE nodes SET hash = ? WHERE name = ?" << hash << name;
+        };
+    return hash;
 }
 
 bool CClaimTrieCacheBase::getLastTakeoverForName(const std::string& name, CUint160& claimId, int& takeoverHeight) const
@@ -83,7 +83,7 @@ public:
     bool flush();
     bool checkConsistency();
     CUint256 getMerkleHash();
-    bool validateDb(const CUint256& rootHash);
+    bool validateDb(int height, const CUint256& rootHash);
 
     std::size_t getTotalNamesInTrie() const;
     std::size_t getTotalClaimsInTrie() const;

@@ -138,7 +138,7 @@ protected:
 
     mutable sqlite::database_binder claimHashQuery, childHashQuery;
 
-    virtual CUint256 recursiveComputeMerkleHash(const std::string& name, int takeoverHeight, bool checkOnly);
+    virtual CUint256 computeNodeHash(const std::string& name, int takeoverHeight);
     supportEntryType getSupportsForName(const std::string& name) const;
 
     virtual int getDelayForName(const std::string& name, const CUint160& claimId) const;
src/init.cpp (35 changed lines)
@@ -1454,20 +1454,6 @@ bool AppInitMain()
                 // fails if it's still open from the previous loop. Close it first:
                 pblocktree.reset();
                 pblocktree.reset(new CBlockTreeDB(nBlockTreeDBCache, false, fReset));
-                delete pclaimTrie;
-                auto& consensus = chainparams.GetConsensus();
-                if (g_logger->Enabled() && LogAcceptCategory(BCLog::CLAIMS))
-                    CLogPrint::global().setLogger(g_logger);
-                auto dataDir = GetDataDir() / "claimtrie";
-                TryCreateDirectories(dataDir);
-                pclaimTrie = new CClaimTrie(fReindex || fReindexChainState, 0,
-                                            dataDir.string(),
-                                            consensus.nNormalizedNameForkHeight,
-                                            consensus.nOriginalClaimExpirationTime,
-                                            consensus.nExtendedClaimExpirationTime,
-                                            consensus.nExtendedClaimExpirationForkHeight,
-                                            consensus.nAllClaimsInMerkleForkHeight,
-                                            32);
 
                 if (fReset) {
                     pblocktree->WriteReindexing(true);

@@ -1522,6 +1508,20 @@ bool AppInitMain()
                     break;
                 }
 
+                if (g_logger->Enabled() && LogAcceptCategory(BCLog::CLAIMS))
+                    CLogPrint::global().setLogger(g_logger);
+
+                delete pclaimTrie;
+                auto& consensus = chainparams.GetConsensus();
+                pclaimTrie = new CClaimTrie(fReindex || fReindexChainState, 0,
+                                            GetDataDir().string(),
+                                            consensus.nNormalizedNameForkHeight,
+                                            consensus.nOriginalClaimExpirationTime,
+                                            consensus.nExtendedClaimExpirationTime,
+                                            consensus.nExtendedClaimExpirationForkHeight,
+                                            consensus.nAllClaimsInMerkleForkHeight,
+                                            32);
+
                 // ReplayBlocks is a no-op if we cleared the coinsviewdb with -reindex or -reindex-chainstate
                 if (!ReplayBlocks(chainparams, pcoinsdbview.get())) {
                     strLoadError = _("Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.");

@@ -1540,12 +1540,13 @@ bool AppInitMain()
                 }
                 assert(chainActive.Tip() != nullptr);
             }
 
             auto tip = chainActive.Tip();
-            assert(tip);
-            if (!CClaimTrieCache(pclaimTrie).validateDb(tip->hashClaimTrie)) {
-                strLoadError = _("Error loading the claim trie from disk");
+            if (tip && !CClaimTrieCache(pclaimTrie).validateDb(tip->nHeight, tip->hashClaimTrie)) {
+                strLoadError = _("Error validating the claim trie from disk");
                 break;
             }
 
             if (!fReset) {
                 // Note that RewindBlockIndex MUST run even if we're about to -reindex-chainstate.
                 // It both disconnects blocks based on chainActive, and drops block data in
@@ -148,9 +148,7 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
     pcoinsdbview.reset(new CCoinsViewDB(1 << 23, true));
     pcoinsTip.reset(new CCoinsViewCache(pcoinsdbview.get()));
     auto& consensus = chainparams.GetConsensus();
-    auto dataDir = GetDataDir() / "claimtrie";
-    TryCreateDirectories(dataDir);
-    pclaimTrie = new CClaimTrie(true, 0, dataDir.string(),
+    pclaimTrie = new CClaimTrie(true, 0, GetDataDir().string(),
                                 consensus.nNormalizedNameForkHeight,
                                 consensus.nOriginalClaimExpirationTime,
                                 consensus.nExtendedClaimExpirationTime,