Use fully static linkage #364

Closed
bvbfan wants to merge 78 commits from static_link into master
5 changed files with 31 additions and 22 deletions
Showing only changes of commit a7a790342b


@@ -64,7 +64,8 @@ bool CClaimScriptUndoAddOp::undoAddClaim(CClaimTrieCache& trieCache, const std::
     int validHeight, originalHeight;
     bool res = trieCache.removeClaim(claimId, point, nodeName, validHeight, originalHeight);
     if (!res)
-        LogPrint(BCLog::CLAIMS, "Remove claim failed for %s with claimid %s\n", name, claimId.GetHex().substr(0, 6));
+        LogPrint(BCLog::CLAIMS, "Remove claim failed for %s (%s) with claimID %.6s, t:%.6s:%d\n", name, nodeName,
+                 claimId.GetHex(), point.hash.GetHex(), point.n);
     return res;
 }
@@ -76,7 +77,8 @@ bool CClaimScriptUndoAddOp::supportClaim(CClaimTrieCache& trieCache, const std::
     int validHeight;
     bool res = trieCache.removeSupport(point, nodeName, validHeight);
     if (!res)
-        LogPrint(BCLog::CLAIMS, "Remove support failed for %s with claimid %s\n", name, claimId.GetHex().substr(0, 6));
+        LogPrint(BCLog::CLAIMS, "Remove support failed for %s with claimID %.6s, t:%.6s:%d\n", name,
+                 claimId.GetHex(), point.hash.GetHex(), point.n);
     return res;
 }
@@ -189,6 +191,7 @@ void UpdateCache(const CTransaction& tx, CClaimTrieCache& trieCache, const CCoin
     bool spendClaim(CClaimTrieCache& trieCache, const std::string& name, const uint160& claimId) override
     {
         if (CClaimScriptSpendOp::spendClaim(trieCache, name, claimId)) {
+            assert(nOriginalHeight >= 0);
             callback(name, claimId, nOriginalHeight);
             return true;
         }
@@ -256,6 +259,7 @@ void UpdateCache(const CTransaction& tx, CClaimTrieCache& trieCache, const CCoin
     for (auto itSpent = spentClaims.begin(); itSpent != spentClaims.end(); ++itSpent) {
         if (itSpent->id == claimId && trieCache.normalizeClaimName(name) == trieCache.normalizeClaimName(itSpent->name)) {
             spentClaims.erase(itSpent);
+            assert(itSpent->originalHeight >= 0);
             return itSpent->originalHeight;
         }
     }
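
One caveat with the second assert: spentClaims.erase(itSpent) invalidates itSpent, so reading itSpent->originalHeight afterwards is undefined behaviour (the pre-existing return after the erase has the same problem). A sketch of a safe ordering, assuming spentClaims is a standard container whose entries carry an originalHeight field:

    if (itSpent->id == claimId && trieCache.normalizeClaimName(name) == trieCache.normalizeClaimName(itSpent->name)) {
        assert(itSpent->originalHeight >= 0);
        auto originalHeight = itSpent->originalHeight; // copy out before the iterator is invalidated
        spentClaims.erase(itSpent);
        return originalHeight;
    }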


@@ -62,7 +62,7 @@ namespace sqlite
     int code = SQLITE_OK;
     for (auto i = 0u; i < attempts; ++i) {
         try {
-            db << "commit";
+            db << "COMMIT";
         } catch (const sqlite_exception& e) {
             code = e.get_code();
             if (code == SQLITE_LOCKED || code == SQLITE_BUSY) {
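
The lower-case to upper-case statement changes in this and the following files are cosmetic: SQLite parses keywords case-insensitively, so "commit" and "COMMIT" run the same statement and the retry loop behaves exactly as before. A tiny standalone check (illustrative, using the raw C API rather than the wrapper above):

    #include <cassert>
    #include <sqlite3.h>

    int main()
    {
        sqlite3* db = nullptr;
        sqlite3_open(":memory:", &db);
        // mixed-case keywords are accepted; "begin"/"COMMIT" form one transaction
        assert(sqlite3_exec(db, "begin",  nullptr, nullptr, nullptr) == SQLITE_OK);
        assert(sqlite3_exec(db, "COMMIT", nullptr, nullptr, nullptr) == SQLITE_OK);
        sqlite3_close(db);
        return 0;
    }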


@@ -119,7 +119,7 @@ CClaimTrie::CClaimTrie(std::size_t cacheBytes, bool fWipe, int height,
 CClaimTrieCacheBase::~CClaimTrieCacheBase()
 {
     if (transacting) {
-        db << "rollback";
+        db << "ROLLBACK";
         transacting = false;
     }
     claimHashQuery.used(true);
@@ -525,7 +525,7 @@ void CClaimTrieCacheBase::ensureTransacting()
 {
     if (!transacting) {
         transacting = true;
-        db << "begin";
+        db << "BEGIN";
     }
 }
@@ -634,22 +634,27 @@ bool CClaimTrieCacheBase::addSupport(const std::string& name, const COutPoint& o
 bool CClaimTrieCacheBase::removeClaim(const uint160& claimId, const COutPoint& outPoint, std::string& nodeName,
                                       int& validHeight, int& originalHeight)
 {
-    ensureTransacting();
     // this gets tricky in that we may be removing an update
     // when going forward we spend a claim (aka, call removeClaim) before updating it (aka, call addClaim)
    // when going backwards we first remove the update by calling removeClaim
     // we then undo the spend of the previous one by calling addClaim with the original data
     // in order to maintain the proper takeover height the updater will need to use our height returned here
-    auto query = db << "SELECT nodeName, activationHeight, originalHeight FROM claim WHERE claimID = ? AND txID = ? AND txN = ? AND expirationHeight >= ?"
-                    << claimId << outPoint.hash << outPoint.n << nNextHeight;
-    auto it = query.begin();
-    if (it == query.end())
-        return false;
-    *it >> nodeName >> validHeight >> originalHeight;
-    db << "DELETE FROM claim WHERE claimID = ? AND txID = ? and txN = ?" << claimId << outPoint.hash << outPoint.n;
+    {
+        auto query = db << "SELECT nodeName, originalHeight, activationHeight FROM claim "
+                           "WHERE claimID = ? AND txID = ? AND txN = ? AND expirationHeight >= ?"
+                        << claimId << outPoint.hash << outPoint.n << nNextHeight;
+        auto it = query.begin();
+        if (it == query.end())
+            return false;
+        *it >> nodeName >> originalHeight >> validHeight;
+    }
+    ensureTransacting();
+    db << "DELETE FROM claim WHERE claimID = ? AND txID = ? AND txN = ?"
+       << claimId << outPoint.hash << outPoint.n;
     if (!db.rows_modified())
         return false;
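
Besides the column swap, two things change here: the SELECT now lives in its own scope, so the query object is torn down before the DELETE executes, and ensureTransacting() (which issues BEGIN lazily) is called only after a matching row is found, so a lookup miss no longer opens a write transaction. The reordered SELECT columns must stay in sync with the extraction order; a sketch of the same lookup using the wrapper's row-callback form (illustrative only, not the committed code):

    // extraction order mirrors the SELECT column list
    db << "SELECT nodeName, originalHeight, activationHeight FROM claim "
          "WHERE claimID = ? AND txID = ? AND txN = ?"
       << claimId << outPoint.hash << outPoint.n
       >> [&](std::string name, int original, int activation) {
              nodeName = name;
              originalHeight = original;
              validHeight = activation;   // activationHeight feeds the validHeight out-param
          };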


@@ -111,7 +111,7 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, boo
         }
     }
-    db << "begin";
+    db << "BEGIN";
     db << "INSERT OR REPLACE INTO marker VALUES('head_block', ?)" << hashBlock;
     for (auto it = mapCoins.begin(); it != mapCoins.end();) {
         if (it->second.flags & CCoinsCacheEntry::DIRTY) {
@@ -300,7 +300,7 @@ void CCoinsViewDBCursor::Next()
 bool CBlockTreeDB::BatchWrite(const std::vector<std::pair<int, const CBlockFileInfo*> >& fileInfo,
                               int nLastFile, const std::vector<const CBlockIndex*>& blockInfo, bool sync) {
-    db << "begin";
+    db << "BEGIN";
     auto ibf = db << "INSERT OR REPLACE INTO block_file(file, blocks, size, undoSize, heightFirst, heightLast, timeFirst, timeLast) "
                      "VALUES(?,?,?,?,?,?,?,?)";
     for (auto& kvp: fileInfo) {
@@ -394,7 +394,7 @@ bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams,
 bool CBlockTreeDB::WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos>> &list) {
     if (list.empty()) return true;
-    db << "begin";
+    db << "BEGIN";
     auto query = db << "INSERT OR REPLACE INTO tx_to_block VALUES(?,?,?,?)";
     for (auto& kvp: list) {
         query << kvp.first << kvp.second.nFile << kvp.second.nPos << kvp.second.nTxOffset;


@@ -1488,8 +1488,8 @@ int ApplyTxInUndo(unsigned int index, CTxUndo& txUndo, CCoinsViewCache& view, CC
     // restore claim if applicable
     if (undo.fIsClaim && !undo.txout.scriptPubKey.empty()) {
-        auto nValidHeight = undo.nClaimValidHeight;
-        auto nOriginalHeight = undo.nClaimOriginalHeight;
+        auto nValidHeight = int(undo.nClaimValidHeight);
+        auto nOriginalHeight = int(undo.nClaimOriginalHeight);
         // assert(nValidHeight > 0 && nOriginalHeight > 0); // fails unit tests
         CClaimScriptUndoSpendOp undoSpend(COutPoint(out.hash, out.n), undo.txout.nValue, undo.nHeight,
                                           nValidHeight, nOriginalHeight);
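
If the undo fields hold the heights as an unsigned type (an assumption here, not stated in the diff), plain auto would deduce that unsigned type for the locals; the explicit int(...) casts keep them signed, consistent with the int parameters downstream and with the nOriginalHeight >= 0 asserts added earlier in this commit. Illustrative sketch of the deduction difference:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint32_t nClaimValidHeight = 1488u;     // assumed storage type in the undo data
        auto a = nClaimValidHeight;             // deduces uint32_t
        auto b = int(nClaimValidHeight);        // deduces int, matching int parameters downstream
        std::printf("%u %d\n", a, b);
        return 0;
    }
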
@@ -1552,12 +1552,12 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI
         }
         // remove any claims
-        for (size_t j = 0; j < tx.vout.size(); j++)
+        for (uint32_t j = tx.vout.size(); j > 0; --j)
         {
-            const CTxOut& txout = tx.vout[j];
+            const CTxOut& txout = tx.vout[j - 1];
             if (!txout.scriptPubKey.empty()) {
-                CClaimScriptUndoAddOp undoAdd(COutPoint(hash, j), pindex->nHeight);
+                CClaimScriptUndoAddOp undoAdd(COutPoint(hash, j - 1), pindex->nHeight);
                 ProcessClaim(undoAdd, trieCache, txout.scriptPubKey);
             }
         }
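
This loop now undoes the outputs in reverse index order (last output first). Counting j down from size() to 1 and indexing with j - 1 is the usual way to do that with an unsigned counter, since it never decrements below zero. Minimal sketch of the idiom:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<int> vout{10, 20, 30};
        // visits indices 2, 1, 0 without unsigned underflow
        for (uint32_t j = vout.size(); j > 0; --j)
            std::printf("undo vout[%u] = %d\n", j - 1, vout[j - 1]);
        return 0;
    }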