// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "txdb.h"

#include "core.h"
#include "pow.h"
#include "uint256.h"

#include <stdint.h>

#include <boost/thread.hpp>

using namespace std;

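// Stage coin database updates into a LevelDB batch: coin records are keyed by
// 'c' + txid, fully spent (pruned) entries are erased, and the best-chain
// block hash is stored under the single key 'B'.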
void static BatchWriteCoins(CLevelDBBatch &batch, const uint256 &hash, const CCoins &coins) {
    if (coins.IsPruned())
        batch.Erase(make_pair('c', hash));
    else
        batch.Write(make_pair('c', hash), coins);
}

void static BatchWriteHashBestChain(CLevelDBBatch &batch, const uint256 &hash) {
    batch.Write('B', hash);
}

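// The coin (chainstate) database lives in the "chainstate" directory under the data directory.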
CCoinsViewDB::CCoinsViewDB(size_t nCacheSize, bool fMemory, bool fWipe) : db(GetDataDir() / "chainstate", nCacheSize, fMemory, fWipe) {
}

bool CCoinsViewDB::GetCoins(const uint256 &txid, CCoins &coins) const {
    return db.Read(make_pair('c', txid), coins);
}

bool CCoinsViewDB::SetCoins(const uint256 &txid, const CCoins &coins) {
    CLevelDBBatch batch;
    BatchWriteCoins(batch, txid, coins);
    return db.WriteBatch(batch);
}

bool CCoinsViewDB::HaveCoins(const uint256 &txid) const {
    return db.Exists(make_pair('c', txid));
}

uint256 CCoinsViewDB::GetBestBlock() const {
    uint256 hashBestChain;
    if (!db.Read('B', hashBestChain))
        return uint256(0);
    return hashBestChain;
}

bool CCoinsViewDB::SetBestBlock(const uint256 &hashBlock) {
    CLevelDBBatch batch;
    BatchWriteHashBestChain(batch, hashBlock);
    return db.WriteBatch(batch);
}

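// Flush an in-memory coin map to disk in a single LevelDB batch, emptying the
// map as it goes and recording the new best block hash alongside the coins.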
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
    LogPrint("coindb", "Committing %u changed transactions to coin database...\n", (unsigned int)mapCoins.size());

    CLevelDBBatch batch;
    for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
        BatchWriteCoins(batch, it->first, it->second);
        CCoinsMap::iterator itOld = it++;
        mapCoins.erase(itOld);
    }
    if (hashBlock != uint256(0))
        BatchWriteHashBestChain(batch, hashBlock);

    return db.WriteBatch(batch);
}

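// The block tree database lives under "blocks/index"; block index entries are
// keyed by 'b' + block hash, block file info by 'f' + file number.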
CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CLevelDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) {
}

bool CBlockTreeDB::WriteBlockIndex(const CDiskBlockIndex& blockindex)
{
    return Write(make_pair('b', blockindex.GetBlockHash()), blockindex);
}

bool CBlockTreeDB::WriteBlockFileInfo(int nFile, const CBlockFileInfo &info) {
    return Write(make_pair('f', nFile), info);
}

bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) {
    return Read(make_pair('f', nFile), info);
}

bool CBlockTreeDB::WriteLastBlockFile(int nFile) {
    return Write('l', nFile);
}

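// Reindexing state is tracked purely by the presence of the 'R' key.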
bool CBlockTreeDB::WriteReindexing(bool fReindexing) {
    if (fReindexing)
        return Write('R', '1');
    else
        return Erase('R');
}

bool CBlockTreeDB::ReadReindexing(bool &fReindexing) {
    fReindexing = Exists('R');
    return true;
}

bool CBlockTreeDB::ReadLastBlockFile(int &nFile) {
    return Read('l', nFile);
}

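// Compute statistics over the unspent transaction output set by scanning every
// 'c' record: counts, total amount, and a hash of the serialized UTXO data.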
bool CCoinsViewDB::GetStats(CCoinsStats &stats) const {
    /* It seems that there are no "const iterators" for LevelDB. Since we
       only need read operations on it, use a const-cast to get around
       that restriction. */
    leveldb::Iterator *pcursor = const_cast<CLevelDBWrapper*>(&db)->NewIterator();
    pcursor->SeekToFirst();

    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    stats.hashBlock = GetBestBlock();
    ss << stats.hashBlock;
    int64_t nTotalAmount = 0;
    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        try {
            leveldb::Slice slKey = pcursor->key();
            CDataStream ssKey(slKey.data(), slKey.data()+slKey.size(), SER_DISK, CLIENT_VERSION);
            char chType;
            ssKey >> chType;
            if (chType == 'c') {
                leveldb::Slice slValue = pcursor->value();
                CDataStream ssValue(slValue.data(), slValue.data()+slValue.size(), SER_DISK, CLIENT_VERSION);
                CCoins coins;
                ssValue >> coins;
                uint256 txhash;
                ssKey >> txhash;
                ss << txhash;
                ss << VARINT(coins.nVersion);
                ss << (coins.fCoinBase ? 'c' : 'n');
                ss << VARINT(coins.nHeight);
                stats.nTransactions++;
                for (unsigned int i=0; i<coins.vout.size(); i++) {
                    const CTxOut &out = coins.vout[i];
                    if (!out.IsNull()) {
                        stats.nTransactionOutputs++;
                        ss << VARINT(i+1);
                        ss << out;
                        nTotalAmount += out.nValue;
                    }
                }
                stats.nSerializedSize += 32 + slValue.size();
                ss << VARINT(0);
            }
            pcursor->Next();
        } catch (std::exception &e) {
            return error("%s : Deserialize or I/O error - %s", __func__, e.what());
        }
    }
    delete pcursor; // LevelDB iterators are heap-allocated; release it once the scan is done
    stats.nHeight = mapBlockIndex.find(GetBestBlock())->second->nHeight;
    stats.hashSerialized = ss.GetHash();
    stats.nTotalAmount = nTotalAmount;
    return true;
}

bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
    return Read(make_pair('t', txid), pos);
}

bool CBlockTreeDB::WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> >&vect) {
    CLevelDBBatch batch;
    for (std::vector<std::pair<uint256,CDiskTxPos> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
        batch.Write(make_pair('t', it->first), it->second);
    return WriteBatch(batch);
}

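// Named boolean flags are stored under 'F' + name as the characters '1'/'0'.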
bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) {
    return Write(std::make_pair('F', name), fValue ? '1' : '0');
}

bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
    char ch;
    if (!Read(std::make_pair('F', name), ch))
        return false;
    fValue = ch == '1';
    return true;
}

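// Load every 'b' record into mapBlockIndex, rebuilding CBlockIndex entries and
// checking each header's proof of work along the way.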
bool CBlockTreeDB::LoadBlockIndexGuts()
{
    boost::scoped_ptr<leveldb::Iterator> pcursor(NewIterator());

    CDataStream ssKeySet(SER_DISK, CLIENT_VERSION);
    ssKeySet << make_pair('b', uint256(0));
    pcursor->Seek(ssKeySet.str());

    // Load mapBlockIndex
    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        try {
            leveldb::Slice slKey = pcursor->key();
            CDataStream ssKey(slKey.data(), slKey.data()+slKey.size(), SER_DISK, CLIENT_VERSION);
            char chType;
            ssKey >> chType;
            if (chType == 'b') {
                leveldb::Slice slValue = pcursor->value();
                CDataStream ssValue(slValue.data(), slValue.data()+slValue.size(), SER_DISK, CLIENT_VERSION);
                CDiskBlockIndex diskindex;
                ssValue >> diskindex;

                // Construct block index object
                CBlockIndex* pindexNew = InsertBlockIndex(diskindex.GetBlockHash());
                pindexNew->pprev          = InsertBlockIndex(diskindex.hashPrev);
                pindexNew->nHeight        = diskindex.nHeight;
                pindexNew->nFile          = diskindex.nFile;
                pindexNew->nDataPos       = diskindex.nDataPos;
                pindexNew->nUndoPos       = diskindex.nUndoPos;
                pindexNew->nVersion       = diskindex.nVersion;
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
                pindexNew->nTime          = diskindex.nTime;
                pindexNew->nBits          = diskindex.nBits;
                pindexNew->nNonce         = diskindex.nNonce;
                pindexNew->nStatus        = diskindex.nStatus;
                pindexNew->nTx            = diskindex.nTx;

                if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits))
                    return error("LoadBlockIndex() : CheckProofOfWork failed: %s", pindexNew->ToString());

                pcursor->Next();
            } else {
                break; // if shutdown requested or finished loading block index
            }
        } catch (std::exception &e) {
            return error("%s : Deserialize or I/O error - %s", __func__, e.what());
        }
    }

    return true;
}