2010-08-29 18:58:15 +02:00
// Copyright (c) 2009-2010 Satoshi Nakamoto
2018-07-27 00:36:45 +02:00
// Copyright (c) 2009-2018 The Bitcoin Core developers
2014-12-13 05:09:33 +01:00
// Distributed under the MIT software license, see the accompanying
2012-05-18 16:02:28 +02:00
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
2010-08-29 18:58:15 +02:00
2017-11-10 01:57:53 +01:00
# include <wallet/db.h>
2013-04-13 07:13:08 +02:00
2017-11-10 01:57:53 +01:00
# include <addrman.h>
# include <hash.h>
# include <protocol.h>
2018-10-23 00:51:11 +02:00
# include <util/strencodings.h>
2017-10-08 22:48:07 +02:00
# include <wallet/walletutil.h>
2013-04-13 07:13:08 +02:00
# include <stdint.h>
2010-08-29 18:58:15 +02:00
2012-04-15 22:10:54 +02:00
# ifndef WIN32
2013-04-13 07:13:08 +02:00
# include <sys/stat.h>
2012-04-15 22:10:54 +02:00
# endif
Split up util.cpp/h
Split up util.cpp/h into:
- string utilities (hex, base32, base64): no internal dependencies, no dependency on boost (apart from foreach)
- money utilities (parsesmoney, formatmoney)
- time utilities (gettime*, sleep, format date):
- and the rest (logging, argument parsing, config file parsing)
The latter is basically the environment and OS handling,
and is stripped of all utility functions, so we may want to
rename it to something else than util.cpp/h for clarity (Matt suggested
osinterface).
Breaks dependency of sha256.cpp on all the things pulled in by util.
2014-08-21 16:11:09 +02:00
# include <boost/thread.hpp>
2014-09-14 12:43:56 +02:00
2017-10-10 21:27:26 +02:00
namespace {
2018-09-25 15:56:16 +02:00
2017-10-10 21:27:26 +02:00
//! Make sure database has a unique fileid within the environment. If it
//! doesn't, throw an error. BDB caches do not work properly when more than one
//! open database has the same fileid (values written to one database may show
//! up in reads to other databases).
//!
//! BerkeleyDB generates unique fileids by default
//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html),
//! so bitcoin should never create different databases with the same fileid, but
//! this error can be triggered if users manually copy database files.
2018-09-25 15:56:16 +02:00
void CheckUniqueFileid ( const BerkeleyEnvironment & env , const std : : string & filename , Db & db , WalletDatabaseFileId & fileid )
2017-10-10 21:27:26 +02:00
{
if ( env . IsMock ( ) ) return ;
2018-09-25 15:56:16 +02:00
int ret = db . get_mpf ( ) - > get_fileid ( fileid . value ) ;
2017-10-10 21:27:26 +02:00
if ( ret ! = 0 ) {
2017-12-08 12:39:22 +01:00
throw std : : runtime_error ( strprintf ( " BerkeleyBatch: Can't open database %s (get_fileid failed with %d) " , filename , ret ) ) ;
2017-10-10 21:27:26 +02:00
}
2018-09-25 15:56:16 +02:00
for ( const auto & item : env . m_fileids ) {
if ( fileid = = item . second & & & fileid ! = & item . second ) {
2017-12-08 12:39:22 +01:00
throw std : : runtime_error ( strprintf ( " BerkeleyBatch: Can't open database %s (duplicates fileid %s from %s) " , filename ,
2018-09-25 15:56:16 +02:00
HexStr ( std : : begin ( item . second . value ) , std : : end ( item . second . value ) ) , item . first ) ) ;
2017-10-10 21:27:26 +02:00
}
}
}
2017-11-14 03:25:46 +01:00
CCriticalSection cs_db ;
2018-05-18 22:28:50 +02:00
std : : map < std : : string , std : : weak_ptr < BerkeleyEnvironment > > g_dbenvs GUARDED_BY ( cs_db ) ; //!< Map from directory name to db environment.
2017-10-10 21:27:26 +02:00
} // namespace
2018-09-25 15:56:16 +02:00
bool WalletDatabaseFileId : : operator = = ( const WalletDatabaseFileId & rhs ) const
{
return memcmp ( value , & rhs . value , sizeof ( value ) ) = = 0 ;
}
2018-10-23 07:26:27 +02:00
static void SplitWalletPath ( const fs : : path & wallet_path , fs : : path & env_directory , std : : string & database_filename )
2017-11-14 03:25:46 +01:00
{
2017-11-15 21:44:36 +01:00
if ( fs : : is_regular_file ( wallet_path ) ) {
// Special case for backwards compatibility: if wallet path points to an
// existing file, treat it as the path to a BDB data file in a parent
// directory that also contains BDB log files.
env_directory = wallet_path . parent_path ( ) ;
database_filename = wallet_path . filename ( ) . string ( ) ;
} else {
// Normal case: Interpret wallet path as a directory path containing
// data and log files.
env_directory = wallet_path ;
database_filename = " wallet.dat " ;
}
2018-10-23 07:26:27 +02:00
}
bool IsWalletLoaded ( const fs : : path & wallet_path )
{
fs : : path env_directory ;
std : : string database_filename ;
SplitWalletPath ( wallet_path , env_directory , database_filename ) ;
LOCK ( cs_db ) ;
auto env = g_dbenvs . find ( env_directory . string ( ) ) ;
if ( env = = g_dbenvs . end ( ) ) return false ;
2018-05-18 22:28:50 +02:00
auto database = env - > second . lock ( ) ;
return database & & database - > IsDatabaseLoaded ( database_filename ) ;
2018-10-23 07:26:27 +02:00
}
2018-09-14 20:13:16 +02:00
/**
* @ param [ in ] wallet_path Path to wallet directory . Or ( for backwards compatibility only ) a path to a berkeley btree data file inside a wallet directory .
* @ param [ out ] database_filename Filename of berkeley btree data file inside the wallet directory .
* @ return A shared pointer to the BerkeleyEnvironment object for the wallet directory , never empty because ~ BerkeleyEnvironment
* erases the weak pointer from the g_dbenvs map .
* @ post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map .
*/
2018-05-18 22:28:50 +02:00
std : : shared_ptr < BerkeleyEnvironment > GetWalletEnv ( const fs : : path & wallet_path , std : : string & database_filename )
2018-10-23 07:26:27 +02:00
{
fs : : path env_directory ;
SplitWalletPath ( wallet_path , env_directory , database_filename ) ;
2017-11-14 03:25:46 +01:00
LOCK ( cs_db ) ;
2018-05-18 22:28:50 +02:00
auto inserted = g_dbenvs . emplace ( env_directory . string ( ) , std : : weak_ptr < BerkeleyEnvironment > ( ) ) ;
if ( inserted . second ) {
auto env = std : : make_shared < BerkeleyEnvironment > ( env_directory . string ( ) ) ;
inserted . first - > second = env ;
return env ;
}
return inserted . first - > second . lock ( ) ;
2017-11-14 03:25:46 +01:00
}
2010-08-29 18:58:15 +02:00
//
2017-12-08 12:39:22 +01:00
// BerkeleyBatch
2010-08-29 18:58:15 +02:00
//
2017-12-08 12:39:22 +01:00
void BerkeleyEnvironment : : Close ( )
2011-11-11 03:12:46 +01:00
{
if ( ! fDbEnvInit )
return ;
fDbEnvInit = false ;
2017-11-14 03:25:46 +01:00
2018-10-24 22:08:54 +02:00
for ( auto & db : m_databases ) {
2017-11-14 03:25:46 +01:00
auto count = mapFileUseCount . find ( db . first ) ;
assert ( count = = mapFileUseCount . end ( ) | | count - > second = = 0 ) ;
2018-10-24 22:08:54 +02:00
BerkeleyDatabase & database = db . second . get ( ) ;
if ( database . m_db ) {
database . m_db - > close ( 0 ) ;
database . m_db . reset ( ) ;
2017-11-14 03:25:46 +01:00
}
}
2019-01-31 01:04:51 +01:00
FILE * error_file = nullptr ;
dbenv - > get_errfile ( & error_file ) ;
2015-03-03 16:49:12 +01:00
int ret = dbenv - > close ( 0 ) ;
2012-10-08 21:18:04 +02:00
if ( ret ! = 0 )
2018-05-09 00:20:12 +02:00
LogPrintf ( " BerkeleyEnvironment::Close: Error %d closing database environment: %s \n " , ret , DbEnv : : strerror ( ret ) ) ;
2012-05-22 21:51:13 +02:00
if ( ! fMockDb )
2016-08-30 08:00:55 +02:00
DbEnv ( ( u_int32_t ) 0 ) . remove ( strPath . c_str ( ) , 0 ) ;
2019-01-31 01:04:51 +01:00
if ( error_file ) fclose ( error_file ) ;
2011-11-11 03:12:46 +01:00
}
2017-12-08 12:39:22 +01:00
void BerkeleyEnvironment : : Reset ( )
2010-08-29 18:58:15 +02:00
{
2017-08-09 16:24:12 +02:00
dbenv . reset ( new DbEnv ( DB_CXX_NO_EXCEPTIONS ) ) ;
2012-11-18 11:58:32 +01:00
fDbEnvInit = false ;
fMockDb = false ;
2010-08-29 18:58:15 +02:00
}
2017-12-08 12:39:22 +01:00
//! Construct an environment rooted at dir_path; the DbEnv itself is only
//! created lazily via Reset() here and opened later by Open().
BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path) : strPath(dir_path.string())
{
    Reset();
}
2017-12-08 12:39:22 +01:00
BerkeleyEnvironment::~BerkeleyEnvironment()
{
    // Drop our weak_ptr entry from the global registry first so no one can
    // look up this environment while it shuts down, then close the DbEnv.
    g_dbenvs.erase(strPath);
    Close();
}
2017-12-08 12:39:22 +01:00
//! Open the Berkeley DB environment on disk.
//! @param[in] retry  If true and the first open fails, move the old "database"
//!                   log directory aside and retry once.
//! @return true if the environment is (or already was) open.
bool BerkeleyEnvironment::Open(bool retry)
{
    // Already open: nothing to do.
    if (fDbEnvInit)
        return true;

    boost::this_thread::interruption_point();

    fs::path pathIn = strPath;
    TryCreateDirectories(pathIn);
    // Take an advisory lock so two processes don't open the same wallet dir.
    if (!LockDirectory(pathIn, ".walletlock")) {
        LogPrintf("Cannot obtain a lock on wallet directory %s. Another instance of bitcoin may be using it.\n", strPath);
        return false;
    }

    fs::path pathLogDir = pathIn / "database"; // BDB transaction log directory
    TryCreateDirectories(pathLogDir);
    fs::path pathErrorFile = pathIn / "db.log"; // BDB error message sink
    LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string());

    unsigned int nEnvFlags = 0;
    // -privdb keeps the environment region memory private to this process.
    if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB))
        nEnvFlags |= DB_PRIVATE;

    dbenv->set_lg_dir(pathLogDir.string().c_str());
    dbenv->set_cachesize(0, 0x100000, 1); // 1 MiB should be enough for just the wallet
    dbenv->set_lg_bsize(0x10000);
    dbenv->set_lg_max(1048576);
    dbenv->set_lk_max_locks(40000);
    dbenv->set_lk_max_objects(40000);
    dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a")); /// debug
    dbenv->set_flags(DB_AUTO_COMMIT, 1);
    dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1);
    dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1);
    int ret = dbenv->open(strPath.c_str(),
                          DB_CREATE |
                              DB_INIT_LOCK |
                              DB_INIT_LOG |
                              DB_INIT_MPOOL |
                              DB_INIT_TXN |
                              DB_THREAD |
                              DB_RECOVER |
                              nEnvFlags,
                          S_IRUSR | S_IWUSR);
    if (ret != 0) {
        LogPrintf("BerkeleyEnvironment::Open: Error %d opening database environment: %s\n", ret, DbEnv::strerror(ret));
        // Close the half-opened handle before re-creating it via Reset().
        int ret2 = dbenv->close(0);
        if (ret2 != 0) {
            LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2));
        }
        Reset();
        if (retry) {
            // try moving the database env out of the way
            fs::path pathDatabaseBak = pathIn / strprintf("database.%d.bak", GetTime());
            try {
                fs::rename(pathLogDir, pathDatabaseBak);
                LogPrintf("Moved old %s to %s. Retrying.\n", pathLogDir.string(), pathDatabaseBak.string());
            } catch (const fs::filesystem_error&) {
                // failure is ok (well, not really, but it's not worse than what we started with)
            }
            // try opening it again one more time
            if (!Open(false /* retry */)) {
                // if it still fails, it probably means we can't even create the database env
                return false;
            }
        } else {
            return false;
        }
    }

    fDbEnvInit = true;
    fMockDb = false;
    return true;
}
2018-09-14 20:13:16 +02:00
//! Construct an in-memory mock Berkeley environment for testing and as a place-holder for g_dbenvs emplace
2018-05-18 22:28:50 +02:00
//! Construct an in-memory mock Berkeley environment for testing and as a
//! place-holder for g_dbenvs emplace.
BerkeleyEnvironment::BerkeleyEnvironment()
{
    Reset();

    boost::this_thread::interruption_point();

    LogPrint(BCLog::DB, "BerkeleyEnvironment::MakeMock\n");

    // Configure a purely in-memory environment: logs kept in memory, no
    // region files on disk (DB_PRIVATE).
    dbenv->set_cachesize(1, 0, 1);
    dbenv->set_lg_bsize(10485760 * 4);
    dbenv->set_lg_max(10485760);
    dbenv->set_lk_max_locks(10000);
    dbenv->set_lk_max_objects(10000);
    dbenv->set_flags(DB_AUTO_COMMIT, 1);
    dbenv->log_set_config(DB_LOG_IN_MEMORY, 1);
    const int open_result = dbenv->open(nullptr,
                                        DB_CREATE |
                                            DB_INIT_LOCK |
                                            DB_INIT_LOG |
                                            DB_INIT_MPOOL |
                                            DB_INIT_TXN |
                                            DB_THREAD |
                                            DB_PRIVATE,
                                        S_IRUSR | S_IWUSR);
    if (open_result > 0)
        throw std::runtime_error(strprintf("BerkeleyEnvironment::MakeMock: Error %d opening database environment.", open_result));

    fDbEnvInit = true;
    fMockDb = true;
}
2017-12-08 12:39:22 +01:00
BerkeleyEnvironment::VerifyResult BerkeleyEnvironment::Verify(const std::string& strFile, recoverFunc_type recoverFunc, std::string& out_backup_filename)
{
    LOCK(cs_db);
    // The file must not be open anywhere while it is verified.
    assert(mapFileUseCount.count(strFile) == 0);

    Db db(dbenv.get(), 0);
    const int verify_ret = db.verify(strFile.c_str(), nullptr, nullptr, 0);
    if (verify_ret == 0) {
        return VerifyResult::VERIFY_OK;
    }
    if (recoverFunc == nullptr) {
        // Verification failed and no recovery routine was supplied.
        return VerifyResult::RECOVER_FAIL;
    }

    // Try to recover:
    const bool recovered = (*recoverFunc)(fs::path(strPath) / strFile, out_backup_filename);
    return recovered ? VerifyResult::RECOVER_OK : VerifyResult::RECOVER_FAIL;
}
2018-11-25 03:49:08 +01:00
BerkeleyBatch::SafeDbt::SafeDbt()
{
    // Let BDB malloc the result buffer; the destructor frees it.
    m_dbt.set_flags(DB_DBT_MALLOC);
}
2018-11-25 03:49:08 +01:00
// Wrap a caller-owned buffer; no DB_DBT_MALLOC, so nothing is freed on
// destruction (the buffer is only wiped).
BerkeleyBatch::SafeDbt::SafeDbt(void* data, size_t size)
    : m_dbt(data, size)
{
}
BerkeleyBatch::SafeDbt::~SafeDbt()
{
    void* const data = m_dbt.get_data();
    if (data == nullptr) return;

    // Clear memory, e.g. in case it was a private key
    memory_cleanse(data, m_dbt.get_size());
    // under DB_DBT_MALLOC, data is malloced by the Dbt, but must be
    // freed by the caller.
    // https://docs.oracle.com/cd/E17275_01/html/api_reference/C/dbt.html
    if (m_dbt.get_flags() & DB_DBT_MALLOC) {
        free(data);
    }
}
const void * BerkeleyBatch : : SafeDbt : : get_data ( ) const
{
return m_dbt . get_data ( ) ;
}
// Expose the wrapped Dbt's payload size in bytes.
u_int32_t BerkeleyBatch::SafeDbt::get_size() const
{
    const u_int32_t size = m_dbt.get_size();
    return size;
}
// Implicit conversion so a SafeDbt can be passed straight to BDB APIs that
// take a Dbt*.
BerkeleyBatch::SafeDbt::operator Dbt*()
{
    Dbt* const raw = &m_dbt;
    return raw;
}
2017-12-08 12:39:22 +01:00
//! Attempt to recover a corrupt wallet data file.
//! @param[in]  file_path         Wallet path (dir or legacy data-file path).
//! @param[in]  callbackDataIn    Opaque context forwarded to recoverKVcallback.
//! @param[in]  recoverKVcallback Optional filter; returning false drops a
//!                               salvaged key/value pair from the rewrite.
//! @param[out] newFilename       Name the original file was renamed to (.bak).
//! @return true if rename, salvage and rewrite all succeeded.
bool BerkeleyBatch::Recover(const fs::path& file_path, void* callbackDataIn, bool (*recoverKVcallback)(void* callbackData, CDataStream ssKey, CDataStream ssValue), std::string& newFilename)
{
    std::string filename;
    std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename);

    // Recovery procedure:
    // move wallet file to walletfilename.timestamp.bak
    // Call Salvage with fAggressive=true to
    // get as much data as possible.
    // Rewrite salvaged data to fresh wallet file
    // Set -rescan so any missing transactions will be
    // found.
    int64_t now = GetTime();
    newFilename = strprintf("%s.%d.bak", filename, now);

    // Rename inside the environment so BDB's transaction state stays coherent.
    int result = env->dbenv->dbrename(nullptr, filename.c_str(), nullptr,
                                      newFilename.c_str(), DB_AUTO_COMMIT);
    if (result == 0)
        LogPrintf("Renamed %s to %s\n", filename, newFilename);
    else
    {
        LogPrintf("Failed to rename %s to %s\n", filename, newFilename);
        return false;
    }

    std::vector<BerkeleyEnvironment::KeyValPair> salvagedData;
    bool fSuccess = env->Salvage(newFilename, true, salvagedData);
    if (salvagedData.empty())
    {
        LogPrintf("Salvage(aggressive) found no records in %s.\n", newFilename);
        return false;
    }
    LogPrintf("Salvage(aggressive) found %u records\n", salvagedData.size());

    // Create a fresh data file under the original name and copy the salvaged
    // records into it.
    std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0);
    int ret = pdbCopy->open(nullptr,          // Txn pointer
                            filename.c_str(), // Filename
                            "main",           // Logical db name
                            DB_BTREE,         // Database type
                            DB_CREATE,        // Flags
                            0);
    if (ret > 0) {
        LogPrintf("Cannot create database file %s\n", filename);
        pdbCopy->close(0);
        return false;
    }

    DbTxn* ptxn = env->TxnBegin();
    for (BerkeleyEnvironment::KeyValPair& row : salvagedData)
    {
        if (recoverKVcallback)
        {
            CDataStream ssKey(row.first, SER_DISK, CLIENT_VERSION);
            CDataStream ssValue(row.second, SER_DISK, CLIENT_VERSION);
            // Callback may veto individual records (e.g. unparsable entries).
            if (!(*recoverKVcallback)(callbackDataIn, ssKey, ssValue))
                continue;
        }
        Dbt datKey(&row.first[0], row.first.size());
        Dbt datValue(&row.second[0], row.second.size());
        // DB_NOOVERWRITE: a duplicate key marks the recovery as partial.
        int ret2 = pdbCopy->put(ptxn, &datKey, &datValue, DB_NOOVERWRITE);
        if (ret2 > 0)
            fSuccess = false;
    }
    ptxn->commit(0);
    pdbCopy->close(0);
    return fSuccess;
}
2017-12-08 12:39:22 +01:00
bool BerkeleyBatch : : VerifyEnvironment ( const fs : : path & file_path , std : : string & errorStr )
2016-08-24 09:57:23 +02:00
{
2017-11-14 03:25:46 +01:00
std : : string walletFile ;
2018-05-18 22:28:50 +02:00
std : : shared_ptr < BerkeleyEnvironment > env = GetWalletEnv ( file_path , walletFile ) ;
2017-11-14 03:25:46 +01:00
fs : : path walletDir = env - > Directory ( ) ;
2018-07-31 20:02:34 +02:00
LogPrintf ( " Using BerkeleyDB version %s \n " , DbEnv : : version ( nullptr , nullptr , nullptr ) ) ;
2016-08-24 09:57:23 +02:00
LogPrintf ( " Using wallet %s \n " , walletFile ) ;
// Wallet file must be a plain filename without a directory
2017-03-01 17:05:50 +01:00
if ( walletFile ! = fs : : basename ( walletFile ) + fs : : extension ( walletFile ) )
2016-08-24 09:57:23 +02:00
{
2017-10-08 22:48:07 +02:00
errorStr = strprintf ( _ ( " Wallet %s resides outside wallet directory %s " ) , walletFile , walletDir . string ( ) ) ;
2016-08-24 09:57:23 +02:00
return false ;
}
2017-11-14 03:25:46 +01:00
if ( ! env - > Open ( true /* retry */ ) ) {
2017-12-24 00:45:33 +01:00
errorStr = strprintf ( _ ( " Error initializing wallet database environment %s! " ) , walletDir ) ;
2017-12-14 23:06:22 +01:00
return false ;
2016-08-24 09:57:23 +02:00
}
2017-12-14 23:06:22 +01:00
2016-08-24 09:57:23 +02:00
return true ;
}
2017-12-08 12:39:22 +01:00
bool BerkeleyBatch : : VerifyDatabaseFile ( const fs : : path & file_path , std : : string & warningStr , std : : string & errorStr , BerkeleyEnvironment : : recoverFunc_type recoverFunc )
2016-08-24 09:57:23 +02:00
{
2017-11-14 03:25:46 +01:00
std : : string walletFile ;
2018-05-18 22:28:50 +02:00
std : : shared_ptr < BerkeleyEnvironment > env = GetWalletEnv ( file_path , walletFile ) ;
2017-11-14 03:25:46 +01:00
fs : : path walletDir = env - > Directory ( ) ;
2017-10-08 22:48:07 +02:00
if ( fs : : exists ( walletDir / walletFile ) )
2016-08-24 09:57:23 +02:00
{
2017-06-06 00:01:48 +02:00
std : : string backup_filename ;
2017-12-08 12:39:22 +01:00
BerkeleyEnvironment : : VerifyResult r = env - > Verify ( walletFile , recoverFunc , backup_filename ) ;
if ( r = = BerkeleyEnvironment : : VerifyResult : : RECOVER_OK )
2016-08-24 09:57:23 +02:00
{
warningStr = strprintf ( _ ( " Warning: Wallet file corrupt, data salvaged! "
" Original %s saved as %s in %s; if "
" your balance or transactions are incorrect you should "
" restore from a backup. " ) ,
2017-10-08 22:48:07 +02:00
walletFile , backup_filename , walletDir ) ;
2016-08-24 09:57:23 +02:00
}
2017-12-08 12:39:22 +01:00
if ( r = = BerkeleyEnvironment : : VerifyResult : : RECOVER_FAIL )
2016-08-24 09:57:23 +02:00
{
errorStr = strprintf ( _ ( " %s corrupt, salvage failed " ) , walletFile ) ;
return false ;
}
}
// also return true if files does not exists
return true ;
}
2016-02-15 16:09:13 +01:00
/* End of headers, beginning of key/value data */
static const char * HEADER_END = " HEADER=END " ;
/* End of key/value data */
static const char * DATA_END = " DATA=END " ;
2017-12-08 12:39:22 +01:00
//! Dump-and-parse salvage of a (possibly corrupt) data file.
//! @param[in]  strFile     Data file name relative to this environment.
//! @param[in]  fAggressive Pass DB_AGGRESSIVE to extract data even from pages
//!                         that fail verification.
//! @param[out] vResult     Salvaged raw key/value byte pairs.
//! @return true only if verification succeeded cleanly AND parsing completed.
bool BerkeleyEnvironment::Salvage(const std::string& strFile, bool fAggressive, std::vector<BerkeleyEnvironment::KeyValPair>& vResult)
{
    LOCK(cs_db);
    // The file must not be open while it is salvaged.
    assert(mapFileUseCount.count(strFile) == 0);

    u_int32_t flags = DB_SALVAGE;
    if (fAggressive)
        flags |= DB_AGGRESSIVE;

    std::stringstream strDump;

    Db db(dbenv.get(), 0);
    int result = db.verify(strFile.c_str(), nullptr, &strDump, flags);
    if (result == DB_VERIFY_BAD) {
        LogPrintf("BerkeleyEnvironment::Salvage: Database salvage found errors, all data may not be recoverable.\n");
        if (!fAggressive) {
            LogPrintf("BerkeleyEnvironment::Salvage: Rerun with aggressive mode to ignore errors and continue.\n");
            return false;
        }
    }
    if (result != 0 && result != DB_VERIFY_BAD) {
        LogPrintf("BerkeleyEnvironment::Salvage: Database salvage failed with result %d.\n", result);
        return false;
    }

    // Format of bdb dump is ascii lines:
    // header lines...
    // HEADER=END
    //  hexadecimal key
    //  hexadecimal value
    // ... repeated
    // DATA=END

    std::string strLine;
    while (!strDump.eof() && strLine != HEADER_END)
        getline(strDump, strLine); // Skip past header

    // Read alternating key/value hex lines until the DATA=END sentinel.
    std::string keyHex, valueHex;
    while (!strDump.eof() && keyHex != DATA_END) {
        getline(strDump, keyHex);
        if (keyHex != DATA_END) {
            if (strDump.eof())
                break;
            getline(strDump, valueHex);
            if (valueHex == DATA_END) {
                // A key without a matching value: truncated dump.
                LogPrintf("BerkeleyEnvironment::Salvage: WARNING: Number of keys in data does not match number of values.\n");
                break;
            }
            vResult.push_back(make_pair(ParseHex(keyHex), ParseHex(valueHex)));
        }
    }

    if (keyHex != DATA_END) {
        // Dump ended without the sentinel; output is incomplete.
        LogPrintf("BerkeleyEnvironment::Salvage: WARNING: Unexpected end of file while reading salvage output.\n");
        return false;
    }

    return (result == 0);
}
2017-12-08 12:39:22 +01:00
void BerkeleyEnvironment : : CheckpointLSN ( const std : : string & strFile )
2012-05-14 03:37:39 +02:00
{
2015-03-03 16:49:12 +01:00
dbenv - > txn_checkpoint ( 0 , 0 , 0 ) ;
2012-05-22 21:51:13 +02:00
if ( fMockDb )
return ;
2015-03-03 16:49:12 +01:00
dbenv - > lsn_reset ( strFile . c_str ( ) , 0 ) ;
2012-05-14 03:37:39 +02:00
}
2010-08-29 18:58:15 +02:00
2017-12-08 12:39:22 +01:00
//! Open a read/write batch over the given wallet database.
//! @param[in] pszMode         fopen-style mode: 'r' read-only; '+'/'w' enable
//!                            writes; 'c' creates the data file if missing.
//! @param[in] fFlushOnCloseIn Whether Close() should checkpoint the log.
//! @throws std::runtime_error if the environment or data file cannot be opened.
BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr)
{
    fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w'));
    fFlushOnClose = fFlushOnCloseIn;
    env = database.env.get();
    // Dummy (wallet-less) databases have no backing file; nothing to open.
    if (database.IsDummy()) {
        return;
    }
    const std::string& strFilename = database.strFile;

    bool fCreate = strchr(pszMode, 'c') != nullptr;
    unsigned int nFlags = DB_THREAD;
    if (fCreate)
        nFlags |= DB_CREATE;

    {
        LOCK(cs_db);
        if (!env->Open(false /* retry */))
            throw std::runtime_error("BerkeleyBatch: Failed to open database environment.");

        // Reuse the database's cached Db handle when one is already open.
        pdb = database.m_db.get();
        if (pdb == nullptr) {
            int ret;
            std::unique_ptr<Db> pdb_temp = MakeUnique<Db>(env->dbenv.get(), 0);

            bool fMockDb = env->IsMock();
            if (fMockDb) {
                // Mock environments must not spill cache pages to temp files.
                DbMpoolFile* mpf = pdb_temp->get_mpf();
                ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
                if (ret != 0) {
                    throw std::runtime_error(strprintf("BerkeleyBatch: Failed to configure for no temp file backing for database %s", strFilename));
                }
            }

            ret = pdb_temp->open(nullptr,                             // Txn pointer
                                 fMockDb ? nullptr : strFilename.c_str(), // Filename
                                 fMockDb ? strFilename.c_str() : "main", // Logical db name
                                 DB_BTREE,                            // Database type
                                 nFlags,                              // Flags
                                 0);

            if (ret != 0) {
                throw std::runtime_error(strprintf("BerkeleyBatch: Error %d, can't open database %s", ret, strFilename));
            }

            // Call CheckUniqueFileid on the containing BDB environment to
            // avoid BDB data consistency bugs that happen when different data
            // files in the same environment have the same fileid.
            //
            // Also call CheckUniqueFileid on all the other g_dbenvs to prevent
            // bitcoin from opening the same data file through another
            // environment when the file is referenced through equivalent but
            // not obviously identical symlinked or hard linked or bind mounted
            // paths. In the future a more relaxed check for equal inode and
            // device ids could be done instead, which would allow opening
            // different backup copies of a wallet at the same time. Maybe even
            // more ideally, an exclusive lock for accessing the database could
            // be implemented, so no equality checks are needed at all. (Newer
            // versions of BDB have an set_lk_exclusive method for this
            // purpose, but the older version we use does not.)
            for (const auto& env : g_dbenvs) {
                CheckUniqueFileid(*env.second.lock().get(), strFilename, *pdb_temp, this->env->m_fileids[strFilename]);
            }

            pdb = pdb_temp.release();
            database.m_db.reset(pdb);

            if (fCreate && !Exists(std::string("version"))) {
                // Stamp the freshly created database with the client version;
                // temporarily lift read-only mode to allow the write.
                bool fTmp = fReadOnly;
                fReadOnly = false;
                WriteVersion(CLIENT_VERSION);
                fReadOnly = fTmp;
            }
        }
        ++env->mapFileUseCount[strFilename];
        strFile = strFilename;
    }
}
2017-12-08 12:39:22 +01:00
void BerkeleyBatch : : Flush ( )
2010-08-29 18:58:15 +02:00
{
2012-05-14 18:39:29 +02:00
if ( activeTxn )
2012-07-06 16:33:34 +02:00
return ;
2010-08-29 18:58:15 +02:00
// Flush database activity from memory pool to disk log
unsigned int nMinutes = 0 ;
2010-12-05 10:29:30 +01:00
if ( fReadOnly )
nMinutes = 1 ;
2012-03-28 22:09:18 +02:00
2017-08-01 21:17:40 +02:00
env - > dbenv - > txn_checkpoint ( nMinutes ? gArgs . GetArg ( " -dblogsize " , DEFAULT_WALLET_DBLOGSIZE ) * 1024 : 0 , nMinutes , 0 ) ;
2012-07-06 16:33:34 +02:00
}
2017-12-08 12:39:22 +01:00
void BerkeleyDatabase : : IncrementUpdateCounter ( )
2017-03-09 21:56:58 +01:00
{
+ + nUpdateCounter ;
}
2017-12-08 12:39:22 +01:00
void BerkeleyBatch : : Close ( )
2012-07-06 16:33:34 +02:00
{
if ( ! pdb )
return ;
if ( activeTxn )
activeTxn - > abort ( ) ;
2017-08-07 07:36:37 +02:00
activeTxn = nullptr ;
pdb = nullptr ;
2012-07-06 16:33:34 +02:00
2014-08-31 05:55:27 +02:00
if ( fFlushOnClose )
Flush ( ) ;
2010-08-29 18:58:15 +02:00
2012-04-06 18:39:12 +02:00
{
2017-11-14 03:25:46 +01:00
LOCK ( cs_db ) ;
2017-03-08 14:34:47 +01:00
- - env - > mapFileUseCount [ strFile ] ;
2012-04-06 18:39:12 +02:00
}
2018-02-20 21:28:42 +01:00
env - > m_db_in_use . notify_all ( ) ;
2010-08-29 18:58:15 +02:00
}
2017-12-08 12:39:22 +01:00
void BerkeleyEnvironment : : CloseDb ( const std : : string & strFile )
2010-08-29 18:58:15 +02:00
{
{
2012-04-06 18:39:12 +02:00
LOCK ( cs_db ) ;
2018-10-24 22:08:54 +02:00
auto it = m_databases . find ( strFile ) ;
assert ( it ! = m_databases . end ( ) ) ;
BerkeleyDatabase & database = it - > second . get ( ) ;
if ( database . m_db ) {
2010-08-29 18:58:15 +02:00
// Close the database handle
2018-10-24 22:08:54 +02:00
database . m_db - > close ( 0 ) ;
database . m_db . reset ( ) ;
2010-08-29 18:58:15 +02:00
}
}
}
2018-02-20 21:28:42 +01:00
void BerkeleyEnvironment : : ReloadDbEnv ( )
{
// Make sure that no Db's are in use
AssertLockNotHeld ( cs_db ) ;
std : : unique_lock < CCriticalSection > lock ( cs_db ) ;
m_db_in_use . wait ( lock , [ this ] ( ) {
for ( auto & count : mapFileUseCount ) {
if ( count . second > 0 ) return false ;
}
return true ;
} ) ;
std : : vector < std : : string > filenames ;
2018-10-24 22:08:54 +02:00
for ( auto it : m_databases ) {
2018-02-20 21:28:42 +01:00
filenames . push_back ( it . first ) ;
}
// Close the individual Db's
for ( const std : : string & filename : filenames ) {
CloseDb ( filename ) ;
}
// Reset the environment
Flush ( true ) ; // This will flush and close the environment
Reset ( ) ;
Open ( true ) ;
}
// Rewrite the database file by copying every record into a fresh
// "<strFile>.rewrite" file and then renaming that file over the original.
// Records whose key starts with pszSkip (if non-null) are dropped, and the
// "\x07version" record is refreshed to CLIENT_VERSION. Polls every 100ms
// until the file is no longer in use, then performs the rewrite under cs_db.
// Returns true on success, false if any BDB step failed.
bool BerkeleyBatch::Rewrite(BerkeleyDatabase& database, const char* pszSkip)
{
    // Dummy (env-less) databases have no file to rewrite.
    if (database.IsDummy()) {
        return true;
    }
    BerkeleyEnvironment* env = database.env.get();
    const std::string& strFile = database.strFile;
    while (true) {
        {
            LOCK(cs_db);
            // Only proceed once nothing else holds the file open.
            if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) {
                // Flush log data to the dat file
                env->CloseDb(strFile);
                env->CheckpointLSN(strFile);
                env->mapFileUseCount.erase(strFile);

                bool fSuccess = true;
                LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile);
                std::string strFileRes = strFile + ".rewrite";
                { // surround usage of db with extra {}
                  // (ensures db and pdbCopy are destroyed before the
                  // remove/rename of the files below)
                    BerkeleyBatch db(database, "r");
                    std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0);

                    int ret = pdbCopy->open(nullptr,            // Txn pointer
                                            strFileRes.c_str(), // Filename
                                            "main",             // Logical db name
                                            DB_BTREE,           // Database type
                                            DB_CREATE,          // Flags
                                            0);
                    if (ret > 0) {
                        LogPrintf("BerkeleyBatch::Rewrite: Can't create database file %s\n", strFileRes);
                        fSuccess = false;
                    }

                    // Walk every record of the source database and copy it
                    // into the new file.
                    Dbc* pcursor = db.GetCursor();
                    if (pcursor)
                        while (fSuccess) {
                            CDataStream ssKey(SER_DISK, CLIENT_VERSION);
                            CDataStream ssValue(SER_DISK, CLIENT_VERSION);
                            int ret1 = db.ReadAtCursor(pcursor, ssKey, ssValue);
                            if (ret1 == DB_NOTFOUND) {
                                // End of database reached: normal exit.
                                pcursor->close();
                                break;
                            } else if (ret1 != 0) {
                                // Read error: abort the rewrite.
                                pcursor->close();
                                fSuccess = false;
                                break;
                            }
                            // Skip records whose key prefix matches pszSkip.
                            if (pszSkip &&
                                strncmp(ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0)
                                continue;
                            if (strncmp(ssKey.data(), "\x07version", 8) == 0) {
                                // Update version:
                                ssValue.clear();
                                ssValue << CLIENT_VERSION;
                            }
                            Dbt datKey(ssKey.data(), ssKey.size());
                            Dbt datValue(ssValue.data(), ssValue.size());
                            // DB_NOOVERWRITE: a duplicate key (ret2 > 0)
                            // fails the rewrite rather than clobbering.
                            int ret2 = pdbCopy->put(nullptr, &datKey, &datValue, DB_NOOVERWRITE);
                            if (ret2 > 0)
                                fSuccess = false;
                        }
                    if (fSuccess) {
                        // Close both handles so the files can be removed and
                        // renamed below.
                        db.Close();
                        env->CloseDb(strFile);
                        if (pdbCopy->close(0))
                            fSuccess = false;
                    } else {
                        // Failure path: still close the copy handle.
                        pdbCopy->close(0);
                    }
                }
                if (fSuccess) {
                    // Replace the original file with the rewritten one.
                    Db dbA(env->dbenv.get(), 0);
                    if (dbA.remove(strFile.c_str(), nullptr, 0))
                        fSuccess = false;
                    Db dbB(env->dbenv.get(), 0);
                    if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0))
                        fSuccess = false;
                }
                if (!fSuccess)
                    LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite database file %s\n", strFileRes);
                return fSuccess;
            }
        }
        // File still in use: back off and retry.
        MilliSleep(100);
    }
}
// Flush log data to the data file of every database in this environment whose
// use count has dropped to zero, detaching each flushed file from the
// environment. If fShutdown is set and no files remain in use, also remove
// the environment's log files and close the environment down.
void BerkeleyEnvironment::Flush(bool fShutdown)
{
    int64_t nStart = GetTimeMillis();
    // Flush log data to the actual data file on all files that are not in use
    LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: [%s] Flush(%s)%s\n", strPath, fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
    if (!fDbEnvInit)
        return;
    {
        LOCK(cs_db);
        std::map<std::string, int>::iterator mi = mapFileUseCount.begin();
        while (mi != mapFileUseCount.end()) {
            std::string strFile = (*mi).first;
            int nRefCount = (*mi).second;
            LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
            if (nRefCount == 0) {
                // Move log data to the dat file
                CloseDb(strFile);
                LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s checkpoint\n", strFile);
                dbenv->txn_checkpoint(0, 0, 0);
                LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s detach\n", strFile);
                if (!fMockDb)
                    dbenv->lsn_reset(strFile.c_str(), 0);
                LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s closed\n", strFile);
                // erase(mi++): post-increment yields the current iterator to
                // erase while advancing past it first, so iteration stays
                // valid after the erase.
                mapFileUseCount.erase(mi++);
            } else
                mi++;
        }
        LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
        if (fShutdown) {
            char** listp;
            if (mapFileUseCount.empty()) {
                // Remove unused log files and shut the environment down.
                dbenv->log_archive(&listp, DB_ARCH_REMOVE);
                Close();
                if (!fMockDb) {
                    fs::remove_all(fs::path(strPath) / "database");
                }
            }
        }
    }
}
2016-08-24 09:57:23 +02:00
2017-12-08 12:39:22 +01:00
bool BerkeleyBatch : : PeriodicFlush ( BerkeleyDatabase & database )
2016-08-24 09:57:23 +02:00
{
2017-12-08 12:39:22 +01:00
if ( database . IsDummy ( ) ) {
2017-03-08 13:08:26 +01:00
return true ;
}
2016-08-24 09:57:23 +02:00
bool ret = false ;
2018-05-18 22:28:50 +02:00
BerkeleyEnvironment * env = database . env . get ( ) ;
2017-12-08 12:39:22 +01:00
const std : : string & strFile = database . strFile ;
2017-11-14 03:25:46 +01:00
TRY_LOCK ( cs_db , lockDb ) ;
2016-08-24 09:57:23 +02:00
if ( lockDb )
{
// Don't do this if any databases are in use
int nRefCount = 0 ;
2017-03-08 14:34:47 +01:00
std : : map < std : : string , int > : : iterator mit = env - > mapFileUseCount . begin ( ) ;
while ( mit ! = env - > mapFileUseCount . end ( ) )
2016-08-24 09:57:23 +02:00
{
2017-03-18 07:19:16 +01:00
nRefCount + = ( * mit ) . second ;
mit + + ;
2016-08-24 09:57:23 +02:00
}
if ( nRefCount = = 0 )
{
boost : : this_thread : : interruption_point ( ) ;
2017-03-08 14:34:47 +01:00
std : : map < std : : string , int > : : iterator mi = env - > mapFileUseCount . find ( strFile ) ;
if ( mi ! = env - > mapFileUseCount . end ( ) )
2016-08-24 09:57:23 +02:00
{
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : DB , " Flushing %s \n " , strFile ) ;
2016-08-24 09:57:23 +02:00
int64_t nStart = GetTimeMillis ( ) ;
// Flush wallet file so it's self contained
2017-03-08 14:34:47 +01:00
env - > CloseDb ( strFile ) ;
env - > CheckpointLSN ( strFile ) ;
2016-08-24 09:57:23 +02:00
2017-03-08 14:34:47 +01:00
env - > mapFileUseCount . erase ( mi + + ) ;
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : DB , " Flushed %s %dms \n " , strFile , GetTimeMillis ( ) - nStart ) ;
2016-08-24 09:57:23 +02:00
ret = true ;
}
}
}
return ret ;
}
// Rewrite this database's file in place, dropping any records whose key
// starts with pszSkip (nullptr = keep all records). Delegates to
// BerkeleyBatch::Rewrite; returns true on success.
bool BerkeleyDatabase::Rewrite(const char* pszSkip)
{
    return BerkeleyBatch::Rewrite(*this, pszSkip);
}
2017-12-08 12:39:22 +01:00
bool BerkeleyDatabase : : Backup ( const std : : string & strDest )
2017-03-08 11:48:58 +01:00
{
2017-03-08 13:08:26 +01:00
if ( IsDummy ( ) ) {
2017-03-08 11:48:58 +01:00
return false ;
}
while ( true )
{
{
2017-11-14 03:25:46 +01:00
LOCK ( cs_db ) ;
2017-03-08 14:34:47 +01:00
if ( ! env - > mapFileUseCount . count ( strFile ) | | env - > mapFileUseCount [ strFile ] = = 0 )
2017-03-08 11:48:58 +01:00
{
// Flush log data to the dat file
2017-03-08 14:34:47 +01:00
env - > CloseDb ( strFile ) ;
env - > CheckpointLSN ( strFile ) ;
env - > mapFileUseCount . erase ( strFile ) ;
2017-03-08 11:48:58 +01:00
// Copy wallet file
2018-07-15 11:17:50 +02:00
fs : : path pathSrc = env - > Directory ( ) / strFile ;
2017-03-08 11:48:58 +01:00
fs : : path pathDest ( strDest ) ;
if ( fs : : is_directory ( pathDest ) )
pathDest / = strFile ;
try {
2017-09-21 00:10:46 +02:00
if ( fs : : equivalent ( pathSrc , pathDest ) ) {
LogPrintf ( " cannot backup to wallet source file %s \n " , pathDest . string ( ) ) ;
return false ;
}
2017-03-08 11:48:58 +01:00
fs : : copy_file ( pathSrc , pathDest , fs : : copy_option : : overwrite_if_exists ) ;
LogPrintf ( " copied %s to %s \n " , strFile , pathDest . string ( ) ) ;
return true ;
} catch ( const fs : : filesystem_error & e ) {
2018-09-10 20:08:56 +02:00
LogPrintf ( " error copying %s to %s - %s \n " , strFile , pathDest . string ( ) , fsbridge : : get_filesystem_error_message ( e ) ) ;
2017-03-08 11:48:58 +01:00
return false ;
}
}
}
MilliSleep ( 100 ) ;
}
}
2017-03-08 14:34:47 +01:00
2017-12-08 12:39:22 +01:00
void BerkeleyDatabase : : Flush ( bool shutdown )
2017-03-08 14:34:47 +01:00
{
if ( ! IsDummy ( ) ) {
env - > Flush ( shutdown ) ;
2018-07-14 04:15:30 +02:00
if ( shutdown ) {
LOCK ( cs_db ) ;
g_dbenvs . erase ( env - > Directory ( ) . string ( ) ) ;
env = nullptr ;
2018-09-25 15:56:16 +02:00
} else {
// TODO: To avoid g_dbenvs.erase erasing the environment prematurely after the
// first database shutdown when multiple databases are open in the same
// environment, should replace raw database `env` pointers with shared or weak
// pointers, or else separate the database and environment shutdowns so
// environments can be shut down after databases.
env - > m_fileids . erase ( strFile ) ;
2018-07-14 04:15:30 +02:00
}
2017-03-08 14:34:47 +01:00
}
}
2018-02-20 21:28:42 +01:00
void BerkeleyDatabase : : ReloadDbEnv ( )
{
if ( ! IsDummy ( ) ) {
env - > ReloadDbEnv ( ) ;
2017-03-08 14:34:47 +01:00
}
}