Merge #9424: Change LogAcceptCategory to use uint32_t rather than sets of strings.
6b3bb3d Change LogAcceptCategory to use uint32_t rather than sets of strings. (Gregory Maxwell)
Tree-SHA512: ebb5bcf9a7d00a32dd1390b727ff4d29330a038423611da01268d8e1d2c0229e52a1098e751d4e6db73ef4ae862e1e96d38249883fcaf12b68f55ebb01035b34
This commit is contained in: commit 1a5aaabb8a
30 changed files with 376 additions and 272 deletions
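The change replaces the old per-call lookup of a category string in a std::set<std::string> with a single uint32_t bitmask, so LogAcceptCategory() becomes one AND-and-compare. A minimal, self-contained sketch of the scheme follows; the category names, bit assignments, and helper bodies below are illustrative assumptions, not lines copied from the patch.

#include <cstdarg>
#include <cstdint>
#include <cstdio>

namespace BCLog {
enum LogFlags : uint32_t {
    NONE    = 0,
    NET     = (1 << 0),
    MEMPOOL = (1 << 1),
    HTTP    = (1 << 2),
    RPC     = (1 << 3),
    ADDRMAN = (1 << 4),
    ALL     = ~uint32_t(0),
};
} // namespace BCLog

// Global category mask, filled in once at startup from -debug=<category>.
uint32_t logCategories = BCLog::NONE;

// The old version did a std::set<std::string> lookup per call; now it is a bit test.
static inline bool LogAcceptCategory(uint32_t category)
{
    return (logCategories & category) != 0;
}

// Stand-in for the real LogPrintf (which writes to debug.log).
static void LogPrintf(const char* fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    std::vfprintf(stderr, fmt, ap);
    va_end(ap);
}

// LogPrint(category, ...) only formats when the category is enabled.
#define LogPrint(category, ...)            \
    do {                                   \
        if (LogAcceptCategory(category)) { \
            LogPrintf(__VA_ARGS__);        \
        }                                  \
    } while (0)

int main()
{
    logCategories |= BCLog::NET;                    // e.g. started with -debug=net
    LogPrint(BCLog::NET, "peer=%d connected\n", 7); // printed
    LogPrint(BCLog::MEMPOOL, "orphan tx stored\n"); // suppressed
}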
@@ -233,7 +233,7 @@ void CAddrMan::Good_(const CService& addr, int64_t nTime)
     if (nUBucket == -1)
         return;

-    LogPrint("addrman", "Moving %s to tried\n", addr.ToString());
+    LogPrint(BCLog::ADDRMAN, "Moving %s to tried\n", addr.ToString());

     // move nId to the tried tables
     MakeTried(info, nId);

@@ -442,7 +442,7 @@ public:
             }
         }
         if (nLost + nLostUnk > 0) {
-            LogPrint("addrman", "addrman lost %i new and %i tried addresses due to collisions\n", nLostUnk, nLost);
+            LogPrint(BCLog::ADDRMAN, "addrman lost %i new and %i tried addresses due to collisions\n", nLostUnk, nLost);
         }

         Check();

@@ -507,8 +507,9 @@ public:
         Check();
         fRet |= Add_(addr, source, nTimePenalty);
         Check();
-        if (fRet)
-            LogPrint("addrman", "Added %s from %s: %i tried, %i new\n", addr.ToStringIPPort(), source.ToString(), nTried, nNew);
+        if (fRet) {
+            LogPrint(BCLog::ADDRMAN, "Added %s from %s: %i tried, %i new\n", addr.ToStringIPPort(), source.ToString(), nTried, nNew);
+        }
         return fRet;
     }

@@ -521,8 +522,9 @@ public:
         for (std::vector<CAddress>::const_iterator it = vAddr.begin(); it != vAddr.end(); it++)
             nAdd += Add_(*it, source, nTimePenalty) ? 1 : 0;
         Check();
-        if (nAdd)
-            LogPrint("addrman", "Added %i addresses from %s: %i tried, %i new\n", nAdd, source.ToString(), nTried, nNew);
+        if (nAdd) {
+            LogPrint(BCLog::ADDRMAN, "Added %i addresses from %s: %i tried, %i new\n", nAdd, source.ToString(), nTried, nNew);
+        }
         return nAdd > 0;
     }
@@ -164,7 +164,7 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
             break;
     }

-    LogPrint("cmpctblock", "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION));
+    LogPrint(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION));

     return READ_STATUS_OK;
 }

@@ -209,10 +209,11 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
         return READ_STATUS_CHECKBLOCK_FAILED;
     }

-    LogPrint("cmpctblock", "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
+    LogPrint(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
     if (vtx_missing.size() < 5) {
-        for (const auto& tx : vtx_missing)
-            LogPrint("cmpctblock", "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
+        for (const auto& tx : vtx_missing) {
+            LogPrint(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
+        }
     }

     return READ_STATUS_OK;
@@ -21,8 +21,9 @@ public:
     // This code is adapted from posix_logger.h, which is why it is using vsprintf.
     // Please do not do this in normal code
     virtual void Logv(const char * format, va_list ap) override {
-        if (!LogAcceptCategory("leveldb"))
+        if (!LogAcceptCategory(BCLog::LEVELDB)) {
             return;
+        }
         char buffer[500];
         for (int iter = 0; iter < 2; iter++) {
             char* base;
@@ -233,7 +233,7 @@ static bool InitRPCAuthentication()

 bool StartHTTPRPC()
 {
-    LogPrint("rpc", "Starting HTTP RPC server\n");
+    LogPrint(BCLog::RPC, "Starting HTTP RPC server\n");
     if (!InitRPCAuthentication())
         return false;

@@ -247,12 +247,12 @@ bool StartHTTPRPC()

 void InterruptHTTPRPC()
 {
-    LogPrint("rpc", "Interrupting HTTP RPC server\n");
+    LogPrint(BCLog::RPC, "Interrupting HTTP RPC server\n");
 }

 void StopHTTPRPC()
 {
-    LogPrint("rpc", "Stopping HTTP RPC server\n");
+    LogPrint(BCLog::RPC, "Stopping HTTP RPC server\n");
     UnregisterHTTPHandler("/", true);
     if (httpRPCTimerInterface) {
         RPCUnsetTimerInterface(httpRPCTimerInterface);
@@ -213,7 +213,7 @@ static bool InitHTTPAllowList()
     std::string strAllowed;
     for (const CSubNet& subnet : rpc_allow_subnets)
         strAllowed += subnet.ToString() + " ";
-    LogPrint("http", "Allowing HTTP connections from: %s\n", strAllowed);
+    LogPrint(BCLog::HTTP, "Allowing HTTP connections from: %s\n", strAllowed);
     return true;
 }

@@ -243,7 +243,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
 {
     std::unique_ptr<HTTPRequest> hreq(new HTTPRequest(req));

-    LogPrint("http", "Received a %s request for %s from %s\n",
+    LogPrint(BCLog::HTTP, "Received a %s request for %s from %s\n",
              RequestMethodString(hreq->GetRequestMethod()), hreq->GetURI(), hreq->GetPeer().ToString());

     // Early address-based allow check

@@ -293,7 +293,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
 /** Callback to reject HTTP requests after shutdown. */
 static void http_reject_request_cb(struct evhttp_request* req, void*)
 {
-    LogPrint("http", "Rejecting request while shutting down\n");
+    LogPrint(BCLog::HTTP, "Rejecting request while shutting down\n");
     evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
 }

@@ -301,10 +301,10 @@ static void http_reject_request_cb(struct evhttp_request* req, void*)
 static bool ThreadHTTP(struct event_base* base, struct evhttp* http)
 {
     RenameThread("bitcoin-http");
-    LogPrint("http", "Entering http event loop\n");
+    LogPrint(BCLog::HTTP, "Entering http event loop\n");
     event_base_dispatch(base);
     // Event loop will be interrupted by InterruptHTTPServer()
-    LogPrint("http", "Exited http event loop\n");
+    LogPrint(BCLog::HTTP, "Exited http event loop\n");
     return event_base_got_break(base) == 0;
 }

@@ -336,7 +336,7 @@ static bool HTTPBindAddresses(struct evhttp* http)

     // Bind addresses
     for (std::vector<std::pair<std::string, uint16_t> >::iterator i = endpoints.begin(); i != endpoints.end(); ++i) {
-        LogPrint("http", "Binding RPC on address %s port %i\n", i->first, i->second);
+        LogPrint(BCLog::HTTP, "Binding RPC on address %s port %i\n", i->first, i->second);
         evhttp_bound_socket *bind_handle = evhttp_bind_socket_with_handle(http, i->first.empty() ? NULL : i->first.c_str(), i->second);
         if (bind_handle) {
             boundSockets.push_back(bind_handle);

@@ -364,7 +364,7 @@ static void libevent_log_cb(int severity, const char *msg)
     if (severity >= EVENT_LOG_WARN) // Log warn messages and higher without debug category
         LogPrintf("libevent: %s\n", msg);
     else
-        LogPrint("libevent", "libevent: %s\n", msg);
+        LogPrint(BCLog::LIBEVENT, "libevent: %s\n", msg);
 }

 bool InitHTTPServer()

@@ -387,10 +387,11 @@ bool InitHTTPServer()
 #if LIBEVENT_VERSION_NUMBER >= 0x02010100
     // If -debug=libevent, set full libevent debugging.
     // Otherwise, disable all libevent debugging.
-    if (LogAcceptCategory("libevent"))
+    if (LogAcceptCategory(BCLog::LIBEVENT)) {
         event_enable_debug_logging(EVENT_DBG_ALL);
-    else
+    } else {
         event_enable_debug_logging(EVENT_DBG_NONE);
+    }
 #endif
 #ifdef WIN32
     evthread_use_windows_threads();

@@ -424,7 +425,7 @@ bool InitHTTPServer()
         return false;
     }

-    LogPrint("http", "Initialized HTTP server\n");
+    LogPrint(BCLog::HTTP, "Initialized HTTP server\n");
     int workQueueDepth = std::max((long)GetArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L);
     LogPrintf("HTTP: creating work queue of depth %d\n", workQueueDepth);

@@ -439,7 +440,7 @@ std::future<bool> threadResult;

 bool StartHTTPServer()
 {
-    LogPrint("http", "Starting HTTP server\n");
+    LogPrint(BCLog::HTTP, "Starting HTTP server\n");
     int rpcThreads = std::max((long)GetArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L);
     LogPrintf("HTTP: starting %d worker threads\n", rpcThreads);
     std::packaged_task<bool(event_base*, evhttp*)> task(ThreadHTTP);

@@ -455,7 +456,7 @@ bool StartHTTPServer()

 void InterruptHTTPServer()
 {
-    LogPrint("http", "Interrupting HTTP server\n");
+    LogPrint(BCLog::HTTP, "Interrupting HTTP server\n");
     if (eventHTTP) {
         // Unlisten sockets
         for (evhttp_bound_socket *socket : boundSockets) {

@@ -470,15 +471,15 @@ void InterruptHTTPServer()

 void StopHTTPServer()
 {
-    LogPrint("http", "Stopping HTTP server\n");
+    LogPrint(BCLog::HTTP, "Stopping HTTP server\n");
     if (workQueue) {
-        LogPrint("http", "Waiting for HTTP worker threads to exit\n");
+        LogPrint(BCLog::HTTP, "Waiting for HTTP worker threads to exit\n");
         workQueue->WaitExit();
         delete workQueue;
         workQueue = nullptr;
     }
     if (eventBase) {
-        LogPrint("http", "Waiting for HTTP event thread to exit\n");
+        LogPrint(BCLog::HTTP, "Waiting for HTTP event thread to exit\n");
         // Give event loop a few seconds to exit (to send back last RPC responses), then break it
         // Before this was solved with event_base_loopexit, but that didn't work as expected in
         // at least libevent 2.0.21 and always introduced a delay. In libevent

@@ -499,7 +500,7 @@ void StopHTTPServer()
         event_base_free(eventBase);
         eventBase = 0;
     }
-    LogPrint("http", "Stopped HTTP server\n");
+    LogPrint(BCLog::HTTP, "Stopped HTTP server\n");
 }

 struct event_base* EventBase()

@@ -646,7 +647,7 @@ HTTPRequest::RequestMethod HTTPRequest::GetRequestMethod()

 void RegisterHTTPHandler(const std::string &prefix, bool exactMatch, const HTTPRequestHandler &handler)
 {
-    LogPrint("http", "Registering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch);
+    LogPrint(BCLog::HTTP, "Registering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch);
     pathHandlers.push_back(HTTPPathHandler(prefix, exactMatch, handler));
 }

@@ -659,7 +660,7 @@ void UnregisterHTTPHandler(const std::string &prefix, bool exactMatch)
             break;
     if (i != iend)
     {
-        LogPrint("http", "Unregistering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch);
+        LogPrint(BCLog::HTTP, "Unregistering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch);
         pathHandlers.erase(i);
     }
 }
src/init.cpp (28 changed lines)

@@ -312,7 +312,7 @@ void OnRPCStopped()
     uiInterface.NotifyBlockTip.disconnect(&RPCNotifyBlockChange);
     RPCNotifyBlockChange(false, nullptr);
     cvBlockChange.notify_all();
-    LogPrint("rpc", "RPC stopped.\n");
+    LogPrint(BCLog::RPC, "RPC stopped.\n");
 }

 void OnRPCPreCommand(const CRPCCommand& cmd)

@@ -441,11 +441,8 @@ std::string HelpMessage(HelpMessageMode mode)
         strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
         strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified BIP9 deployment (regtest-only)");
     }
-    std::string debugCategories = "addrman, alert, bench, cmpctblock, coindb, db, http, leveldb, libevent, lock, mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, selectcoins, tor, zmq"; // Don't translate these and qt below
-    if (mode == HMM_BITCOIN_QT)
-        debugCategories += ", qt";
     strUsage += HelpMessageOpt("-debug=<category>", strprintf(_("Output debugging information (default: %u, supplying <category> is optional)"), 0) + ". " +
-        _("If <category> is not supplied or if <category> = 1, output all debugging information.") + _("<category> can be:") + " " + debugCategories + ".");
+        _("If <category> is not supplied or if <category> = 1, output all debugging information.") + " " + _("<category> can be:") + " " + ListLogCategories() + ".");
     if (showDebug)
         strUsage += HelpMessageOpt("-nodebug", "Turn off debugging messages, same as -debug=0");
     strUsage += HelpMessageOpt("-help-debug", _("Show all debugging options (usage: --help -help-debug)"));

@@ -909,12 +906,19 @@ bool AppInitParameterInteraction()

     // ********************************************************* Step 3: parameter-to-internal-flags

-    fDebug = mapMultiArgs.count("-debug");
-    // Special-case: if -debug=0/-nodebug is set, turn off debugging messages
-    if (fDebug) {
+    if (mapMultiArgs.count("-debug") > 0) {
+        // Special-case: if -debug=0/-nodebug is set, turn off debugging messages
         const std::vector<std::string>& categories = mapMultiArgs.at("-debug");
-        if (GetBoolArg("-nodebug", false) || find(categories.begin(), categories.end(), std::string("0")) != categories.end())
-            fDebug = false;
+
+        if (!(GetBoolArg("-nodebug", false) || find(categories.begin(), categories.end(), std::string("0")) != categories.end())) {
+            for (const auto& cat : categories) {
+                uint32_t flag;
+                if (!GetLogCategory(&flag, &cat)) {
+                    InitWarning(strprintf(_("Unsupported logging category %s.\n"), cat));
+                }
+                logCategories |= flag;
+            }
+        }
     }

     // Check for -debugnet

@@ -1168,7 +1172,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
 #ifndef WIN32
     CreatePidFile(GetPidFile(), getpid());
 #endif
-    if (GetBoolArg("-shrinkdebugfile", !fDebug)) {
+    if (GetBoolArg("-shrinkdebugfile", logCategories != BCLog::NONE)) {
         // Do this first since it both loads a bunch of debug.log into memory,
         // and because this needs to happen before any other debug.log printing
         ShrinkDebugFile();

@@ -1492,7 +1496,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
                     break;
                 }
             } catch (const std::exception& e) {
-                if (fDebug) LogPrintf("%s\n", e.what());
+                LogPrintf("%s\n", e.what());
                 strLoadError = _("Error opening block database");
                 break;
             }
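For context on the new -debug handling above: each -debug=<category> string is mapped to a flag and OR-ed into logCategories. A rough sketch of the lookup that GetLogCategory()/ListLogCategories() perform follows; the table is a hypothetical excerpt (the real table lives elsewhere in the patch and covers every category in the help text).

#include <cstdint>
#include <string>
#include <vector>

struct CLogCategoryDesc {
    uint32_t flag;
    std::string category;
};

// Hypothetical excerpt of the category table; "0" disables and "1" enables everything.
static const std::vector<CLogCategoryDesc> vLogCategories = {
    {0,            "0"},
    {~uint32_t(0), "1"},
    {(1 << 0),     "net"},
    {(1 << 1),     "mempool"},
    {(1 << 2),     "http"},
};

bool GetLogCategory(uint32_t* f, const std::string* str)
{
    if (f == nullptr || str == nullptr)
        return false;
    for (const CLogCategoryDesc& c : vLogCategories) {
        if (c.category == *str) {
            *f = c.flag;
            return true;
        }
    }
    return false; // unknown category: the caller in init.cpp emits an InitWarning
}

std::string ListLogCategories()
{
    std::string ret;
    for (const CLogCategoryDesc& c : vLogCategories) {
        if (c.flag == 0 || c.flag == ~uint32_t(0))
            continue; // skip the "0"/"1" pseudo-categories in the help text
        if (!ret.empty())
            ret += ", ";
        ret += c.category;
    }
    return ret;
}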
@@ -204,7 +204,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
     }
     int64_t nTime2 = GetTimeMicros();

-    LogPrint("bench", "CreateNewBlock() packages: %.2fms (%d packages, %d updated descendants), validity: %.2fms (total %.2fms)\n", 0.001 * (nTime1 - nTimeStart), nPackagesSelected, nDescendantsUpdated, 0.001 * (nTime2 - nTime1), 0.001 * (nTime2 - nTimeStart));
+    LogPrint(BCLog::BENCH, "CreateNewBlock() packages: %.2fms (%d packages, %d updated descendants), validity: %.2fms (total %.2fms)\n", 0.001 * (nTime1 - nTimeStart), nPackagesSelected, nDescendantsUpdated, 0.001 * (nTime2 - nTime1), 0.001 * (nTime2 - nTimeStart));

     return std::move(pblocktemplate);
 }
src/net.cpp (46 changed lines)

@@ -190,7 +190,7 @@ void AdvertiseLocal(CNode *pnode)
     }
     if (addrLocal.IsRoutable())
     {
-        LogPrint("net", "AdvertiseLocal: advertising address %s\n", addrLocal.ToString());
+        LogPrint(BCLog::NET, "AdvertiseLocal: advertising address %s\n", addrLocal.ToString());
         FastRandomContext insecure_rand;
         pnode->PushAddress(addrLocal, insecure_rand);
     }

@@ -356,7 +356,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
     }

     /// debug print
-    LogPrint("net", "trying connection %s lastseen=%.1fhrs\n",
+    LogPrint(BCLog::NET, "trying connection %s lastseen=%.1fhrs\n",
         pszDest ? pszDest : addrConnect.ToString(),
         pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime)/3600.0);

@@ -423,7 +423,7 @@ void CConnman::DumpBanlist()
     if (!bandb.Write(banmap))
         SetBannedSetDirty(true);

-    LogPrint("net", "Flushed %d banned node ips/subnets to banlist.dat %dms\n",
+    LogPrint(BCLog::NET, "Flushed %d banned node ips/subnets to banlist.dat %dms\n",
         banmap.size(), GetTimeMillis() - nStart);
 }

@@ -433,7 +433,7 @@ void CNode::CloseSocketDisconnect()
     LOCK(cs_hSocket);
     if (hSocket != INVALID_SOCKET)
     {
-        LogPrint("net", "disconnecting peer=%d\n", id);
+        LogPrint(BCLog::NET, "disconnecting peer=%d\n", id);
         CloseSocket(hSocket);
     }
 }

@@ -565,7 +565,7 @@ void CConnman::SweepBanned()
         {
             setBanned.erase(it++);
             setBannedIsDirty = true;
-            LogPrint("net", "%s: Removed banned node ip/subnet from banlist.dat: %s\n", __func__, subNet.ToString());
+            LogPrint(BCLog::NET, "%s: Removed banned node ip/subnet from banlist.dat: %s\n", __func__, subNet.ToString());
         }
         else
             ++it;

@@ -711,7 +711,7 @@ bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool& complete
             return false;

         if (msg.in_data && msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) {
-            LogPrint("net", "Oversized message from peer=%i, disconnecting\n", GetId());
+            LogPrint(BCLog::NET, "Oversized message from peer=%i, disconnecting\n", GetId());
             return false;
         }

@@ -1087,7 +1087,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
     {
         if (!AttemptToEvictConnection()) {
             // No connection to evict, disconnect the new connection
-            LogPrint("net", "failed to find an eviction candidate - connection dropped (full)\n");
+            LogPrint(BCLog::NET, "failed to find an eviction candidate - connection dropped (full)\n");
             CloseSocket(hSocket);
             return;
         }

@@ -1101,7 +1101,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
     pnode->fWhitelisted = whitelisted;
     GetNodeSignals().InitializeNode(pnode, *this);

-    LogPrint("net", "connection from %s accepted\n", addr.ToString());
+    LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString());

     {
         LOCK(cs_vNodes);

@@ -1336,8 +1336,9 @@ void CConnman::ThreadSocketHandler()
                 else if (nBytes == 0)
                 {
                     // socket closed gracefully
-                    if (!pnode->fDisconnect)
-                        LogPrint("net", "socket closed\n");
+                    if (!pnode->fDisconnect) {
+                        LogPrint(BCLog::NET, "socket closed\n");
+                    }
                     pnode->CloseSocketDisconnect();
                 }
                 else if (nBytes < 0)

@@ -1375,7 +1376,7 @@ void CConnman::ThreadSocketHandler()
             {
                 if (pnode->nLastRecv == 0 || pnode->nLastSend == 0)
                 {
-                    LogPrint("net", "socket no message in first 60 seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id);
+                    LogPrint(BCLog::NET, "socket no message in first 60 seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id);
                     pnode->fDisconnect = true;
                 }
                 else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL)

@@ -1634,7 +1635,7 @@ void CConnman::DumpAddresses()
     CAddrDB adb;
     adb.Write(addrman);

-    LogPrint("net", "Flushed %d addresses to peers.dat %dms\n",
+    LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n",
         addrman.size(), GetTimeMillis() - nStart);
 }

@@ -1807,7 +1808,7 @@ void CConnman::ThreadOpenConnections()
             int randsleep = GetRandInt(FEELER_SLEEP_WINDOW * 1000);
             if (!interruptNet.sleep_for(std::chrono::milliseconds(randsleep)))
                 return;
-            LogPrint("net", "Making feeler connection to %s\n", addrConnect.ToString());
+            LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString());
         }

         OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, NULL, false, fFeeler);

@@ -2150,9 +2151,7 @@ void Discover(boost::thread_group& threadGroup)

 void CConnman::SetNetworkActive(bool active)
 {
-    if (fDebug) {
-        LogPrint("net", "SetNetworkActive: %s\n", active);
-    }
+    LogPrint(BCLog::NET, "SetNetworkActive: %s\n", active);

     if (!active) {
         fNetworkActive = false;

@@ -2241,7 +2240,7 @@ bool CConnman::Start(CScheduler& scheduler, std::string& strNodeError, Options c
         SetBannedSetDirty(false); // no need to write down, just read data
         SweepBanned(); // sweep out unused entries

-        LogPrint("net", "Loaded %d banned node ips/subnets from banlist.dat %dms\n",
+        LogPrint(BCLog::NET, "Loaded %d banned node ips/subnets from banlist.dat %dms\n",
            banmap.size(), GetTimeMillis() - nStart);
     } else {
         LogPrintf("Invalid or missing banlist.dat; recreating\n");

@@ -2683,10 +2682,11 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
         mapRecvBytesPerMsgCmd[msg] = 0;
     mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0;

-    if (fLogIPs)
-        LogPrint("net", "Added connection to %s peer=%d\n", addrName, id);
-    else
-        LogPrint("net", "Added connection peer=%d\n", id);
+    if (fLogIPs) {
+        LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", addrName, id);
+    } else {
+        LogPrint(BCLog::NET, "Added connection peer=%d\n", id);
+    }
 }

 CNode::~CNode()

@@ -2713,7 +2713,7 @@ void CNode::AskFor(const CInv& inv)
         nRequestTime = it->second;
     else
         nRequestTime = 0;
-    LogPrint("net", "askfor %s %d (%s) peer=%d\n", inv.ToString(), nRequestTime, DateTimeStrFormat("%H:%M:%S", nRequestTime/1000000), id);
+    LogPrint(BCLog::NET, "askfor %s %d (%s) peer=%d\n", inv.ToString(), nRequestTime, DateTimeStrFormat("%H:%M:%S", nRequestTime/1000000), id);

     // Make sure not to reuse time indexes to keep things in the same order
     int64_t nNow = GetTimeMicros() - 1000000;

@@ -2740,7 +2740,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
 {
     size_t nMessageSize = msg.data.size();
     size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE;
-    LogPrint("net", "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id);
+    LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id);

     std::vector<unsigned char> serializedHeader;
     serializedHeader.reserve(CMessageHeader::HEADER_SIZE);
@@ -256,10 +256,11 @@ void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
     connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
             nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes));

-    if (fLogIPs)
-        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
-    else
-        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
+    if (fLogIPs) {
+        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
+    } else {
+        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
+    }
 }

 void InitializeNode(CNode *pnode, CConnman& connman) {

@@ -619,7 +620,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE
     unsigned int sz = GetTransactionWeight(*tx);
     if (sz >= MAX_STANDARD_TX_WEIGHT)
     {
-        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
+        LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
         return false;
     }

@@ -631,7 +632,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE

     AddToCompactExtraTransactions(tx);

-    LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
+    LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
              mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
     return true;
 }

@@ -666,7 +667,7 @@ void EraseOrphansFor(NodeId peer)
             nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
         }
     }
-    if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer=%d\n", nErased, peer);
+    if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
 }

@@ -691,7 +692,7 @@ unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRE
         }
         // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
         nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
-        if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased);
+        if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
     }
     while (mapOrphanTransactions.size() > nMaxOrphans)
     {

@@ -767,7 +768,7 @@ void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIn
         BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
             nErased += EraseOrphanTx(orphanHash);
         }
-        LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased);
+        LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
     }
 }

@@ -808,7 +809,7 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std:
         if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
                 !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {

-            LogPrint("net", "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
+            LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
                     hashBlock.ToString(), pnode->id);
             connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
             state.pindexBestHeaderSent = pindex;

@@ -1024,7 +1025,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                 static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
                 if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
                 {
-                    LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
+                    LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());

                     //disconnect node
                     pfrom->fDisconnect = true;

@@ -1168,7 +1169,7 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac

 bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
 {
-    LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
+    LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
     if (IsArgSet("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 0)) == 0)
     {
         LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");

@@ -1192,7 +1193,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr

     if (strCommand == NetMsgType::REJECT)
     {
-        if (fDebug) {
+        if (LogAcceptCategory(BCLog::NET)) {
             try {
                 std::string strMsg; unsigned char ccode; std::string strReason;
                 vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);

@@ -1206,10 +1207,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                     vRecv >> hash;
                     ss << ": hash " << hash.ToString();
                 }
-                LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
+                LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
             } catch (const std::ios_base::failure&) {
                 // Avoid feedback loops by preventing reject messages from triggering a new reject message.
-                LogPrint("net", "Unparseable reject message received\n");
+                LogPrint(BCLog::NET, "Unparseable reject message received\n");
             }
         }
     }

@@ -1247,7 +1248,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         }
         if (pfrom->nServicesExpected & ~nServices)
         {
-            LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected);
+            LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected);
             connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
                                strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
             pfrom->fDisconnect = true;

@@ -1335,11 +1336,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             FastRandomContext insecure_rand;
             if (addr.IsRoutable())
             {
-                LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
+                LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
                 pfrom->PushAddress(addr, insecure_rand);
             } else if (IsPeerAddrLocalGood(pfrom)) {
                 addr.SetIP(addrMe);
-                LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
+                LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
                 pfrom->PushAddress(addr, insecure_rand);
             }
         }

@@ -1541,7 +1542,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             return true;

         bool fAlreadyHave = AlreadyHave(inv);
-        LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
+        LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);

         if (inv.type == MSG_TX) {
             inv.type |= nFetchFlags;

@@ -1556,16 +1557,17 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                 // we now only provide a getheaders response here. When we receive the headers, we will
                 // then ask for the blocks we need.
                 connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
-                LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
+                LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
             }
         }
         else
         {
             pfrom->AddInventoryKnown(inv);
-            if (fBlocksOnly)
-                LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
-            else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())
+            if (fBlocksOnly) {
+                LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
+            } else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) {
                 pfrom->AskFor(inv);
+            }
         }

         // Track requests for our stuff
@@ -1588,11 +1590,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             return error("message getdata size() = %u", vInv.size());
         }

-        if (fDebug || (vInv.size() != 1))
-            LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
+        LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);

-        if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
-            LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
+        if (vInv.size() > 0) {
+            LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
+        }

         pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
         ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);

@@ -1631,12 +1633,12 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         if (pindex)
             pindex = chainActive.Next(pindex);
         int nLimit = 500;
-        LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
+        LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
         for (; pindex; pindex = chainActive.Next(pindex))
         {
             if (pindex->GetBlockHash() == hashStop)
             {
-                LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+                LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             // If pruning, don't inv blocks unless we have on disk and are likely to still have

@@ -1644,7 +1646,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
             if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
             {
-                LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+                LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));

@@ -1652,7 +1654,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             {
                 // When this block is requested, we'll send an inv that'll
                 // trigger the peer to getblocks the next batch of inventory.
-                LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+                LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                 pfrom->hashContinue = pindex->GetBlockHash();
                 break;
             }

@@ -1693,7 +1695,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             // might maliciously send lots of getblocktxn requests to trigger
             // expensive disk reads, because it will require the peer to
             // actually receive all the data read from disk over the network.
-            LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
+            LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
             CInv inv;
             inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
             inv.hash = req.blockhash;

@@ -1718,7 +1720,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr

         LOCK(cs_main);
         if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
-            LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
+            LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
             return true;
         }

@@ -1743,7 +1745,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        std::vector<CBlock> vHeaders;
         int nLimit = MAX_HEADERS_RESULTS;
-        LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
+        LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
         for (; pindex; pindex = chainActive.Next(pindex))
         {
             vHeaders.push_back(pindex->GetBlockHeader());

@@ -1773,7 +1775,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
         if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
         {
-            LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);
+            LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->id);
             return true;
         }

@@ -1805,7 +1807,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr

             pfrom->nLastTXTime = GetTime();

-            LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
+            LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
                 pfrom->id,
                 tx.GetHash().ToString(),
                 mempool.size(), mempool.DynamicMemoryUsage() / 1000);

@@ -1835,7 +1837,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                 if (setMisbehaving.count(fromPeer))
                     continue;
                 if (AcceptToMemoryPool(mempool, stateDummy, porphanTx, true, &fMissingInputs2, &lRemovedTxn)) {
-                    LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
+                    LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
                     RelayTransaction(orphanTx, connman);
                     for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
                         vWorkQueue.emplace_back(orphanHash, i);

@@ -1850,11 +1852,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                         // Punish peer that gave us an invalid orphan tx
                         Misbehaving(fromPeer, nDos);
                         setMisbehaving.insert(fromPeer);
-                        LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
+                        LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString());
                     }
                     // Has inputs but not accepted to mempool
                     // Probably non-standard or insufficient fee
-                    LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
+                    LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
                     vEraseQueue.push_back(orphanHash);
                     if (!orphanTx.HasWitness() && !stateDummy.CorruptionPossible()) {
                         // Do not use rejection cache for witness transactions or

@@ -1892,10 +1894,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
             unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
             unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
-            if (nEvicted > 0)
-                LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
+            if (nEvicted > 0) {
+                LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
+            }
         } else {
-            LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
+            LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
             // We will continue to reject this tx since it has rejected
             // parents so avoid re-requesting it from other peers.
             recentRejects->insert(tx.GetHash());

@@ -1939,7 +1942,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         int nDoS = 0;
         if (state.IsInvalid(nDoS))
         {
-            LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
+            LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
                 pfrom->id,
                 FormatStateMessage(state));
             if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P

@@ -2046,7 +2049,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                     (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
                 else {
                     // The block was already in flight using compact blocks from the same peer
-                    LogPrint("net", "Peer sent us compact block we were already syncing!\n");
+                    LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
                     return true;
                 }
             }

@@ -2161,7 +2164,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
         if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
                 it->second.first != pfrom->GetId()) {
-            LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
+            LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
             return true;
         }
@@ -2254,7 +2257,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
             nodestate->nUnconnectingHeaders++;
             connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
-            LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+            LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                     headers[0].GetHash().ToString(),
                     headers[0].hashPrevBlock.ToString(),
                     pindexBestHeader->nHeight,

@@ -2296,7 +2299,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom->GetId());
         if (nodestate->nUnconnectingHeaders > 0) {
-            LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
+            LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
         }
         nodestate->nUnconnectingHeaders = 0;

@@ -2307,7 +2310,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             // Headers message had its maximum size; the peer may have more headers.
             // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
             // from there instead.
-            LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
+            LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
             connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
         }

@@ -2332,7 +2335,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             // the main chain -- this shouldn't really happen. Bail out on the
             // direct fetch and rely on parallel download instead.
             if (!chainActive.Contains(pindexWalk)) {
-                LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
+                LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                         pindexLast->GetBlockHash().ToString(),
                         pindexLast->nHeight);
             } else {

@@ -2346,11 +2349,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                     uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus());
                     vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                     MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
-                    LogPrint("net", "Requesting block %s from peer=%d\n",
+                    LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                             pindex->GetBlockHash().ToString(), pfrom->id);
                 }
                 if (vGetData.size() > 1) {
-                    LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
+                    LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
                             pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
                 }
                 if (vGetData.size() > 0) {

@@ -2370,7 +2373,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         vRecv >> *pblock;

-        LogPrint("net", "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id);
+        LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id);

         // Process all blocks from whitelisted peers, even if not requested,
         // unless we're still syncing with the network.

@@ -2402,14 +2405,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         // Making nodes which are behind NAT and can only make outgoing connections ignore
         // the getaddr message mitigates the attack.
         if (!pfrom->fInbound) {
-            LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
+            LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
             return true;
         }

         // Only send one GetAddr response per connection to reduce resource waste
         // and discourage addr stamping of INV announcements.
         if (pfrom->fSentAddr) {
-            LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
+            LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
             return true;
         }
         pfrom->fSentAddr = true;

@@ -2426,14 +2429,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
     {
         if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
         {
-            LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
+            LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
            pfrom->fDisconnect = true;
            return true;
         }

         if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)
         {
-            LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
+            LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }

@@ -2509,7 +2512,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         }

         if (!(sProblem.empty())) {
-            LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
+            LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                 pfrom->id,
                 sProblem,
                 pfrom->nPingNonceSent,

@@ -2587,7 +2590,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                 LOCK(pfrom->cs_feeFilter);
                 pfrom->minFeeFilter = newFeeFilter;
             }
-            LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
+            LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
         }
     }

@@ -2598,7 +2601,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr

     else {
         // Ignore unknown commands for extensibility
-        LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
+        LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
     }

@@ -2876,7 +2879,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
                got back an empty response. */
                 if (pindexStart->pprev)
                     pindexStart = pindexStart->pprev;
-                LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
+                LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
                 connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
             }
         }

@@ -2960,7 +2963,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
             if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
                 // We only send up to 1 block as header-and-ids, as otherwise
                 // probably means we're doing an initial-ish-sync or they're slow
-                LogPrint("net", "%s sending header-and-ids %s to peer=%d\n", __func__,
+                LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
                         vHeaders.front().GetHash().ToString(), pto->id);

                 int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;

@@ -2988,12 +2991,12 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
                 state.pindexBestHeaderSent = pBestIndex;
             } else if (state.fPreferHeaders) {
                 if (vHeaders.size() > 1) {
-                    LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
+                    LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
                             vHeaders.size(),
                             vHeaders.front().GetHash().ToString(),
                             vHeaders.back().GetHash().ToString(), pto->id);
                 } else {
-                    LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
+                    LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
                             vHeaders.front().GetHash().ToString(), pto->id);
                 }
                 connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));

@@ -3015,14 +3018,14 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
                 // This should be very rare and could be optimized out.
                 // Just log for now.
                 if (chainActive[pindex->nHeight] != pindex) {
-                    LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
+                    LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
                         hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
                 }

                 // If the peer's chain has this block, don't inv it back.
                 if (!PeerHasHeader(&state, pindex)) {
                     pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
-                    LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
+                    LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
                         pto->id, hashToAnnounce.ToString());
                 }
             }

@@ -3201,13 +3204,13 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
             uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
             vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
             MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
-            LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
+            LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                 pindex->nHeight, pto->id);
         }
         if (state.nBlocksInFlight == 0 && staller != -1) {
             if (State(staller)->nStallingSince == 0) {
                 State(staller)->nStallingSince = nNow;
-                LogPrint("net", "Stall started peer=%d\n", staller);
+                LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
             }
         }
     }

@@ -3220,8 +3223,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
             const CInv& inv = (*pto->mapAskFor.begin()).second;
             if (!AlreadyHave(inv))
             {
-                if (fDebug)
-                    LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
+                LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->id);
                 vGetData.push_back(inv);
                 if (vGetData.size() >= 1000)
                 {
@@ -281,7 +281,7 @@ std::string Socks5ErrorString(int err)
 static bool Socks5(const std::string& strDest, int port, const ProxyCredentials *auth, SOCKET& hSocket)
 {
     IntrRecvError recvr;
-    LogPrint("net", "SOCKS5 connecting %s\n", strDest);
+    LogPrint(BCLog::NET, "SOCKS5 connecting %s\n", strDest);
     if (strDest.size() > 255) {
         CloseSocket(hSocket);
         return error("Hostname too long");

@@ -327,7 +327,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
             CloseSocket(hSocket);
             return error("Error sending authentication to proxy");
         }
-        LogPrint("proxy", "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password);
+        LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password);
         char pchRetA[2];
         if ((recvr = InterruptibleRecv(pchRetA, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) {
             CloseSocket(hSocket);

@@ -409,7 +409,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
         CloseSocket(hSocket);
         return error("Error reading from proxy");
     }
-    LogPrint("net", "SOCKS5 connected %s\n", strDest);
+    LogPrint(BCLog::NET, "SOCKS5 connected %s\n", strDest);
     return true;
 }

@@ -458,7 +458,7 @@ bool static ConnectSocketDirectly(const CService &addrConnect, SOCKET& hSocketRe
             int nRet = select(hSocket + 1, NULL, &fdset, NULL, &timeout);
             if (nRet == 0)
             {
-                LogPrint("net", "connection to %s timeout\n", addrConnect.ToString());
+                LogPrint(BCLog::NET, "connection to %s timeout\n", addrConnect.ToString());
                 CloseSocket(hSocket);
                 return false;
             }
@@ -165,7 +165,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
 }
 }

-LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
+LogPrint(BCLog::ESTIMATEFEE, "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
 confTarget, requireGreater ? ">" : "<", successBreakPoint,
 requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
 100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);

@@ -241,7 +241,7 @@ void TxConfirmStats::Read(CAutoFile& filein)
 for (unsigned int i = 0; i < buckets.size(); i++)
 bucketMap[buckets[i]] = i;

-LogPrint("estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n",
+LogPrint(BCLog::ESTIMATEFEE, "Reading estimates: %u buckets counting confirms up to %u blocks\n",
 numBuckets, maxConfirms);
 }

@@ -260,24 +260,26 @@ void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHe
 if (nBestSeenHeight == 0) // the BlockPolicyEstimator hasn't seen any blocks yet
 blocksAgo = 0;
 if (blocksAgo < 0) {
-LogPrint("estimatefee", "Blockpolicy error, blocks ago is negative for mempool tx\n");
+LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, blocks ago is negative for mempool tx\n");
 return; //This can't happen because we call this with our best seen height, no entries can have higher
 }

 if (blocksAgo >= (int)unconfTxs.size()) {
-if (oldUnconfTxs[bucketindex] > 0)
+if (oldUnconfTxs[bucketindex] > 0) {
 oldUnconfTxs[bucketindex]--;
-else
-LogPrint("estimatefee", "Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already\n",
+} else {
+LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already\n",
 bucketindex);
+}
 }
 else {
 unsigned int blockIndex = entryHeight % unconfTxs.size();
-if (unconfTxs[blockIndex][bucketindex] > 0)
+if (unconfTxs[blockIndex][bucketindex] > 0) {
 unconfTxs[blockIndex][bucketindex]--;
-else
-LogPrint("estimatefee", "Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already\n",
+} else {
+LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already\n",
 blockIndex, bucketindex);
+}
 }
 }

@@ -316,7 +318,7 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, boo
 unsigned int txHeight = entry.GetHeight();
 uint256 hash = entry.GetTx().GetHash();
 if (mapMemPoolTxs.count(hash)) {
-LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n",
+LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error mempool tx %s already being tracked\n",
 hash.ToString().c_str());
 return;
 }

@@ -358,7 +360,7 @@ bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxM
 if (blocksToConfirm <= 0) {
 // This can't happen because we don't process transactions from a block with a height
 // lower than our greatest seen height
-LogPrint("estimatefee", "Blockpolicy error Transaction had negative blocksToConfirm\n");
+LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error Transaction had negative blocksToConfirm\n");
 return false;
 }

@@ -399,7 +401,7 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
 // Update all exponential averages with the current block state
 feeStats.UpdateMovingAverages();

-LogPrint("estimatefee", "Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u\n",
+LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u\n",
 countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size());

 trackedTxs = 0;

@@ -152,15 +152,21 @@ static void initTranslations(QTranslator &qtTranslatorBase, QTranslator &qtTrans
 #if QT_VERSION < 0x050000
 void DebugMessageHandler(QtMsgType type, const char *msg)
 {
-const char *category = (type == QtDebugMsg) ? "qt" : NULL;
-LogPrint(category, "GUI: %s\n", msg);
+if (type == QtDebugMsg) {
+LogPrint(BCLog::QT, "GUI: %s\n", msg);
+} else {
+LogPrintf("GUI: %s\n", msg);
+}
 }
 #else
 void DebugMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString &msg)
 {
 Q_UNUSED(context);
-const char *category = (type == QtDebugMsg) ? "qt" : NULL;
-LogPrint(category, "GUI: %s\n", msg.toStdString());
+if (type == QtDebugMsg) {
+LogPrint(BCLog::QT, "GUI: %s\n", msg.toStdString());
+} else {
+LogPrintf("GUI: %s\n", msg.toStdString());
+}
 }
 #endif

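The handler above only decides where a Qt message goes once it has been received; the registration of the handler itself is not part of this hunk. As a rough sketch (assuming the usual Qt 5 qInstallMessageHandler wiring, which bitcoin-qt performs elsewhere at startup), the routing would be hooked up like this:

// Sketch only: registering the Qt 5 message handler shown above.
// The actual installation call lives outside this diff; this is illustrative.
#include <QtGlobal>

void DebugMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString& msg);

static void InstallGuiLogHandler()  // hypothetical helper name
{
    // After this call, qDebug() output from Qt is forwarded to the BCLog::QT
    // category, while warnings/criticals go through the unconditional LogPrintf() path.
    qInstallMessageHandler(DebugMessageHandler);
}
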
@@ -273,7 +273,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
 //
 // Debug view
 //
-if (fDebug)
+if (logCategories != BCLog::NONE)
 {
 strHTML += "<hr><br>" + tr("Debug information") + "<br><br>";
 BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)

@@ -91,7 +91,7 @@ static void RandAddSeedPerfmon()
 if (ret == ERROR_SUCCESS) {
 RAND_add(vData.data(), nSize, nSize / 100.0);
 memory_cleanse(vData.data(), nSize);
-LogPrint("rand", "%s: %lu bytes\n", __func__, nSize);
+LogPrint(BCLog::RAND, "%s: %lu bytes\n", __func__, nSize);
 } else {
 static bool warned = false; // Warn only once
 if (!warned) {

@@ -855,7 +855,7 @@ UniValue pruneblockchain(const JSONRPCRequest& request)
 else if (height > chainHeight)
 throw JSONRPCError(RPC_INVALID_PARAMETER, "Blockchain is shorter than the attempted prune height.");
 else if (height > chainHeight - MIN_BLOCKS_TO_KEEP) {
-LogPrint("rpc", "Attempt to prune blocks close to the tip. Retaining the minimum number of blocks.");
+LogPrint(BCLog::RPC, "Attempt to prune blocks close to the tip. Retaining the minimum number of blocks.");
 height = chainHeight - MIN_BLOCKS_TO_KEEP;
 }

@@ -305,7 +305,7 @@ bool CRPCTable::appendCommand(const std::string& name, const CRPCCommand* pcmd)

 bool StartRPC()
 {
-LogPrint("rpc", "Starting RPC\n");
+LogPrint(BCLog::RPC, "Starting RPC\n");
 fRPCRunning = true;
 g_rpcSignals.Started();
 return true;

@@ -313,14 +313,14 @@ bool StartRPC()

 void InterruptRPC()
 {
-LogPrint("rpc", "Interrupting RPC\n");
+LogPrint(BCLog::RPC, "Interrupting RPC\n");
 // Interrupt e.g. running longpolls
 fRPCRunning = false;
 }

 void StopRPC()
 {
-LogPrint("rpc", "Stopping RPC\n");
+LogPrint(BCLog::RPC, "Stopping RPC\n");
 deadlineTimers.clear();
 g_rpcSignals.Stopped();
 }

@@ -368,8 +368,9 @@ void JSONRPCRequest::parse(const UniValue& valRequest)
 if (!valMethod.isStr())
 throw JSONRPCError(RPC_INVALID_REQUEST, "Method must be a string");
 strMethod = valMethod.get_str();
-if (strMethod != "getblocktemplate")
-LogPrint("rpc", "ThreadRPCServer method=%s\n", SanitizeString(strMethod));
+if (strMethod != "getblocktemplate") {
+LogPrint(BCLog::RPC, "ThreadRPCServer method=%s\n", SanitizeString(strMethod));
+}

 // Parse params
 UniValue valParams = find_value(request, "params");

@@ -531,7 +532,7 @@ void RPCRunLater(const std::string& name, boost::function<void(void)> func, int6
 if (!timerInterface)
 throw JSONRPCError(RPC_INTERNAL_ERROR, "No timer handler registered for RPC");
 deadlineTimers.erase(name);
-LogPrint("rpc", "queue run of timer %s in %i seconds (using %s)\n", name, nSeconds, timerInterface->Name());
+LogPrint(BCLog::RPC, "queue run of timer %s in %i seconds (using %s)\n", name, nSeconds, timerInterface->Name());
 deadlineTimers.emplace(name, std::unique_ptr<RPCTimerBase>(timerInterface->NewTimer(func, nSeconds*1000)));
 }

@@ -58,7 +58,7 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
 // Add data
 static CMedianFilter<int64_t> vTimeOffsets(BITCOIN_TIMEDATA_MAX_SAMPLES, 0);
 vTimeOffsets.input(nOffsetSample);
-LogPrint("net","added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample/60);
+LogPrint(BCLog::NET,"added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample/60);

 // There is a known issue here (see issue #4521):
 //

@@ -108,11 +108,14 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
 }
 }
 }

-BOOST_FOREACH(int64_t n, vSorted)
-LogPrint("net", "%+d ", n);
-LogPrint("net", "| ");
-
-LogPrint("net", "nTimeOffset = %+d (%+d minutes)\n", nTimeOffset, nTimeOffset/60);
+if (LogAcceptCategory(BCLog::NET)) {
+BOOST_FOREACH(int64_t n, vSorted) {
+LogPrint(BCLog::NET, "%+d ", n);
+}
+LogPrint(BCLog::NET, "| ");
+
+LogPrint(BCLog::NET, "nTimeOffset = %+d (%+d minutes)\n", nTimeOffset, nTimeOffset/60);
+}
 }
 }

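This hunk shows the pattern the new API expects for multi-part log lines: each LogPrint already tests its category bit, but wrapping the whole loop in LogAcceptCategory(BCLog::NET) skips the iteration and formatting entirely when the category is disabled. A minimal sketch of the same pattern, using a hypothetical helper name and plain range-for in place of BOOST_FOREACH:

// Sketch of the guard pattern used above (illustrative helper, not from the diff).
void DumpTimeOffsets(const std::vector<int64_t>& vSorted, int64_t nTimeOffset)
{
    // Cheap bit test against the logCategories mask; bail out before doing any work.
    if (!LogAcceptCategory(BCLog::NET)) return;

    for (int64_t n : vSorted) {
        LogPrint(BCLog::NET, "%+d ", n);
    }
    LogPrint(BCLog::NET, "| ");
    LogPrint(BCLog::NET, "nTimeOffset = %+d (%+d minutes)\n", nTimeOffset, nTimeOffset / 60);
}
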
@@ -163,7 +163,7 @@ void TorControlConnection::readcb(struct bufferevent *bev, void *ctx)
 self->reply_handlers.front()(*self, self->message);
 self->reply_handlers.pop_front();
 } else {
-LogPrint("tor", "tor: Received unexpected sync reply %i\n", self->message.code);
+LogPrint(BCLog::TOR, "tor: Received unexpected sync reply %i\n", self->message.code);
 }
 }
 self->message.Clear();

@@ -182,13 +182,14 @@ void TorControlConnection::eventcb(struct bufferevent *bev, short what, void *ct
 {
 TorControlConnection *self = (TorControlConnection*)ctx;
 if (what & BEV_EVENT_CONNECTED) {
-LogPrint("tor", "tor: Successfully connected!\n");
+LogPrint(BCLog::TOR, "tor: Successfully connected!\n");
 self->connected(*self);
 } else if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) {
-if (what & BEV_EVENT_ERROR)
-LogPrint("tor", "tor: Error connecting to Tor control socket\n");
-else
-LogPrint("tor", "tor: End of stream\n");
+if (what & BEV_EVENT_ERROR) {
+LogPrint(BCLog::TOR, "tor: Error connecting to Tor control socket\n");
+} else {
+LogPrint(BCLog::TOR, "tor: End of stream\n");
+}
 self->Disconnect();
 self->disconnected(*self);
 }

@@ -410,7 +411,7 @@ TorController::TorController(struct event_base* _base, const std::string& _targe
 // Read service private key if cached
 std::pair<bool,std::string> pkf = ReadBinaryFile(GetPrivateKeyFile());
 if (pkf.first) {
-LogPrint("tor", "tor: Reading cached private key from %s\n", GetPrivateKeyFile());
+LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", GetPrivateKeyFile());
 private_key = pkf.second;
 }
 }

@@ -429,7 +430,7 @@ TorController::~TorController()
 void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlReply& reply)
 {
 if (reply.code == 250) {
-LogPrint("tor", "tor: ADD_ONION successful\n");
+LogPrint(BCLog::TOR, "tor: ADD_ONION successful\n");
 BOOST_FOREACH(const std::string &s, reply.lines) {
 std::map<std::string,std::string> m = ParseTorReplyMapping(s);
 std::map<std::string,std::string>::iterator i;

@@ -441,7 +442,7 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
 service = LookupNumeric(std::string(service_id+".onion").c_str(), GetListenPort());
 LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString());
 if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
-LogPrint("tor", "tor: Cached service private key to %s\n", GetPrivateKeyFile());
+LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile());
 } else {
 LogPrintf("tor: Error writing service private key to %s\n", GetPrivateKeyFile());
 }

@@ -457,7 +458,7 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
 void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply& reply)
 {
 if (reply.code == 250) {
-LogPrint("tor", "tor: Authentication successful\n");
+LogPrint(BCLog::TOR, "tor: Authentication successful\n");

 // Now that we know Tor is running setup the proxy for onion addresses
 // if -onion isn't set to something else.

@@ -511,13 +512,13 @@ static std::vector<uint8_t> ComputeResponse(const std::string &key, const std::v
 void TorController::authchallenge_cb(TorControlConnection& _conn, const TorControlReply& reply)
 {
 if (reply.code == 250) {
-LogPrint("tor", "tor: SAFECOOKIE authentication challenge successful\n");
+LogPrint(BCLog::TOR, "tor: SAFECOOKIE authentication challenge successful\n");
 std::pair<std::string,std::string> l = SplitTorReplyLine(reply.lines[0]);
 if (l.first == "AUTHCHALLENGE") {
 std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
 std::vector<uint8_t> serverHash = ParseHex(m["SERVERHASH"]);
 std::vector<uint8_t> serverNonce = ParseHex(m["SERVERNONCE"]);
-LogPrint("tor", "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce));
+LogPrint(BCLog::TOR, "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce));
 if (serverNonce.size() != 32) {
 LogPrintf("tor: ServerNonce is not 32 bytes, as required by spec\n");
 return;

@@ -562,12 +563,12 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro
 std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
 std::map<std::string,std::string>::iterator i;
 if ((i = m.find("Tor")) != m.end()) {
-LogPrint("tor", "tor: Connected to Tor version %s\n", i->second);
+LogPrint(BCLog::TOR, "tor: Connected to Tor version %s\n", i->second);
 }
 }
 }
 BOOST_FOREACH(const std::string &s, methods) {
-LogPrint("tor", "tor: Supported authentication method: %s\n", s);
+LogPrint(BCLog::TOR, "tor: Supported authentication method: %s\n", s);
 }
 // Prefer NULL, otherwise SAFECOOKIE. If a password is provided, use HASHEDPASSWORD
 /* Authentication:

@@ -577,18 +578,18 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro
 std::string torpassword = GetArg("-torpassword", "");
 if (!torpassword.empty()) {
 if (methods.count("HASHEDPASSWORD")) {
-LogPrint("tor", "tor: Using HASHEDPASSWORD authentication\n");
+LogPrint(BCLog::TOR, "tor: Using HASHEDPASSWORD authentication\n");
 boost::replace_all(torpassword, "\"", "\\\"");
 _conn.Command("AUTHENTICATE \"" + torpassword + "\"", boost::bind(&TorController::auth_cb, this, _1, _2));
 } else {
 LogPrintf("tor: Password provided with -torpassword, but HASHEDPASSWORD authentication is not available\n");
 }
 } else if (methods.count("NULL")) {
-LogPrint("tor", "tor: Using NULL authentication\n");
+LogPrint(BCLog::TOR, "tor: Using NULL authentication\n");
 _conn.Command("AUTHENTICATE", boost::bind(&TorController::auth_cb, this, _1, _2));
 } else if (methods.count("SAFECOOKIE")) {
 // Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie
-LogPrint("tor", "tor: Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile);
+LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile);
 std::pair<bool,std::string> status_cookie = ReadBinaryFile(cookiefile, TOR_COOKIE_SIZE);
 if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) {
 // _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), boost::bind(&TorController::auth_cb, this, _1, _2));

@@ -630,7 +631,7 @@ void TorController::disconnected_cb(TorControlConnection& _conn)
 if (!reconnect)
 return;

-LogPrint("tor", "tor: Not connected to Tor control port %s, trying to reconnect\n", target);
+LogPrint(BCLog::TOR, "tor: Not connected to Tor control port %s, trying to reconnect\n", target);

 // Single-shot timer for reconnect. Use exponential backoff.
 struct timeval time = MillisToTimeval(int64_t(reconnect_timeout * 1000.0));

@@ -63,7 +63,7 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
 if (!hashBlock.IsNull())
 batch.Write(DB_BEST_BLOCK, hashBlock);

-LogPrint("coindb", "Committing %u changed transactions (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
+LogPrint(BCLog::COINDB, "Committing %u changed transactions (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
 return db.WriteBatch(batch);
 }

@@ -634,7 +634,7 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
 if (GetRand(std::numeric_limits<uint32_t>::max()) >= nCheckFrequency)
 return;

-LogPrint("mempool", "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());
+LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());

 uint64_t checkTotal = 0;
 uint64_t innerUsage = 0;

@@ -1108,8 +1108,9 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<uint256>* pvNoSpendsRe
 }
 }

-if (maxFeeRateRemoved > CFeeRate(0))
-LogPrint("mempool", "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString());
+if (maxFeeRateRemoved > CFeeRate(0)) {
+LogPrint(BCLog::MEMPOOL, "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString());
+}
 }

 bool CTxMemPool::TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const {

src/util.cpp (90 changed lines)

@@ -110,7 +110,6 @@ CCriticalSection cs_args;
 std::map<std::string, std::string> mapArgs;
 static std::map<std::string, std::vector<std::string> > _mapMultiArgs;
 const std::map<std::string, std::vector<std::string> >& mapMultiArgs = _mapMultiArgs;
-bool fDebug = false;
 bool fPrintToConsole = false;
 bool fPrintToDebugLog = true;

@@ -120,6 +119,9 @@ bool fLogIPs = DEFAULT_LOGIPS;
 std::atomic<bool> fReopenDebugLog(false);
 CTranslationInterface translationInterface;

+/** Log categories bitfield. Leveldb/libevent need special handling if their flags are changed at runtime. */
+std::atomic<uint32_t> logCategories(0);
+
 /** Init OpenSSL library multithreading support */
 static CCriticalSection** ppmutexOpenSSL;
 void locking_callback(int mode, int i, const char* file, int line) NO_THREAD_SAFETY_ANALYSIS

@@ -231,36 +233,70 @@ void OpenDebugLog()
 vMsgsBeforeOpenLog = NULL;
 }

-bool LogAcceptCategory(const char* category)
-{
-if (category != NULL)
-{
-if (!fDebug)
-return false;
-
-// Give each thread quick access to -debug settings.
-// This helps prevent issues debugging global destructors,
-// where mapMultiArgs might be deleted before another
-// global destructor calls LogPrint()
-static boost::thread_specific_ptr<std::set<std::string> > ptrCategory;
-if (ptrCategory.get() == NULL)
-{
-if (mapMultiArgs.count("-debug")) {
-const std::vector<std::string>& categories = mapMultiArgs.at("-debug");
-ptrCategory.reset(new std::set<std::string>(categories.begin(), categories.end()));
-// thread_specific_ptr automatically deletes the set when the thread ends.
-} else
-ptrCategory.reset(new std::set<std::string>());
-}
-const std::set<std::string>& setCategories = *ptrCategory;
-
-// if not debugging everything and not debugging specific category, LogPrint does nothing.
-if (setCategories.count(std::string("")) == 0 &&
-setCategories.count(std::string("1")) == 0 &&
-setCategories.count(std::string(category)) == 0)
-return false;
-}
-return true;
-}
+struct CLogCategoryDesc
+{
+uint32_t flag;
+std::string category;
+};
+
+const CLogCategoryDesc LogCategories[] =
+{
+{BCLog::NONE, "0"},
+{BCLog::NET, "net"},
+{BCLog::TOR, "tor"},
+{BCLog::MEMPOOL, "mempool"},
+{BCLog::HTTP, "http"},
+{BCLog::BENCH, "bench"},
+{BCLog::ZMQ, "zmq"},
+{BCLog::DB, "db"},
+{BCLog::RPC, "rpc"},
+{BCLog::ESTIMATEFEE, "estimatefee"},
+{BCLog::ADDRMAN, "addrman"},
+{BCLog::SELECTCOINS, "selectcoins"},
+{BCLog::REINDEX, "reindex"},
+{BCLog::CMPCTBLOCK, "cmpctblock"},
+{BCLog::RAND, "rand"},
+{BCLog::PRUNE, "prune"},
+{BCLog::PROXY, "proxy"},
+{BCLog::MEMPOOLREJ, "mempoolrej"},
+{BCLog::LIBEVENT, "libevent"},
+{BCLog::COINDB, "coindb"},
+{BCLog::QT, "qt"},
+{BCLog::LEVELDB, "leveldb"},
+{BCLog::ALL, "1"},
+{BCLog::ALL, "all"},
+};
+
+bool GetLogCategory(uint32_t *f, const std::string *str)
+{
+if (f && str) {
+if (*str == "") {
+*f = BCLog::ALL;
+return true;
+}
+for (unsigned int i = 0; i < ARRAYLEN(LogCategories); i++) {
+if (LogCategories[i].category == *str) {
+*f = LogCategories[i].flag;
+return true;
+}
+}
+}
+return false;
+}
+
+std::string ListLogCategories()
+{
+std::string ret;
+int outcount = 0;
+for (unsigned int i = 0; i < ARRAYLEN(LogCategories); i++) {
+// Omit the special cases.
+if (LogCategories[i].flag != BCLog::NONE && LogCategories[i].flag != BCLog::ALL) {
+if (outcount != 0) ret += ", ";
+ret += LogCategories[i].category;
+outcount++;
+}
+}
+return ret;
+}

 /**

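The table and GetLogCategory() above do the string-to-flag translation; the init-time code that feeds them the -debug values is not part of this excerpt. As a rough sketch of how the pieces fit together (the helper name and the way the category list is obtained are assumptions), parsing might look like this:

// Sketch only: turning -debug=<category> strings into bits of logCategories.
// EnableDebugCategories is a hypothetical helper; GetLogCategory(), ListLogCategories()
// and logCategories are the symbols introduced in the hunks above.
#include <string>
#include <vector>

bool EnableDebugCategories(const std::vector<std::string>& categories)
{
    for (const std::string& cat : categories) {
        uint32_t flag = 0;
        if (!GetLogCategory(&flag, &cat)) {
            return false; // unknown category; ListLogCategories() lists the valid names
        }
        logCategories |= flag; // atomic OR into the global bitfield
    }
    return true;
}

Note that an empty string maps to BCLog::ALL in GetLogCategory(), which preserves the old behaviour where a bare -debug enabled every category.
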
src/util.h (42 changed lines)

@@ -42,7 +42,6 @@ public:
 };

 extern const std::map<std::string, std::vector<std::string> >& mapMultiArgs;
-extern bool fDebug;
 extern bool fPrintToConsole;
 extern bool fPrintToDebugLog;

@@ -55,6 +54,8 @@ extern CTranslationInterface translationInterface;
 extern const char * const BITCOIN_CONF_FILENAME;
 extern const char * const BITCOIN_PID_FILENAME;

+extern std::atomic<uint32_t> logCategories;
+
 /**
 * Translation function: Call Translate signal on UI interface, which returns a boost::optional result.
 * If no translation slot is registered, nothing is returned, and simply return the input.

@@ -68,8 +69,45 @@ inline std::string _(const char* psz)
 void SetupEnvironment();
 bool SetupNetworking();

+namespace BCLog {
+enum LogFlags : uint32_t {
+NONE = 0,
+NET = (1 << 0),
+TOR = (1 << 1),
+MEMPOOL = (1 << 2),
+HTTP = (1 << 3),
+BENCH = (1 << 4),
+ZMQ = (1 << 5),
+DB = (1 << 6),
+RPC = (1 << 7),
+ESTIMATEFEE = (1 << 8),
+ADDRMAN = (1 << 9),
+SELECTCOINS = (1 << 10),
+REINDEX = (1 << 11),
+CMPCTBLOCK = (1 << 12),
+RAND = (1 << 13),
+PRUNE = (1 << 14),
+PROXY = (1 << 15),
+MEMPOOLREJ = (1 << 16),
+LIBEVENT = (1 << 17),
+COINDB = (1 << 18),
+QT = (1 << 19),
+LEVELDB = (1 << 20),
+ALL = ~(uint32_t)0,
+};
+}
 /** Return true if log accepts specified category */
-bool LogAcceptCategory(const char* category);
+static inline bool LogAcceptCategory(uint32_t category)
+{
+return (logCategories.load(std::memory_order_relaxed) & category) != 0;
+}
+
+/** Returns a string with the supported log categories */
+std::string ListLogCategories();
+
+/** Return true if str parses as a log category and set the flags in f */
+bool GetLogCategory(uint32_t *f, const std::string *str);

 /** Send a string to the log output */
 int LogPrintStr(const std::string &str);

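These declarations are what every converted call site in this diff compiles against: categories are now enum bits of a single uint32_t, and LogAcceptCategory() is one relaxed atomic load plus a bit test, so several categories can be checked in one call by OR'ing their flags. A small illustrative call site under these definitions (the function itself is hypothetical):

// Sketch of a call site against the util.h declarations above.
void ReportPeerActivity(int peer_id)  // hypothetical function for illustration
{
    // Single category: the compiler now rejects a misspelled category, where the
    // old string-based form would just produce a log line that never fired.
    LogPrint(BCLog::NET, "peer=%d activity\n", peer_id);

    // Multiple categories: OR'ing flags asks "is any of these enabled?", because
    // LogAcceptCategory() ANDs its argument against the logCategories mask.
    if (LogAcceptCategory(BCLog::NET | BCLog::MEMPOOL)) {
        LogPrint(BCLog::NET, "building extra diagnostics for peer=%d\n", peer_id);
    }
}
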
@@ -540,8 +540,9 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fChe

 void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
 int expired = pool.Expire(GetTime() - age);
-if (expired != 0)
-LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired);
+if (expired != 0) {
+LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
+}

 std::vector<uint256> vNoSpendsRemaining;
 pool.TrimToSize(limit, &vNoSpendsRemaining);

@@ -955,7 +956,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
 // Remove conflicting transactions from the mempool
 BOOST_FOREACH(const CTxMemPool::txiter it, allConflicting)
 {
-LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
+LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
 it->GetTx().GetHash().ToString(),
 hash.ToString(),
 FormatMoney(nModifiedFees - nConflictingFees),

@@ -1763,7 +1764,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
 }

 int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
-LogPrint("bench", " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
+LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);

 // Do not allow blocks that contain transactions which 'overwrite' older transactions,
 // unless those are already completely spent.

@@ -1830,7 +1831,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
 }

 int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
-LogPrint("bench", " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
+LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);

 CBlockUndo blockundo;

@@ -1904,7 +1905,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
 pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
 }
 int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
-LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);
+LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);

 CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
 if (block.vtx[0]->GetValueOut() > blockReward)

@@ -1916,7 +1917,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
 if (!control.Wait())
 return state.DoS(100, false);
 int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
-LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);
+LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);

 if (fJustCheck)
 return true;

@@ -1948,7 +1949,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
 view.SetBestBlock(pindex->GetBlockHash());

 int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
-LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);
+LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);

 // Watch for changes to the previous coinbase transaction.
 static uint256 hashPrevBestCoinBase;

@@ -1957,7 +1958,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin


 int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
-LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
+LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);

 return true;
 }

@@ -2164,7 +2165,7 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
 bool flushed = view.Flush();
 assert(flushed);
 }
-LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
+LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
 // Write the chain state to disk, if necessary.
 if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
 return false;

@@ -2239,7 +2240,7 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
 // Apply the block atomically to the chain state.
 int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
 int64_t nTime3;
-LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
+LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
 {
 CCoinsViewCache view(pcoinsTip);
 bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);

@@ -2250,25 +2251,25 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
 return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
 }
 nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
-LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
+LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
 bool flushed = view.Flush();
 assert(flushed);
 }
 int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
-LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
+LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
 // Write the chain state to disk, if necessary.
 if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
 return false;
 int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
-LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
+LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
 // Remove conflicting transactions from the mempool.;
 mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
 // Update chainActive & related variables.
 UpdateTip(pindexNew, chainparams);

 int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
-LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
-LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
+LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
+LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
 return true;
 }

@@ -3391,7 +3392,7 @@ void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight
 }
 }

-LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
+LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
 nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
 ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
 nLastBlockWeCanPrune, count);

@@ -3885,7 +3886,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
 // detect out of order blocks, and store them for later
 uint256 hash = block.GetHash();
 if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
-LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
+LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
 block.hashPrevBlock.ToString());
 if (dbp)
 mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));

@@ -3901,7 +3902,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
 if (state.IsError())
 break;
 } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
-LogPrint("reindex", "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
+LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
 }

 // Activate the genesis block so normal node progress can continue

@@ -3926,7 +3927,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
 std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
 if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
 {
-LogPrint("reindex", "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
+LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
 head.ToString());
 LOCK(cs_main);
 CValidationState dummy;

@@ -118,7 +118,7 @@ void CDBEnv::MakeMock()

 boost::this_thread::interruption_point();

-LogPrint("db", "CDBEnv::MakeMock\n");
+LogPrint(BCLog::DB, "CDBEnv::MakeMock\n");

 dbenv->set_cachesize(1, 0, 1);
 dbenv->set_lg_bsize(10485760 * 4);

@@ -560,7 +560,7 @@ void CDBEnv::Flush(bool fShutdown)
 {
 int64_t nStart = GetTimeMillis();
 // Flush log data to the actual data file on all files that are not in use
-LogPrint("db", "CDBEnv::Flush: Flush(%s)%s\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
+LogPrint(BCLog::DB, "CDBEnv::Flush: Flush(%s)%s\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
 if (!fDbEnvInit)
 return;
 {

@@ -569,21 +569,21 @@ void CDBEnv::Flush(bool fShutdown)
 while (mi != mapFileUseCount.end()) {
 std::string strFile = (*mi).first;
 int nRefCount = (*mi).second;
-LogPrint("db", "CDBEnv::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
+LogPrint(BCLog::DB, "CDBEnv::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
 if (nRefCount == 0) {
 // Move log data to the dat file
 CloseDb(strFile);
-LogPrint("db", "CDBEnv::Flush: %s checkpoint\n", strFile);
+LogPrint(BCLog::DB, "CDBEnv::Flush: %s checkpoint\n", strFile);
 dbenv->txn_checkpoint(0, 0, 0);
-LogPrint("db", "CDBEnv::Flush: %s detach\n", strFile);
+LogPrint(BCLog::DB, "CDBEnv::Flush: %s detach\n", strFile);
 if (!fMockDb)
 dbenv->lsn_reset(strFile.c_str(), 0);
-LogPrint("db", "CDBEnv::Flush: %s closed\n", strFile);
+LogPrint(BCLog::DB, "CDBEnv::Flush: %s closed\n", strFile);
 mapFileUseCount.erase(mi++);
 } else
 mi++;
 }
-LogPrint("db", "CDBEnv::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
+LogPrint(BCLog::DB, "CDBEnv::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
 if (fShutdown) {
 char** listp;
 if (mapFileUseCount.empty()) {

@@ -617,7 +617,7 @@ bool CDB::PeriodicFlush(std::string strFile)
 std::map<std::string, int>::iterator mi = bitdb.mapFileUseCount.find(strFile);
 if (mi != bitdb.mapFileUseCount.end())
 {
-LogPrint("db", "Flushing %s\n", strFile);
+LogPrint(BCLog::DB, "Flushing %s\n", strFile);
 int64_t nStart = GetTimeMillis();

 // Flush wallet file so it's self contained

@@ -625,7 +625,7 @@ bool CDB::PeriodicFlush(std::string strFile)
 bitdb.CheckpointLSN(strFile);

 bitdb.mapFileUseCount.erase(mi++);
-LogPrint("db", "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
+LogPrint(BCLog::DB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
 ret = true;
 }
 }

@@ -3028,7 +3028,7 @@ UniValue bumpfee(const JSONRPCRequest& request)
 // If the output would become dust, discard it (converting the dust to fee)
 poutput->nValue -= nDelta;
 if (poutput->nValue <= poutput->GetDustThreshold(::dustRelayFee)) {
-LogPrint("rpc", "Bumping fee and discarding dust output\n");
+LogPrint(BCLog::RPC, "Bumping fee and discarding dust output\n");
 nNewFee += poutput->nValue;
 tx.vout.erase(tx.vout.begin() + nOutput);
 }

@@ -2172,11 +2172,15 @@ bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const int nConfMin
 nValueRet += vValue[i].first;
 }

-LogPrint("selectcoins", "SelectCoins() best subset: ");
-for (unsigned int i = 0; i < vValue.size(); i++)
-if (vfBest[i])
-LogPrint("selectcoins", "%s ", FormatMoney(vValue[i].first));
-LogPrint("selectcoins", "total %s\n", FormatMoney(nBest));
+if (LogAcceptCategory(BCLog::SELECTCOINS)) {
+LogPrint(BCLog::SELECTCOINS, "SelectCoins() best subset: ");
+for (unsigned int i = 0; i < vValue.size(); i++) {
+if (vfBest[i]) {
+LogPrint(BCLog::SELECTCOINS, "%s ", FormatMoney(vValue[i].first));
+}
+}
+LogPrint(BCLog::SELECTCOINS, "total %s\n", FormatMoney(nBest));
+}
 }

 return true;

@@ -745,7 +745,7 @@ DBErrors CWalletDB::ZapSelectTx(std::vector<uint256>& vTxHashIn, std::vector<uin
 }
 else if ((*it) == hash) {
 if(!EraseTx(hash)) {
-LogPrint("db", "Transaction was found for deletion but returned database error: %s\n", hash.GetHex());
+LogPrint(BCLog::DB, "Transaction was found for deletion but returned database error: %s\n", hash.GetHex());
 delerror = true;
 }
 vTxHashOut.push_back(hash);

@@ -12,7 +12,7 @@

 void zmqError(const char *str)
 {
-LogPrint("zmq", "zmq: Error: %s, errno=%s\n", str, zmq_strerror(errno));
+LogPrint(BCLog::ZMQ, "zmq: Error: %s, errno=%s\n", str, zmq_strerror(errno));
 }

 CZMQNotificationInterface::CZMQNotificationInterface() : pcontext(NULL)

@@ -72,7 +72,7 @@ CZMQNotificationInterface* CZMQNotificationInterface::Create()
 // Called at startup to conditionally set up ZMQ socket(s)
 bool CZMQNotificationInterface::Initialize()
 {
-LogPrint("zmq", "zmq: Initialize notification interface\n");
+LogPrint(BCLog::ZMQ, "zmq: Initialize notification interface\n");
 assert(!pcontext);

 pcontext = zmq_init(1);

@@ -89,11 +89,11 @@ bool CZMQNotificationInterface::Initialize()
 CZMQAbstractNotifier *notifier = *i;
 if (notifier->Initialize(pcontext))
 {
-LogPrint("zmq", " Notifier %s ready (address = %s)\n", notifier->GetType(), notifier->GetAddress());
+LogPrint(BCLog::ZMQ, " Notifier %s ready (address = %s)\n", notifier->GetType(), notifier->GetAddress());
 }
 else
 {
-LogPrint("zmq", " Notifier %s failed (address = %s)\n", notifier->GetType(), notifier->GetAddress());
+LogPrint(BCLog::ZMQ, " Notifier %s failed (address = %s)\n", notifier->GetType(), notifier->GetAddress());
 break;
 }
 }

@@ -109,13 +109,13 @@ bool CZMQNotificationInterface::Initialize()
 // Called during shutdown sequence
 void CZMQNotificationInterface::Shutdown()
 {
-LogPrint("zmq", "zmq: Shutdown notification interface\n");
+LogPrint(BCLog::ZMQ, "zmq: Shutdown notification interface\n");
 if (pcontext)
 {
 for (std::list<CZMQAbstractNotifier*>::iterator i=notifiers.begin(); i!=notifiers.end(); ++i)
 {
 CZMQAbstractNotifier *notifier = *i;
-LogPrint("zmq", " Shutdown notifier %s at %s\n", notifier->GetType(), notifier->GetAddress());
+LogPrint(BCLog::ZMQ, " Shutdown notifier %s at %s\n", notifier->GetType(), notifier->GetAddress());
 notifier->Shutdown();
 }
 zmq_ctx_destroy(pcontext);

@@ -89,7 +89,7 @@ bool CZMQAbstractPublishNotifier::Initialize(void *pcontext)
 }
 else
 {
-LogPrint("zmq", "zmq: Reusing socket for address %s\n", address);
+LogPrint(BCLog::ZMQ, "zmq: Reusing socket for address %s\n", address);

 psocket = i->second->psocket;
 mapPublishNotifiers.insert(std::make_pair(address, this));

@@ -119,7 +119,7 @@ void CZMQAbstractPublishNotifier::Shutdown()

 if (count == 1)
 {
-LogPrint("zmq", "Close socket at address %s\n", address);
+LogPrint(BCLog::ZMQ, "Close socket at address %s\n", address);
 int linger = 0;
 zmq_setsockopt(psocket, ZMQ_LINGER, &linger, sizeof(linger));
 zmq_close(psocket);

@@ -148,7 +148,7 @@ bool CZMQAbstractPublishNotifier::SendMessage(const char *command, const void* d
 bool CZMQPublishHashBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
 {
 uint256 hash = pindex->GetBlockHash();
-LogPrint("zmq", "zmq: Publish hashblock %s\n", hash.GetHex());
+LogPrint(BCLog::ZMQ, "zmq: Publish hashblock %s\n", hash.GetHex());
 char data[32];
 for (unsigned int i = 0; i < 32; i++)
 data[31 - i] = hash.begin()[i];

@@ -158,7 +158,7 @@ bool CZMQPublishHashBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
 bool CZMQPublishHashTransactionNotifier::NotifyTransaction(const CTransaction &transaction)
 {
 uint256 hash = transaction.GetHash();
-LogPrint("zmq", "zmq: Publish hashtx %s\n", hash.GetHex());
+LogPrint(BCLog::ZMQ, "zmq: Publish hashtx %s\n", hash.GetHex());
 char data[32];
 for (unsigned int i = 0; i < 32; i++)
 data[31 - i] = hash.begin()[i];

@@ -167,7 +167,7 @@ bool CZMQPublishHashTransactionNotifier::NotifyTransaction(const CTransaction &t

 bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
 {
-LogPrint("zmq", "zmq: Publish rawblock %s\n", pindex->GetBlockHash().GetHex());
+LogPrint(BCLog::ZMQ, "zmq: Publish rawblock %s\n", pindex->GetBlockHash().GetHex());

 const Consensus::Params& consensusParams = Params().GetConsensus();
 CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());

@@ -189,7 +189,7 @@ bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
 bool CZMQPublishRawTransactionNotifier::NotifyTransaction(const CTransaction &transaction)
 {
 uint256 hash = transaction.GetHash();
-LogPrint("zmq", "zmq: Publish rawtx %s\n", hash.GetHex());
+LogPrint(BCLog::ZMQ, "zmq: Publish rawtx %s\n", hash.GetHex());
 CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
 ss << transaction;
 return SendMessage(MSG_RAWTX, &(*ss.begin()), ss.size());