Convert btcd to use new btclog package.

Also, make every subsystem within btcd use its own logger instance so each
subsystem can have its own level specified independently of the others.

This is work towards #48.
Dave Collins 2013-11-21 12:03:56 -06:00
parent 41d2d36643
commit eb8688df79
12 changed files with 312 additions and 236 deletions
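
The practical effect of the change is that logging levels can now be tuned per subsystem instead of globally. A minimal usage sketch, based on the setLogLevels and setLogLevel helpers added in log.go below (the subsystem tags come from the subsystemLoggers map; the specific levels are only illustrative, and invalid level names fall back to info per setLogLevel):

// Start every subsystem at the default level, then raise individual
// subsystems without affecting the others.
setLogLevels(defaultLogLevel)
setLogLevel("AMGR", "debug") // address manager only
setLogLevel("PEER", "trace") // peer subsystem only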


@ -153,7 +153,7 @@ func (a *AddrManager) updateAddress(netAddr, srcAddr *btcwire.NetAddress) {
// Enforce max addresses.
if len(a.addrNew[bucket]) > newBucketSize {
log.Tracef("AMGR: new bucket is full, expiring old ")
amgrLog.Tracef("new bucket is full, expiring old ")
a.expireNew(bucket)
}
@ -161,7 +161,7 @@ func (a *AddrManager) updateAddress(netAddr, srcAddr *btcwire.NetAddress) {
ka.refs++
a.addrNew[bucket][addr] = ka
log.Tracef("AMGR: Added new address %s for a total of %d addresses",
amgrLog.Tracef("Added new address %s for a total of %d addresses",
addr, a.nTried+a.nNew)
}
@ -259,7 +259,7 @@ func (a *AddrManager) expireNew(bucket int) {
var oldest *knownAddress
for k, v := range a.addrNew[bucket] {
if bad(v) {
log.Tracef("AMGR: expiring bad address %v", k)
amgrLog.Tracef("expiring bad address %v", k)
delete(a.addrNew[bucket], k)
v.refs--
if v.refs == 0 {
@ -277,7 +277,7 @@ func (a *AddrManager) expireNew(bucket int) {
if oldest != nil {
key := NetAddressKey(oldest.na)
log.Tracef("AMGR: expiring oldest address %v", key)
amgrLog.Tracef("expiring oldest address %v", key)
delete(a.addrNew[bucket], key)
oldest.refs--
@ -393,7 +393,7 @@ out:
dumpAddressTicker.Stop()
a.savePeers()
a.wg.Done()
log.Trace("AMGR: Address handler done")
amgrLog.Trace("Address handler done")
}
type serialisedKnownAddress struct {
@ -465,7 +465,7 @@ func (a *AddrManager) savePeers() {
w, err := os.Create(filePath)
if err != nil {
log.Error("Error opening file: ", filePath, err)
amgrLog.Error("Error opening file: ", filePath, err)
return
}
enc := json.NewEncoder(w)
@ -485,19 +485,17 @@ func (a *AddrManager) loadPeers() {
err := a.deserialisePeers(filePath)
if err != nil {
log.Errorf("AMGR: Failed to parse %s: %v", filePath,
err)
amgrLog.Errorf("Failed to parse %s: %v", filePath, err)
// if it is invalid we nuke the old one unconditionally.
err = os.Remove(filePath)
if err != nil {
log.Warn("Failed to remove corrupt peers "+
amgrLog.Warn("Failed to remove corrupt peers "+
"file: ", err)
}
a.reset()
return
}
log.Infof("AMGR: Loaded %d addresses from '%s'", a.nNew+a.nTried,
filePath)
amgrLog.Infof("Loaded %d addresses from '%s'", a.nNew+a.nTried, filePath)
}
func (a *AddrManager) deserialisePeers(filePath string) error {
@ -611,7 +609,7 @@ func (a *AddrManager) Start() {
return
}
log.Trace("AMGR: Starting address manager")
amgrLog.Trace("Starting address manager")
a.wg.Add(1)
@ -625,12 +623,12 @@ func (a *AddrManager) Start() {
// Stop gracefully shuts down the address manager by stopping the main handler.
func (a *AddrManager) Stop() error {
if atomic.AddInt32(&a.shutdown, 1) != 1 {
log.Warnf("AMGR: Address manager is already in the process of " +
amgrLog.Warnf("Address manager is already in the process of " +
"shutting down")
return nil
}
log.Infof("AMGR: Address manager shutting down")
amgrLog.Infof("Address manager shutting down")
close(a.quit)
a.wg.Wait()
return nil
@ -660,7 +658,7 @@ func (a *AddrManager) AddAddressByIP(addrIP string) {
// Split IP and port
addr, portStr, err := net.SplitHostPort(addrIP)
if err != nil {
log.Warnf("AMGR: AddADddressByIP given bullshit adddress"+
amgrLog.Warnf("AddADddressByIP given bullshit adddress"+
"(%s): %v", err)
return
}
@ -669,12 +667,12 @@ func (a *AddrManager) AddAddressByIP(addrIP string) {
na.Timestamp = time.Now()
na.IP = net.ParseIP(addr)
if na.IP == nil {
log.Error("AMGR: Invalid ip address:", addr)
amgrLog.Error("Invalid ip address:", addr)
return
}
port, err := strconv.ParseUint(portStr, 10, 0)
if err != nil {
log.Error("AMGR: Invalid port: ", portStr, err)
amgrLog.Error("Invalid port: ", portStr, err)
return
}
na.Port = uint16(port)
@ -809,8 +807,8 @@ func (a *AddrManager) GetAddress(class string, newBias int) *knownAddress {
ka := e.Value.(*knownAddress)
randval := a.rand.Intn(large)
if float64(randval) < (factor * chance(ka) * float64(large)) {
log.Tracef("AMGR: Selected %v from tried "+
"bucket", NetAddressKey(ka.na))
amgrLog.Tracef("Selected %v from tried bucket",
NetAddressKey(ka.na))
return ka
}
factor *= 1.2
@ -837,7 +835,7 @@ func (a *AddrManager) GetAddress(class string, newBias int) *knownAddress {
}
randval := a.rand.Intn(large)
if float64(randval) < (factor * chance(ka) * float64(large)) {
log.Tracef("AMGR: Selected %v from new bucket",
amgrLog.Tracef("Selected %v from new bucket",
NetAddressKey(ka.na))
return ka
}
@ -972,7 +970,7 @@ func (a *AddrManager) Good(addr *btcwire.NetAddress) {
a.nNew++
rmkey := NetAddressKey(rmka.na)
log.Tracef("AMGR: replacing %s with %s in tried", rmkey, addrKey)
amgrLog.Tracef("Replacing %s with %s in tried", rmkey, addrKey)
// We made sure there is space here just above.
a.addrNew[newBucket][rmkey] = rmka


@ -92,7 +92,7 @@ func (b *blockManager) startSync(peers *list.List) {
// Find the height of the current known best block.
_, height, err := b.server.db.NewestSha()
if err != nil {
log.Errorf("BMGR: %v", err)
bmgrLog.Errorf("%v", err)
return
}
@ -122,17 +122,17 @@ func (b *blockManager) startSync(peers *list.List) {
if bestPeer != nil {
locator, err := b.blockChain.LatestBlockLocator()
if err != nil {
log.Errorf("BMGR: Failed to get block locator for the "+
bmgrLog.Errorf("Failed to get block locator for the "+
"latest block: %v", err)
return
}
log.Infof("BMGR: Syncing to block height %d from peer %v",
bmgrLog.Infof("Syncing to block height %d from peer %v",
bestPeer.lastBlock, bestPeer.addr)
bestPeer.PushGetBlocksMsg(locator, &zeroHash)
b.syncPeer = bestPeer
} else {
log.Warnf("BMGR: No sync peer candidates available")
bmgrLog.Warnf("No sync peer candidates available")
}
}
@ -173,7 +173,7 @@ func (b *blockManager) handleNewPeerMsg(peers *list.List, p *peer) {
return
}
log.Infof("BMGR: New valid peer %s", p)
bmgrLog.Infof("New valid peer %s", p)
// Ignore the peer if it's not a sync candidate.
if !b.isSyncCandidate(p) {
@ -200,7 +200,7 @@ func (b *blockManager) handleDonePeerMsg(peers *list.List, p *peer) {
}
}
log.Infof("BMGR: Lost peer %s", p)
bmgrLog.Infof("Lost peer %s", p)
// Remove requested transactions from the global map so that they will
// be fetched from elsewhere next time we get an inv.
@ -257,7 +257,7 @@ func (b *blockManager) logBlockHeight(numTx, height int64, latestHash *btcwire.S
if b.receivedLogTx == 1 {
txStr = "transaction"
}
log.Infof("BMGR: Processed %d %s in the last %s (%d %s, height %d%s)",
bmgrLog.Infof("Processed %d %s in the last %s (%d %s, height %d%s)",
b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
txStr, height, blockTimeStr)
@ -273,7 +273,7 @@ func (b *blockManager) handleTxMsg(tmsg *txMsg) {
// If we didn't ask for this transaction then the peer is misbehaving.
if _, ok := tmsg.peer.requestedTxns[*txHash]; !ok {
log.Warnf("BMGR: Got unrequested transaction %v from %s -- "+
bmgrLog.Warnf("Got unrequested transaction %v from %s -- "+
"disconnecting", txHash, tmsg.peer.addr)
tmsg.peer.Disconnect()
return
@ -296,9 +296,9 @@ func (b *blockManager) handleTxMsg(tmsg *txMsg) {
// so log it as such. Otherwise, something really did go wrong,
// so log it as an actual error.
if _, ok := err.(TxRuleError); ok {
log.Debugf("Rejected transaction %v: %v", txHash, err)
bmgrLog.Debugf("Rejected transaction %v: %v", txHash, err)
} else {
log.Errorf("Failed to process transaction %v: %v", txHash, err)
bmgrLog.Errorf("Failed to process transaction %v: %v", txHash, err)
}
return
}
@ -343,7 +343,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
// mode in this case so the chain code is actually fed the
// duplicate blocks.
if !cfg.RegressionTest {
log.Warnf("BMGR: Got unrequested block %v from %s -- "+
bmgrLog.Warnf("Got unrequested block %v from %s -- "+
"disconnecting", blockSha, bmsg.peer.addr)
bmsg.peer.Disconnect()
return
@ -369,9 +369,9 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
// it as such. Otherwise, something really did go wrong, so log
// it as an actual error.
if _, ok := err.(btcchain.RuleError); ok {
log.Infof("BMGR: Rejected block %v: %v", blockSha, err)
bmgrLog.Infof("Rejected block %v: %v", blockSha, err)
} else {
log.Errorf("BMGR: Failed to process block %v: %v", blockSha, err)
bmgrLog.Errorf("Failed to process block %v: %v", blockSha, err)
}
return
}
@ -385,7 +385,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
// Log info about the new block height.
latestHash, height, err := b.server.db.NewestSha()
if err != nil {
log.Warnf("BMGR: Failed to obtain latest sha - %v", err)
bmgrLog.Warnf("Failed to obtain latest sha - %v", err)
return
}
b.logBlockHeight(int64(len(bmsg.block.MsgBlock().Transactions)), height,
@ -484,7 +484,7 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) {
orphanRoot := chain.GetOrphanRoot(&iv.Hash)
locator, err := chain.LatestBlockLocator()
if err != nil {
log.Errorf("PEER: Failed to get block "+
bmgrLog.Errorf("PEER: Failed to get block "+
"locator for the latest block: "+
"%v", err)
continue
@ -586,7 +586,7 @@ out:
}
}
b.wg.Done()
log.Trace("BMGR: Block handler done")
bmgrLog.Trace("Block handler done")
}
// handleNotifyMsg handles notifications from btcchain. It does things such
@ -601,14 +601,14 @@ func (b *blockManager) handleNotifyMsg(notification *btcchain.Notification) {
orphanRoot := b.blockChain.GetOrphanRoot(orphanHash)
locator, err := b.blockChain.LatestBlockLocator()
if err != nil {
log.Errorf("BMGR: Failed to get block locator "+
bmgrLog.Errorf("Failed to get block locator "+
"for the latest block: %v", err)
break
}
peer.PushGetBlocksMsg(locator, orphanRoot)
delete(b.blockPeer, *orphanRoot)
} else {
log.Warnf("Notification for orphan %v with no peer",
bmgrLog.Warnf("Notification for orphan %v with no peer",
orphanHash)
}
@ -624,7 +624,7 @@ func (b *blockManager) handleNotifyMsg(notification *btcchain.Notification) {
block, ok := notification.Data.(*btcutil.Block)
if !ok {
log.Warnf("BMGR: Chain accepted notification is not a block.")
bmgrLog.Warnf("Chain accepted notification is not a block.")
break
}
@ -640,7 +640,7 @@ func (b *blockManager) handleNotifyMsg(notification *btcchain.Notification) {
case btcchain.NTBlockConnected:
block, ok := notification.Data.(*btcutil.Block)
if !ok {
log.Warnf("BMGR: Chain connected notification is not a block.")
bmgrLog.Warnf("Chain connected notification is not a block.")
break
}
@ -667,7 +667,7 @@ func (b *blockManager) handleNotifyMsg(notification *btcchain.Notification) {
case btcchain.NTBlockDisconnected:
block, ok := notification.Data.(*btcutil.Block)
if !ok {
log.Warnf("BMGR: Chain disconnected notification is not a block.")
bmgrLog.Warnf("Chain disconnected notification is not a block.")
break
}
@ -751,7 +751,7 @@ func (b *blockManager) Start() {
return
}
log.Trace("BMGR: Starting block manager")
bmgrLog.Trace("Starting block manager")
b.wg.Add(1)
go b.blockHandler()
}
@ -760,12 +760,12 @@ func (b *blockManager) Start() {
// handlers and waiting for them to finish.
func (b *blockManager) Stop() error {
if atomic.AddInt32(&b.shutdown, 1) != 1 {
log.Warnf("BMGR: Block manager is already in the process of " +
bmgrLog.Warnf("Block manager is already in the process of " +
"shutting down")
return nil
}
log.Infof("BMGR: Block manager shutting down")
bmgrLog.Infof("Block manager shutting down")
close(b.quit)
b.wg.Wait()
return nil
@ -786,16 +786,16 @@ func newBlockManager(s *server) (*blockManager, error) {
bm.blockChain = btcchain.New(s.db, s.btcnet, bm.handleNotifyMsg)
bm.blockChain.DisableCheckpoints(cfg.DisableCheckpoints)
if cfg.DisableCheckpoints {
log.Info("BMGR: Checkpoints are disabled")
bmgrLog.Info("Checkpoints are disabled")
}
log.Infof("BMGR: Generating initial block node index. This may " +
bmgrLog.Infof("Generating initial block node index. This may " +
"take a while...")
err := bm.blockChain.GenerateInitialIndex()
if err != nil {
return nil, err
}
log.Infof("BMGR: Block index generation complete")
bmgrLog.Infof("Block index generation complete")
return &bm, nil
}
@ -811,7 +811,7 @@ func removeRegressionDB(dbPath string) error {
// Remove the old regression test database if it already exists.
fi, err := os.Stat(dbPath)
if err == nil {
log.Infof("BMGR: Removing regression test database from '%s'", dbPath)
btcdLog.Infof("Removing regression test database from '%s'", dbPath)
if fi.IsDir() {
err := os.RemoveAll(dbPath)
if err != nil {
@ -863,7 +863,7 @@ func warnMultipeDBs() {
// Warn if there are extra databases.
if len(duplicateDbPaths) > 0 {
selectedDbPath := blockDbPath(cfg.DbType)
log.Warnf("WARNING: There are multiple block chain databases "+
btcdLog.Warnf("WARNING: There are multiple block chain databases "+
"using different database types.\nYou probably don't "+
"want to waste disk space by having more than one.\n"+
"Your current database is located at [%v].\nThe "+
@ -883,7 +883,7 @@ func loadBlockDB() (btcdb.Db, error) {
// each run, so remove it now if it already exists.
removeRegressionDB(dbPath)
log.Infof("BMGR: Loading block database from '%s'", dbPath)
btcdLog.Infof("Loading block database from '%s'", dbPath)
db, err := btcdb.OpenDB(cfg.DbType, dbPath)
if err != nil {
// Return the error if it's not because the database doesn't
@ -919,11 +919,11 @@ func loadBlockDB() (btcdb.Db, error) {
db.Close()
return nil, err
}
log.Infof("BMGR: Inserted genesis block %v",
btcdLog.Infof("Inserted genesis block %v",
activeNetParams.genesisHash)
height = 0
}
log.Infof("BMGR: Block database loaded with block height %d", height)
btcdLog.Infof("Block database loaded with block height %d", height)
return db, nil
}

btcd.go

@ -21,14 +21,9 @@ var (
// btcdMain is the real main function for btcd. It is necessary to work around
// the fact that deferred functions do not run when os.Exit() is called.
func btcdMain() error {
// Initialize logging and setup deferred flushing to ensure all
// outstanding messages are written on shutdown.
loggers := setLogLevel(defaultLogLevel)
defer func() {
for _, logger := range loggers {
logger.Flush()
}
}()
// Initialize logging at the default logging level.
setLogLevels(defaultLogLevel)
defer backendLog.Flush()
// Load configuration and parse command line.
tcfg, _, err := loadConfig()
@ -39,21 +34,21 @@ func btcdMain() error {
// Change the logging level if needed.
if cfg.DebugLevel != defaultLogLevel {
loggers = setLogLevel(cfg.DebugLevel)
setLogLevels(cfg.DebugLevel)
}
// Show version at startup.
log.Infof("Version %s", version())
btcdLog.Infof("Version %s", version())
// Enable http profiling server if requested.
if cfg.Profile != "" {
go func() {
listenAddr := net.JoinHostPort("", cfg.Profile)
log.Infof("Profile server listening on %s", listenAddr)
btcdLog.Infof("Profile server listening on %s", listenAddr)
profileRedirect := http.RedirectHandler("/debug/pprof",
http.StatusSeeOther)
http.Handle("/", profileRedirect)
log.Errorf("%v", http.ListenAndServe(listenAddr, nil))
btcdLog.Errorf("%v", http.ListenAndServe(listenAddr, nil))
}()
}
@ -61,7 +56,7 @@ func btcdMain() error {
if cfg.CpuProfile != "" {
f, err := os.Create(cfg.CpuProfile)
if err != nil {
log.Errorf("Unable to create cpu profile: %v", err)
btcdLog.Errorf("Unable to create cpu profile: %v", err)
return err
}
pprof.StartCPUProfile(f)
@ -70,21 +65,21 @@ func btcdMain() error {
// Perform upgrades to btcd as new versions require it.
if err := doUpgrades(); err != nil {
log.Errorf("%v", err)
btcdLog.Errorf("%v", err)
return err
}
// Load the block database.
db, err := loadBlockDB()
if err != nil {
log.Errorf("%v", err)
btcdLog.Errorf("%v", err)
return err
}
defer db.Close()
// Ensure the database is sync'd and closed on Ctrl+C.
addInterruptHandler(func() {
log.Infof("Gracefully shutting down the database...")
btcdLog.Infof("Gracefully shutting down the database...")
db.RollbackClose()
})
@ -92,7 +87,7 @@ func btcdMain() error {
server, err := newServer(cfg.Listeners, db, activeNetParams.btcnet)
if err != nil {
// TODO(oga) this logging could do with some beautifying.
log.Errorf("Unable to start server on %v: %v",
btcdLog.Errorf("Unable to start server on %v: %v",
cfg.Listeners, err)
return err
}
@ -112,7 +107,7 @@ func btcdMain() error {
// Wait for shutdown signal from either a graceful server stop or from
// the interrupt handler.
<-shutdownChannel
log.Info("Shutdown complete")
btcdLog.Info("Shutdown complete")
return nil
}


@ -189,7 +189,7 @@ func loadConfig() (*config, []string, error) {
// Create the home directory if it doesn't already exist.
err := os.MkdirAll(btcdHomeDir, 0700)
if err != nil {
log.Errorf("%v", err)
btcdLog.Errorf("%v", err)
return nil, nil, err
}
@ -218,7 +218,7 @@ func loadConfig() (*config, []string, error) {
parser.WriteHelp(os.Stderr)
return nil, nil, err
}
log.Warnf("%v", err)
btcdLog.Warnf("%v", err)
}
}


@ -150,7 +150,7 @@ func torLookupIP(host, proxy string) ([]net.IP, error) {
// any hosts in the list. Therefore if all hosts failed an empty slice of
// strings will be returned.
func dnsDiscover(seeder string, proxy string) []net.IP {
log.Debugf("DISC: Fetching list of seeds from %v", seeder)
discLog.Debugf("Fetching list of seeds from %v", seeder)
peers, err := doDNSLookup(seeder, proxy)
if err != nil {
seederPlusProxy := seeder
@ -158,8 +158,8 @@ func dnsDiscover(seeder string, proxy string) []net.IP {
seederPlusProxy = fmt.Sprintf("%s (proxy %s)",
seeder, proxy)
}
log.Debugf("DISC: Unable to fetch dns seeds "+
"from %s: %v", seederPlusProxy, err)
discLog.Debugf("Unable to fetch dns seeds from %s: %v",
seederPlusProxy, err)
return []net.IP{}
}

log.go

@ -8,6 +8,7 @@ import (
"fmt"
"github.com/conformal/btcchain"
"github.com/conformal/btcdb"
"github.com/conformal/btclog"
"github.com/conformal/btcscript"
"github.com/conformal/btcwire"
"github.com/conformal/seelog"
@ -24,10 +25,40 @@ const (
lockTimeThreshold uint32 = 5e8 // Tue Nov 5 00:53:20 1985 UTC
)
// Loggers per subsystem. Note that backendLog is a seelog logger that all of
// the subsystem loggers route their messages to. When adding new subsystems,
// add a reference here, to the subsystemLoggers map, and the useLogger
// function.
var (
log = seelog.Disabled
backendLog = seelog.Disabled
btcdLog = btclog.Disabled
bcdbLog = btclog.Disabled
chanLog = btclog.Disabled
scrpLog = btclog.Disabled
amgrLog = btclog.Disabled
bmgrLog = btclog.Disabled
discLog = btclog.Disabled
peerLog = btclog.Disabled
rpcsLog = btclog.Disabled
srvrLog = btclog.Disabled
txmpLog = btclog.Disabled
)
// subsystemLoggers maps each subsystem identifier to its associated logger.
var subsystemLoggers = map[string]btclog.Logger{
"BTCD": btcdLog,
"BCDB": bcdbLog,
"CHAN": chanLog,
"SCRP": scrpLog,
"AMGR": amgrLog,
"BMGR": bmgrLog,
"DISC": discLog,
"PEER": peerLog,
"RPCS": rpcsLog,
"SRVR": srvrLog,
"TXMP": txmpLog,
}
// logClosure is used to provide a closure over expensive logging operations
// so they don't have to be performed when the logging level doesn't warrant it.
type logClosure func() string
@ -44,22 +75,67 @@ func newLogClosure(c func() string) logClosure {
return logClosure(c)
}
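
The logClosure pattern kept above works because the type's String method, which is elided from this hunk, lets a %v verb defer the expensive formatting until a message is actually emitted. A sketch, assuming the usual fmt.Stringer wiring:

// Assumed elided method: the closure only runs when the formatter asks
// for the string, i.e. when the logger's level allows the message.
func (c logClosure) String() string {
	return c()
}

// Usage, as in peer.go further down: spew.Sdump(msg) is only evaluated
// when peerLog is logging at the trace level.
peerLog.Tracef("%v", newLogClosure(func() string {
	return spew.Sdump(msg)
}))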
// newLogger creates a new seelog logger using the provided logging level and
// log message prefix.
func newLogger(level string, prefix string) seelog.LoggerInterface {
//<seelog type="adaptive" mininterval="2000000" maxinterval="100000000"
// critmsgcount="500" minlevel="%s">
// useLogger updates the logger references for subsystemID to logger. Invalid
// subsystems are ignored.
func useLogger(subsystemID string, logger btclog.Logger) {
if _, ok := subsystemLoggers[subsystemID]; !ok {
return
}
subsystemLoggers[subsystemID] = logger
fmtstring := `
<seelog type="sync" minlevel="%s">
switch subsystemID {
case "BTCD":
btcdLog = logger
case "BCDB":
bcdbLog = logger
btcdb.UseLogger(logger)
case "CHAN":
chanLog = logger
btcchain.UseLogger(logger)
case "SCRP":
scrpLog = logger
btcscript.UseLogger(logger)
case "AMGR":
amgrLog = logger
case "BMGR":
bmgrLog = logger
case "DISC":
discLog = logger
case "PEER":
peerLog = logger
case "RPCS":
rpcsLog = logger
case "SRVR":
srvrLog = logger
case "TXMP":
txmpLog = logger
}
}
// newSeelogLogger creates a new seelog logger.
func newSeelogLogger() seelog.LoggerInterface {
// <seelog type="sync" minlevel="trace">
config := `
<seelog type="adaptive" mininterval="2000000" maxinterval="100000000"
critmsgcount="500" minlevel="trace">
<outputs formatid="all">
<console/>
</outputs>
<formats>
<format id="all" format="%%Time %%Date [%%LEV] %s: %%Msg%%n" />
<format id="all" format="%Time %Date [%LEV] %Msg%n" />
</formats>
</seelog>`
config := fmt.Sprintf(fmtstring, level, prefix)
logger, err := seelog.LoggerFromConfigAsString(config)
if err != nil {
@ -70,37 +146,44 @@ func newLogger(level string, prefix string) seelog.LoggerInterface {
return logger
}
// useLogger sets the btcd logger to the passed logger.
func useLogger(logger seelog.LoggerInterface) {
log = logger
// setLogLevel sets the logging level for the provided subsystem. Invalid
// subsystems are ignored. Uninitialized subsystems are dynamically created as
// needed.
func setLogLevel(subsystemID string, logLevel string) {
// Create the backend seelog logger if needed.
if backendLog == seelog.Disabled {
backendLog = newSeelogLogger()
}
// Ignore invalid subsystems.
logger, ok := subsystemLoggers[subsystemID]
if !ok {
return
}
// Default to info if the log level is invalid.
level, ok := btclog.LogLevelFromString(logLevel)
if !ok {
level = btclog.InfoLvl
}
// Create new logger for the subsystem if needed.
if logger == btclog.Disabled {
logger = btclog.NewSubsystemLogger(backendLog, subsystemID+": ")
useLogger(subsystemID, logger)
}
logger.SetLevel(level)
}
// setLogLevel sets the log level for the logging system. It initializes a
// logger for each subsystem at the provided level.
func setLogLevel(logLevel string) []seelog.LoggerInterface {
var loggers []seelog.LoggerInterface
// Define sub-systems.
subSystems := []struct {
level string
prefix string
useLogger func(seelog.LoggerInterface)
}{
{logLevel, "BTCD", useLogger},
{logLevel, "BCDB", btcdb.UseLogger},
{logLevel, "CHAN", btcchain.UseLogger},
{logLevel, "SCRP", btcscript.UseLogger},
// setLogLevels sets the log level for all subsystem loggers to the passed
// level. It also dynamically creates the subsystem loggers as needed, so it
// can be used to initialize the logging system.
func setLogLevels(logLevel string) {
// Configure all sub-systems with the new logging level. Dynamically
// create loggers as needed.
for subsystemID := range subsystemLoggers {
setLogLevel(subsystemID, logLevel)
}
// Configure all sub-systems with new loggers while keeping track of
// the created loggers to return so they can be flushed.
for _, s := range subSystems {
newLog := newLogger(s.level, s.prefix)
loggers = append(loggers, newLog)
s.useLogger(newLog)
}
return loggers
}
// directionString is a helper function that returns a string that represents
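
The header comment at the top of log.go says that adding a subsystem means touching the package-level logger variables, the subsystemLoggers map, and useLogger. A sketch of those three additions for a hypothetical "MINR" subsystem (not part of this commit):

// 1. Package-level reference alongside the other subsystem loggers.
var minrLog = btclog.Disabled

// 2. Entry in the subsystemLoggers map so setLogLevel/setLogLevels can find it:
//	"MINR": minrLog,

// 3. Case in useLogger's switch so the reference is swapped in:
//	case "MINR":
//		minrLog = logger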


@ -465,7 +465,7 @@ func (mp *txMemPool) addOrphan(tx *btcutil.Tx) {
mp.orphansByPrev[originTxHash].PushBack(tx)
}
log.Debugf("TXMP: Stored orphan transaction %v (total: %d)", tx.Sha(),
txmpLog.Debugf("Stored orphan transaction %v (total: %d)", tx.Sha(),
len(mp.orphans))
}
@ -858,7 +858,7 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isOrphan *bool) erro
// Add to transaction pool.
mp.addTransaction(tx)
log.Debugf("TXMP: Accepted transaction %v (pool size: %v)", txHash,
txmpLog.Debugf("Accepted transaction %v (pool size: %v)", txHash,
len(mp.pool))
// TODO(davec): Notifications
@ -953,7 +953,7 @@ func (mp *txMemPool) ProcessTransaction(tx *btcutil.Tx) error {
mp.Lock()
defer mp.Unlock()
log.Tracef("TXMP: Processing transaction %v", tx.Sha())
txmpLog.Tracef("Processing transaction %v", tx.Sha())
// Potentially accept the transaction to the memory pool.
var isOrphan bool

peer.go

@ -224,7 +224,7 @@ func (p *peer) pushVersionMsg() error {
func (p *peer) handleVersionMsg(msg *btcwire.MsgVersion) {
// Detect self connections.
if msg.Nonce == p.server.nonce {
log.Debugf("PEER: Disconnecting peer connected to self %s",
peerLog.Debugf("Disconnecting peer connected to self %s",
p.addr)
p.Disconnect()
return
@ -241,7 +241,7 @@ func (p *peer) handleVersionMsg(msg *btcwire.MsgVersion) {
// Negotiate the protocol version.
p.protocolVersion = minUint32(p.protocolVersion, uint32(msg.ProtocolVersion))
p.versionKnown = true
log.Debugf("PEER: Negotiated protocol version %d for peer %s",
peerLog.Debugf("Negotiated protocol version %d for peer %s",
p.protocolVersion, p.addr)
p.lastBlock = msg.LastBlock
@ -257,7 +257,7 @@ func (p *peer) handleVersionMsg(msg *btcwire.MsgVersion) {
// Send version.
err := p.pushVersionMsg()
if err != nil {
p.logError("PEER: Can't send version message: %v", err)
p.logError("Can't send version message: %v", err)
p.Disconnect()
return
}
@ -267,7 +267,7 @@ func (p *peer) handleVersionMsg(msg *btcwire.MsgVersion) {
// at connection time and no point recomputing.
na, err := newNetAddress(p.conn.RemoteAddr(), p.services)
if err != nil {
p.logError("PEER: Can't get remote address: %v", err)
p.logError("Can't get remote address: %v", err)
p.Disconnect()
return
}
@ -285,7 +285,7 @@ func (p *peer) handleVersionMsg(msg *btcwire.MsgVersion) {
// Advertise the local address.
na, err := newNetAddress(p.conn.LocalAddr(), p.services)
if err != nil {
p.logError("PEER: Can't advertise local "+
p.logError("Can't advertise local "+
"address: %v", err)
p.Disconnect()
return
@ -329,7 +329,7 @@ func (p *peer) pushTxMsg(sha *btcwire.ShaHash, doneChan chan bool) error {
// to fetch a missing transaction results in the same behavior.
tx, err := p.server.txMemPool.FetchTransaction(sha)
if err != nil {
log.Tracef("PEER: Unable to fetch tx %v from transaction "+
peerLog.Tracef("Unable to fetch tx %v from transaction "+
"pool: %v", sha, err)
return err
}
@ -357,7 +357,7 @@ func (p *peer) pushBlockMsg(sha *btcwire.ShaHash, doneChan chan bool) error {
blk, err := p.server.db.FetchBlockBySha(sha)
if err != nil {
log.Tracef("PEER: Unable to fetch requested block sha %v: %v",
peerLog.Tracef("Unable to fetch requested block sha %v: %v",
sha, err)
return err
}
@ -410,7 +410,7 @@ func (p *peer) PushGetBlocksMsg(locator btcchain.BlockLocator, stopHash *btcwire
beginHash != nil && stopHash.IsEqual(p.prevGetBlocksStop) &&
beginHash.IsEqual(p.prevGetBlocksBegin) {
log.Tracef("PEER: Filtering duplicate [getblocks] with begin "+
peerLog.Tracef("Filtering duplicate [getblocks] with begin "+
"hash %v, stop hash %v", beginHash, stopHash)
return nil
}
@ -492,7 +492,7 @@ func (p *peer) handleBlockMsg(msg *btcwire.MsgBlock, buf []byte) {
// Add the block to the known inventory for the peer.
hash, err := block.Sha()
if err != nil {
log.Errorf("Unable to get block hash: %v", err)
peerLog.Errorf("Unable to get block hash: %v", err)
return
}
iv := btcwire.NewInvVect(btcwire.InvTypeBlock, hash)
@ -541,7 +541,7 @@ out:
case btcwire.InvTypeBlock:
err = p.pushBlockMsg(&iv.Hash, c)
default:
log.Warnf("PEER: Unknown type in inventory request %d",
peerLog.Warnf("Unknown type in inventory request %d",
iv.Type)
break out
}
@ -607,7 +607,7 @@ func (p *peer) handleGetBlocksMsg(msg *btcwire.MsgGetBlocks) {
// Fetch the inventory from the block database.
hashList, err := p.server.db.FetchHeightRange(start, endIdx)
if err != nil {
log.Warnf("PEER: Block lookup failed: %v", err)
peerLog.Warnf("Block lookup failed: %v", err)
return
}
@ -701,7 +701,7 @@ func (p *peer) handleGetHeadersMsg(msg *btcwire.MsgGetHeaders) {
// Fetch the inventory from the block database.
hashList, err := p.server.db.FetchHeightRange(start, endIdx)
if err != nil {
log.Warnf("PEER: Header lookup failed: %v", err)
peerLog.Warnf("Header lookup failed: %v", err)
return
}
@ -715,7 +715,7 @@ func (p *peer) handleGetHeadersMsg(msg *btcwire.MsgGetHeaders) {
for _, hash := range hashList {
block, err := p.server.db.FetchBlockBySha(&hash)
if err != nil {
log.Warnf("PEER: Lookup of known block hash "+
peerLog.Warnf("Lookup of known block hash "+
"failed: %v", err)
continue
}
@ -741,7 +741,7 @@ func (p *peer) handleGetAddrMsg(msg *btcwire.MsgGetAddr) {
// Push the addresses.
err := p.pushAddrMsg(addrCache)
if err != nil {
p.logError("PEER: Can't push address message: %v", err)
p.logError("Can't push address message: %v", err)
p.Disconnect()
return
}
@ -794,7 +794,7 @@ func (p *peer) handleAddrMsg(msg *btcwire.MsgAddr) {
// A message that has no addresses is invalid.
if len(msg.AddrList) == 0 {
p.logError("PEER: Command [%s] from %s does not contain any addresses",
p.logError("Command [%s] from %s does not contain any addresses",
msg.Command(), p.addr)
p.Disconnect()
return
@ -847,20 +847,20 @@ func (p *peer) readMessage() (msg btcwire.Message, buf []byte, err error) {
// Use closures to log expensive operations so they are only run when
// the logging level requires it.
log.Debugf("%v", newLogClosure(func() string {
peerLog.Debugf("%v", newLogClosure(func() string {
// Debug summary of message.
summary := messageSummary(msg)
if len(summary) > 0 {
summary = " (" + summary + ")"
}
return fmt.Sprintf("PEER: Received %v%s from %s",
return fmt.Sprintf("Received %v%s from %s",
msg.Command(), summary, p.addr)
}))
log.Tracef("%v", newLogClosure(func() string {
return "PEER: " + spew.Sdump(msg)
peerLog.Tracef("%v", newLogClosure(func() string {
return spew.Sdump(msg)
}))
log.Tracef("%v", newLogClosure(func() string {
return "PEER: " + spew.Sdump(buf)
peerLog.Tracef("%v", newLogClosure(func() string {
return spew.Sdump(buf)
}))
return
@ -885,32 +885,32 @@ func (p *peer) writeMessage(msg btcwire.Message) {
// Use closures to log expensive operations so they are only run when
// the logging level requires it.
log.Debugf("%v", newLogClosure(func() string {
peerLog.Debugf("%v", newLogClosure(func() string {
// Debug summary of message.
summary := messageSummary(msg)
if len(summary) > 0 {
summary = " (" + summary + ")"
}
return fmt.Sprintf("PEER: Sending %v%s to %s", msg.Command(),
return fmt.Sprintf("Sending %v%s to %s", msg.Command(),
summary, p.addr)
}))
log.Tracef("%v", newLogClosure(func() string {
return "PEER: " + spew.Sdump(msg)
peerLog.Tracef("%v", newLogClosure(func() string {
return spew.Sdump(msg)
}))
log.Tracef("%v", newLogClosure(func() string {
peerLog.Tracef("%v", newLogClosure(func() string {
var buf bytes.Buffer
err := btcwire.WriteMessage(&buf, msg, p.protocolVersion, p.btcnet)
if err != nil {
return err.Error()
}
return "PEER: " + spew.Sdump(buf.Bytes())
return spew.Sdump(buf.Bytes())
}))
// Write the message to the peer.
err := btcwire.WriteMessage(p.conn, msg, p.protocolVersion, p.btcnet)
if err != nil {
p.Disconnect()
p.logError("PEER: Can't send message: %v", err)
p.logError("Can't send message: %v", err)
return
}
}
@ -948,7 +948,7 @@ func (p *peer) inHandler() {
// XXX technically very very very slightly racy, doesn't really
// matter.
if p.versionKnown {
log.Warnf("Peer %s no answer for %d minutes, "+
peerLog.Warnf("Peer %s no answer for %d minutes, "+
"disconnecting", p, idleTimeoutMinutes)
}
p.Disconnect()
@ -964,7 +964,7 @@ out:
// regression test mode and the error is one of the
// allowed errors.
if cfg.RegressionTest && p.isAllowedByRegression(err) {
log.Errorf("PEER: Allowed regression test "+
peerLog.Errorf("Allowed regression test "+
"error: %v", err)
idleTimer.Reset(idleTimeoutMinutes * time.Minute)
continue
@ -1042,7 +1042,7 @@ out:
p.handleGetHeadersMsg(msg)
default:
log.Debugf("PEER: Received unhandled message of type %v: Fix Me",
peerLog.Debugf("Received unhandled message of type %v: Fix Me",
rmsg.Command())
}
@ -1050,7 +1050,7 @@ out:
// now if one of the messages that trigger it was processed.
if markConnected && atomic.LoadInt32(&p.disconnect) == 0 {
if p.na == nil {
log.Warnf("we're getting stuff before we " +
peerLog.Warnf("we're getting stuff before we " +
"got a version message. that's bad")
continue
}
@ -1072,7 +1072,7 @@ out:
p.server.blockManager.DonePeer(p)
}
log.Tracef("PEER: Peer input handler done for %s", p.addr)
peerLog.Tracef("Peer input handler done for %s", p.addr)
}
// outHandler handles all outgoing messages for the peer. It must be run as a
@ -1083,7 +1083,7 @@ func (p *peer) outHandler() {
pingTimer := time.AfterFunc(pingTimeoutMinutes*time.Minute, func() {
nonce, err := btcwire.RandomUint64()
if err != nil {
log.Errorf("Not sending ping on timeout to %s: %v",
peerLog.Errorf("Not sending ping on timeout to %s: %v",
p, err)
return
}
@ -1192,7 +1192,7 @@ cleanup:
break cleanup
}
}
log.Tracef("PEER: Peer output handler done for %s", p.addr)
peerLog.Tracef("Peer output handler done for %s", p.addr)
}
// QueueMessage adds the passed bitcoin message to the peer send queue. It
@ -1243,7 +1243,7 @@ func (p *peer) Start() error {
return nil
}
log.Tracef("PEER: Starting peer %s", p.addr)
peerLog.Tracef("Starting peer %s", p.addr)
// Send an initial version message if this is an outbound connection.
if !p.inbound {
@ -1265,7 +1265,7 @@ func (p *peer) Start() error {
// Shutdown gracefully shuts down the peer by disconnecting it.
func (p *peer) Shutdown() {
log.Tracef("PEER: Shutdown peer %s", p.addr)
peerLog.Tracef("Shutdown peer %s", p.addr)
p.Disconnect()
}
@ -1356,11 +1356,11 @@ func newOutboundPeer(s *server, addr string, persistent bool) *peer {
// this is a persistent connection, retry after the retry
// interval.
for atomic.LoadInt32(&p.disconnect) == 0 {
log.Debugf("SRVR: Attempting to connect to %s", faddr)
srvrLog.Debugf("Attempting to connect to %s", faddr)
conn, err := dial("tcp", addr)
if err != nil {
p.retrycount += 1
log.Debugf("SRVR: Failed to connect to %s: %v",
srvrLog.Debugf("Failed to connect to %s: %v",
faddr, err)
if !persistent {
p.server.donePeers <- p
@ -1368,8 +1368,8 @@ func newOutboundPeer(s *server, addr string, persistent bool) *peer {
}
scaledInterval := connectionRetryInterval.Nanoseconds() * p.retrycount / 2
scaledDuration := time.Duration(scaledInterval)
log.Debugf("SRVR: Retrying connection to %s "+
"in %s", faddr, scaledDuration)
srvrLog.Debugf("Retrying connection to %s in "+
"%s", faddr, scaledDuration)
time.Sleep(scaledDuration)
continue
}
@ -1382,7 +1382,7 @@ func newOutboundPeer(s *server, addr string, persistent bool) *peer {
p.server.addrManager.Attempt(p.na)
// Connection was successful so log it and start peer.
log.Debugf("SRVR: Connected to %s",
srvrLog.Debugf("Connected to %s",
conn.RemoteAddr())
p.conn = conn
atomic.AddInt32(&p.connected, 1)
@ -1399,8 +1399,8 @@ func newOutboundPeer(s *server, addr string, persistent bool) *peer {
// logError makes sure that we only log errors loudly on user peers.
func (p *peer) logError(fmt string, args ...interface{}) {
if p.persistent {
log.Errorf(fmt, args...)
peerLog.Errorf(fmt, args...)
} else {
log.Debugf(fmt, args...)
peerLog.Debugf(fmt, args...)
}
}


@ -272,7 +272,7 @@ func (s *rpcServer) Start() {
return
}
log.Trace("RPCS: Starting RPC server")
rpcsLog.Trace("Starting RPC server")
rpcServeMux := http.NewServeMux()
httpServer := &http.Server{Handler: rpcServeMux}
rpcServeMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
@ -282,7 +282,7 @@ func (s *rpcServer) Start() {
if len(authhdr) > 0 && authhdr[0] == auth {
jsonRPCRead(w, r, s)
} else {
log.Warnf("RPCS: Auth failure.")
rpcsLog.Warnf("Auth failure.")
jsonAuthFail(w, r, s)
}
})
@ -305,9 +305,9 @@ func (s *rpcServer) Start() {
for _, listener := range s.listeners {
s.wg.Add(1)
go func(listener net.Listener) {
log.Infof("RPCS: RPC server listening on %s", listener.Addr())
rpcsLog.Infof("RPC server listening on %s", listener.Addr())
httpServer.Serve(listener)
log.Tracef("RPCS: RPC listener done for %s", listener.Addr())
rpcsLog.Tracef("RPC listener done for %s", listener.Addr())
s.wg.Done()
}(listener)
}
@ -316,18 +316,18 @@ func (s *rpcServer) Start() {
// Stop is used by server.go to stop the rpc listener.
func (s *rpcServer) Stop() error {
if atomic.AddInt32(&s.shutdown, 1) != 1 {
log.Infof("RPCS: RPC server is already in the process of shutting down")
rpcsLog.Infof("RPC server is already in the process of shutting down")
return nil
}
log.Warnf("RPCS: RPC server shutting down")
rpcsLog.Warnf("RPC server shutting down")
for _, listener := range s.listeners {
err := listener.Close()
if err != nil {
log.Errorf("RPCS: Problem shutting down rpc: %v", err)
rpcsLog.Errorf("Problem shutting down rpc: %v", err)
return err
}
}
log.Infof("RPCS: RPC server shutdown complete")
rpcsLog.Infof("RPC server shutdown complete")
s.wg.Wait()
close(s.quit)
return nil
@ -336,7 +336,7 @@ func (s *rpcServer) Stop() error {
// genkey generates a key/cert pair to the paths provided.
// TODO(oga) wrap errors with fmt.Errorf for more context?
func genKey(key, cert string) error {
log.Infof("RPCS: Generating TLS certificates...")
rpcsLog.Infof("Generating TLS certificates...")
priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return err
@ -419,7 +419,7 @@ func genKey(key, cert string) error {
pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keybytes})
keyOut.Close()
log.Infof("RPCS: Done generating TLS certificates")
rpcsLog.Infof("Done generating TLS certificates")
return nil
}
@ -470,7 +470,7 @@ func newRPCServer(listenAddrs []string, s *server) (*rpcServer, error) {
var listener net.Listener
listener, err = tls.Listen("tcp4", addr, &tlsConfig)
if err != nil {
log.Warnf("RPCS: Can't listen on %s: %v", addr,
rpcsLog.Warnf("Can't listen on %s: %v", addr,
err)
continue
}
@ -481,7 +481,7 @@ func newRPCServer(listenAddrs []string, s *server) (*rpcServer, error) {
var listener net.Listener
listener, err = tls.Listen("tcp6", addr, &tlsConfig)
if err != nil {
log.Warnf("RPCS: Can't listen on %s: %v", addr,
rpcsLog.Warnf("Can't listen on %s: %v", addr,
err)
continue
}
@ -510,7 +510,7 @@ func jsonRPCRead(w http.ResponseWriter, r *http.Request, s *rpcServer) {
}
body, err := btcjson.GetRaw(r.Body)
if err != nil {
log.Errorf("RPCS: Error getting json message: %v", err)
rpcsLog.Errorf("Error getting json message: %v", err)
return
}
@ -528,14 +528,14 @@ func jsonRPCRead(w http.ResponseWriter, r *http.Request, s *rpcServer) {
reply = standardCmdReply(cmd, s, nil)
}
log.Tracef("[RPCS] reply: %v", reply)
rpcsLog.Tracef("reply: %v", reply)
msg, err := btcjson.MarshallAndSend(reply, w)
if err != nil {
log.Errorf(msg)
rpcsLog.Errorf(msg)
return
}
log.Debugf(msg)
rpcsLog.Debugf(msg)
}
// TODO(jrick): Remove the wallet notification chan.
@ -674,7 +674,7 @@ func handleGetBestBlockHash(s *rpcServer, cmd btcjson.Cmd, walletNotification ch
var sha *btcwire.ShaHash
sha, _, err := s.server.db.NewestSha()
if err != nil {
log.Errorf("RPCS: Error getting newest sha: %v", err)
rpcsLog.Errorf("Error getting newest sha: %v", err)
return nil, btcjson.ErrBestBlockHash
}
@ -686,18 +686,18 @@ func handleGetBlock(s *rpcServer, cmd btcjson.Cmd, walletNotification chan []byt
c := cmd.(*btcjson.GetBlockCmd)
sha, err := btcwire.NewShaHashFromStr(c.Hash)
if err != nil {
log.Errorf("RPCS: Error generating sha: %v", err)
rpcsLog.Errorf("Error generating sha: %v", err)
return nil, btcjson.ErrBlockNotFound
}
blk, err := s.server.db.FetchBlockBySha(sha)
if err != nil {
log.Errorf("RPCS: Error fetching sha: %v", err)
rpcsLog.Errorf("Error fetching sha: %v", err)
return nil, btcjson.ErrBlockNotFound
}
idx := blk.Height()
buf, err := blk.Bytes()
if err != nil {
log.Errorf("RPCS: Error fetching block: %v", err)
rpcsLog.Errorf("Error fetching block: %v", err)
return nil, btcjson.ErrBlockNotFound
}
@ -710,7 +710,7 @@ func handleGetBlock(s *rpcServer, cmd btcjson.Cmd, walletNotification chan []byt
_, maxidx, err := s.server.db.NewestSha()
if err != nil {
log.Errorf("RPCS: Cannot get newest sha: %v", err)
rpcsLog.Errorf("Cannot get newest sha: %v", err)
return nil, btcjson.ErrBlockNotFound
}
@ -735,7 +735,7 @@ func handleGetBlock(s *rpcServer, cmd btcjson.Cmd, walletNotification chan []byt
var shaNext *btcwire.ShaHash
shaNext, err = s.server.db.FetchBlockShaByHeight(int64(idx + 1))
if err != nil {
log.Errorf("RPCS: No next block: %v", err)
rpcsLog.Errorf("No next block: %v", err)
return nil, btcjson.ErrBlockNotFound
}
blockReply.NextHash = shaNext.String()
@ -748,7 +748,7 @@ func handleGetBlock(s *rpcServer, cmd btcjson.Cmd, walletNotification chan []byt
func handleGetBlockCount(s *rpcServer, cmd btcjson.Cmd, walletNotification chan []byte) (interface{}, error) {
_, maxidx, err := s.server.db.NewestSha()
if err != nil {
log.Errorf("RPCS: Error getting newest sha: %v", err)
rpcsLog.Errorf("Error getting newest sha: %v", err)
return nil, btcjson.ErrBlockCount
}
@ -760,7 +760,7 @@ func handleGetBlockHash(s *rpcServer, cmd btcjson.Cmd, walletNotification chan [
c := cmd.(*btcjson.GetBlockHashCmd)
sha, err := s.server.db.FetchBlockShaByHeight(c.Index)
if err != nil {
log.Errorf("[RCPS] Error getting block: %v", err)
rpcsLog.Errorf("Error getting block: %v", err)
return nil, btcjson.ErrOutOfRange
}
@ -776,12 +776,12 @@ func handleGetConnectionCount(s *rpcServer, cmd btcjson.Cmd, walletNotification
func handleGetDifficulty(s *rpcServer, cmd btcjson.Cmd, walletNotification chan []byte) (interface{}, error) {
sha, _, err := s.server.db.NewestSha()
if err != nil {
log.Errorf("RPCS: Error getting sha: %v", err)
rpcsLog.Errorf("Error getting sha: %v", err)
return nil, btcjson.ErrDifficulty
}
blk, err := s.server.db.FetchBlockBySha(sha)
if err != nil {
log.Errorf("RPCS: Error getting block: %v", err)
rpcsLog.Errorf("Error getting block: %v", err)
return nil, btcjson.ErrDifficulty
}
blockHeader := &blk.MsgBlock().Header
@ -829,7 +829,7 @@ func handleGetRawTransaction(s *rpcServer, cmd btcjson.Cmd, walletNotification c
if err != nil {
txList, err := s.server.db.FetchTxBySha(txSha)
if err != nil || len(txList) == 0 {
log.Errorf("RPCS: Error fetching tx: %v", err)
rpcsLog.Errorf("Error fetching tx: %v", err)
return nil, btcjson.ErrNoTxInfo
}
@ -852,7 +852,7 @@ func handleGetRawTransaction(s *rpcServer, cmd btcjson.Cmd, walletNotification c
disbuf, _ := btcscript.DisasmString(v.SignatureScript)
vinList[i].ScriptSig.Asm = strings.Replace(disbuf, " ", "", -1)
vinList[i].Vout = i + 1
log.Debugf(disbuf)
rpcsLog.Debugf(disbuf)
}
for i, v := range txOutList {
@ -864,7 +864,7 @@ func handleGetRawTransaction(s *rpcServer, cmd btcjson.Cmd, walletNotification c
_, addrhash, err := btcscript.ScriptToAddrHash(v.PkScript)
if err != nil {
// TODO: set and return error?
log.Errorf("RPCS: Error getting address hash for %v: %v", txSha, err)
rpcsLog.Errorf("Error getting address hash for %v: %v", txSha, err)
}
if addr, err := btcutil.EncodeAddress(addrhash, s.server.btcnet); err == nil {
// TODO: set and return error?
@ -884,14 +884,14 @@ func handleGetRawTransaction(s *rpcServer, cmd btcjson.Cmd, walletNotification c
if blksha != nil {
blk, err := s.server.db.FetchBlockBySha(blksha)
if err != nil {
log.Errorf("RPCS: Error fetching sha: %v", err)
rpcsLog.Errorf("Error fetching sha: %v", err)
return nil, btcjson.ErrBlockNotFound
}
idx := blk.Height()
_, maxidx, err := s.server.db.NewestSha()
if err != nil {
log.Errorf("RPCS: Cannot get newest sha: %v", err)
rpcsLog.Errorf("Cannot get newest sha: %v", err)
return nil, btcjson.ErrNoNewestBlockInfo
}
@ -937,10 +937,10 @@ func handleSendRawTransaction(s *rpcServer, cmd btcjson.Cmd, walletNotification
// so log it as such. Otherwise, something really did go wrong,
// so log it as an actual error.
if _, ok := err.(TxRuleError); ok {
log.Debugf("RPCS: Rejected transaction %v: %v", tx.Sha(),
rpcsLog.Debugf("Rejected transaction %v: %v", tx.Sha(),
err)
} else {
log.Errorf("RPCS: Failed to process transaction %v: %v",
rpcsLog.Errorf("Failed to process transaction %v: %v",
tx.Sha(), err)
err = btcjson.Error{
Code: btcjson.ErrDeserialization.Code,
@ -974,7 +974,7 @@ func handleStop(s *rpcServer, cmd btcjson.Cmd, walletNotification chan []byte) (
func verifyChain(db btcdb.Db, level, depth int32) error {
_, curheight64, err := db.NewestSha()
if err != nil {
log.Errorf("RPCS: verify is unable to fetch current block "+
rpcsLog.Errorf("Verify is unable to fetch current block "+
"height: %v", err)
}
@ -988,14 +988,14 @@ func verifyChain(db btcdb.Db, level, depth int32) error {
// Level 0 just looks up the block.
sha, err := db.FetchBlockShaByHeight(int64(height))
if err != nil {
log.Errorf("RPCS: verify is unable to fetch block at "+
rpcsLog.Errorf("Verify is unable to fetch block at "+
"height %d: %v", height, err)
return err
}
block, err := db.FetchBlockBySha(sha)
if err != nil {
log.Errorf("RPCS: verify is unable to fetch block at "+
rpcsLog.Errorf("Verify is unable to fetch block at "+
"sha %v height %d: %v", sha, height, err)
return err
}
@ -1005,14 +1005,14 @@ func verifyChain(db btcdb.Db, level, depth int32) error {
err := btcchain.CheckBlockSanity(block,
activeNetParams.powLimit)
if err != nil {
log.Errorf("RPCS: verify is unable to "+
rpcsLog.Errorf("Verify is unable to "+
"validate block at sha %v height "+
"%s: %v", sha, height, err)
return err
}
}
}
log.Infof("RPCS: Chain verify completed successfully")
rpcsLog.Infof("Chain verify completed successfully")
return nil
}
@ -1181,7 +1181,7 @@ func handleRescan(s *rpcServer, cmd btcjson.Cmd,
return btcjson.ErrInternal
}
log.Debugf("RPCS: Begining rescan")
rpcsLog.Debugf("Begining rescan")
minblock := int64(rescanCmd.BeginBlock)
maxblock := int64(rescanCmd.EndBlock)
@ -1219,7 +1219,7 @@ func handleRescan(s *rpcServer, cmd btcjson.Cmd,
}
txaddr, err := btcutil.EncodeAddress(txaddrhash, s.server.btcnet)
if err != nil {
log.Errorf("Error encoding address: %v", err)
rpcsLog.Errorf("Error encoding address: %v", err)
return err
}
if _, ok := rescanCmd.Addresses[txaddr]; ok {
@ -1261,7 +1261,7 @@ func handleRescan(s *rpcServer, cmd btcjson.Cmd,
mreply, _ := json.Marshal(reply)
walletNotification <- mreply
log.Debug("RPCS: Finished rescan")
rpcsLog.Debug("Finished rescan")
return nil
}
@ -1327,7 +1327,7 @@ func getDifficultyRatio(bits uint32) float64 {
outString := difficulty.FloatString(2)
diff, err := strconv.ParseFloat(outString, 64)
if err != nil {
log.Errorf("RPCS: Cannot get difficulty: %v", err)
rpcsLog.Errorf("Cannot get difficulty: %v", err)
return 0
}
return diff
@ -1476,7 +1476,7 @@ func (s *rpcServer) websocketJSONHandler(walletNotification chan []byte,
func (s *rpcServer) NotifyBlockConnected(block *btcutil.Block) {
hash, err := block.Sha()
if err != nil {
log.Error("Bad block; connected block notification dropped.")
rpcsLog.Error("Bad block; connected block notification dropped.")
return
}
@ -1511,7 +1511,7 @@ func (s *rpcServer) NotifyBlockConnected(block *btcutil.Block) {
func (s *rpcServer) NotifyBlockDisconnected(block *btcutil.Block) {
hash, err := block.Sha()
if err != nil {
log.Error("Bad block; connected block notification dropped.")
rpcsLog.Error("Bad block; connected block notification dropped.")
return
}
@ -1540,7 +1540,7 @@ func notifySpentData(ctx *notificationCtx, txhash *btcwire.ShaHash, index uint32
err := spender.MsgTx().Serialize(&buf)
if err != nil {
// This really shouldn't ever happen...
log.Warnf("RPCS: Can't serialize tx: %v", err)
rpcsLog.Warnf("Can't serialize tx: %v", err)
return
}
txStr = string(buf.Bytes())
@ -1561,7 +1561,7 @@ func notifySpentData(ctx *notificationCtx, txhash *btcwire.ShaHash, index uint32
}
replyBytes, err := json.Marshal(reply)
if err != nil {
log.Errorf("RPCS: Unable to marshal spent notification: %v", err)
rpcsLog.Errorf("Unable to marshal spent notification: %v", err)
return
}
ctx.connection <- replyBytes
@ -1595,7 +1595,7 @@ func (s *rpcServer) newBlockNotifyCheckTxOut(block *btcutil.Block,
for i, txout := range tx.MsgTx().TxOut {
_, txaddrhash, err := btcscript.ScriptToAddrHash(txout.PkScript)
if err != nil {
log.Debug("Error getting payment address from tx; dropping any Tx notifications.")
rpcsLog.Debug("Error getting payment address from tx; dropping any Tx notifications.")
break
}
if idlist, ok := s.ws.txNotifications[string(txaddrhash)]; ok {
@ -1604,12 +1604,12 @@ func (s *rpcServer) newBlockNotifyCheckTxOut(block *btcutil.Block,
blkhash, err := block.Sha()
if err != nil {
log.Error("Error getting block sha; dropping Tx notification.")
rpcsLog.Error("Error getting block sha; dropping Tx notification.")
break
}
txaddr, err := btcutil.EncodeAddress(txaddrhash, s.server.btcnet)
if err != nil {
log.Error("Error encoding address; dropping Tx notification.")
rpcsLog.Error("Error encoding address; dropping Tx notification.")
break
}
reply := &btcjson.Reply{
@ -1637,7 +1637,7 @@ func (s *rpcServer) newBlockNotifyCheckTxOut(block *btcutil.Block,
}
replyBytes, err := json.Marshal(reply)
if err != nil {
log.Errorf("RPCS: Unable to marshal tx notification: %v", err)
rpcsLog.Errorf("Unable to marshal tx notification: %v", err)
continue
}
ctx.connection <- replyBytes


@ -101,7 +101,7 @@ func (s *server) handleAddPeerMsg(state *peerState, p *peer) bool {
// Ignore new peers if we're shutting down.
if atomic.LoadInt32(&s.shutdown) != 0 {
log.Infof("SRVR: New peer %s ignored - server is shutting "+
srvrLog.Infof("New peer %s ignored - server is shutting "+
"down", p)
p.Shutdown()
return false
@ -110,19 +110,19 @@ func (s *server) handleAddPeerMsg(state *peerState, p *peer) bool {
// Disconnect banned peers.
host, _, err := net.SplitHostPort(p.addr)
if err != nil {
log.Debugf("SRVR: can't split hostport %v", err)
srvrLog.Debugf("can't split hostport %v", err)
p.Shutdown()
return false
}
if banEnd, ok := state.banned[host]; ok {
if time.Now().Before(banEnd) {
log.Debugf("SRVR: Peer %s is banned for another %v - "+
srvrLog.Debugf("Peer %s is banned for another %v - "+
"disconnecting", host, banEnd.Sub(time.Now()))
p.Shutdown()
return false
}
log.Infof("SRVR: Peer %s is no longer banned", host)
srvrLog.Infof("Peer %s is no longer banned", host)
delete(state.banned, host)
}
@ -130,7 +130,7 @@ func (s *server) handleAddPeerMsg(state *peerState, p *peer) bool {
// Limit max number of total peers.
if state.Count() >= cfg.MaxPeers {
log.Infof("SRVR: Max peers reached [%d] - disconnecting "+
srvrLog.Infof("Max peers reached [%d] - disconnecting "+
"peer %s", cfg.MaxPeers, p)
p.Shutdown()
// TODO(oga) how to handle permanent peers here?
@ -139,7 +139,7 @@ func (s *server) handleAddPeerMsg(state *peerState, p *peer) bool {
}
// Add the new peer and start it.
log.Debugf("SRVR: New peer %s", p)
srvrLog.Debugf("New peer %s", p)
if p.inbound {
state.peers.PushBack(p)
p.Start()
@ -179,7 +179,7 @@ func (s *server) handleDonePeerMsg(state *peerState, p *peer) {
state.outboundGroups[GroupKey(p.na)]--
}
list.Remove(e)
log.Debugf("SRVR: Removed peer %s", p)
srvrLog.Debugf("Removed peer %s", p)
return
}
}
@ -192,11 +192,11 @@ func (s *server) handleDonePeerMsg(state *peerState, p *peer) {
func (s *server) handleBanPeerMsg(state *peerState, p *peer) {
host, _, err := net.SplitHostPort(p.addr)
if err != nil {
log.Debugf("SRVR: can't split ban peer %s %v", p.addr, err)
srvrLog.Debugf("can't split ban peer %s %v", p.addr, err)
return
}
direction := directionString(p.inbound)
log.Infof("SRVR: Banned peer %s (%s) for %v", host, direction,
srvrLog.Infof("Banned peer %s (%s) for %v", host, direction,
cfg.BanDuration)
state.banned[host] = time.Now().Add(cfg.BanDuration)
@ -382,13 +382,13 @@ func (s *server) handleQuery(querymsg interface{}, state *peerState) {
// listenHandler is the main listener which accepts incoming connections for the
// server. It must be run as a goroutine.
func (s *server) listenHandler(listener net.Listener) {
log.Infof("SRVR: Server listening on %s", listener.Addr())
srvrLog.Infof("Server listening on %s", listener.Addr())
for atomic.LoadInt32(&s.shutdown) == 0 {
conn, err := listener.Accept()
if err != nil {
// Only log the error if we're not forcibly shutting down.
if atomic.LoadInt32(&s.shutdown) == 0 {
log.Errorf("SRVR: can't accept connection: %v",
srvrLog.Errorf("can't accept connection: %v",
err)
}
continue
@ -396,7 +396,7 @@ func (s *server) listenHandler(listener net.Listener) {
s.AddPeer(newInboundPeer(s, conn))
}
s.wg.Done()
log.Tracef("SRVR: Listener handler done for %s", listener.Addr())
srvrLog.Tracef("Listener handler done for %s", listener.Addr())
}
// seedFromDNS uses DNS seeding to populate the address manager with peers.
@ -452,7 +452,7 @@ func (s *server) peerHandler() {
s.addrManager.Start()
s.blockManager.Start()
log.Tracef("SRVR: Starting peer handler")
srvrLog.Tracef("Starting peer handler")
state := &peerState{
peers: list.New(),
persistentPeers: list.New(),
@ -595,7 +595,7 @@ out:
s.blockManager.Stop()
s.addrManager.Stop()
s.wg.Done()
log.Tracef("SRVR: Peer handler done")
srvrLog.Tracef("Peer handler done")
}
// AddPeer adds a new peer that has already been connected to the server.
@ -670,7 +670,7 @@ func (s *server) Start() {
return
}
log.Trace("SRVR: Starting server")
srvrLog.Trace("Starting server")
// Start all the listeners. There will not be any if listening is
// disabled.
@ -695,11 +695,11 @@ func (s *server) Start() {
func (s *server) Stop() error {
// Make sure this only happens once.
if atomic.AddInt32(&s.shutdown, 1) != 1 {
log.Infof("SRVR: Server is already in the process of shutting down")
srvrLog.Infof("Server is already in the process of shutting down")
return nil
}
log.Warnf("SRVR: Server shutting down")
srvrLog.Warnf("Server shutting down")
// Stop all the listeners. There will not be any listeners if
// listening is disabled.
@ -723,7 +723,7 @@ func (s *server) Stop() error {
// WaitForShutdown blocks until the main listener and peer handlers are stopped.
func (s *server) WaitForShutdown() {
s.wg.Wait()
log.Infof("SRVR: Server shutdown complete")
srvrLog.Infof("Server shutdown complete")
}
// ScheduleShutdown schedules a server shutdown after the specified duration.
@ -734,7 +734,7 @@ func (s *server) ScheduleShutdown(duration time.Duration) {
if atomic.AddInt32(&s.shutdownSched, 1) != 1 {
return
}
log.Warnf("SRVR: Server shutdown in %v", duration)
srvrLog.Warnf("Server shutdown in %v", duration)
go func() {
remaining := duration
tickDuration := dynamicTickDuration(remaining)
@ -760,7 +760,7 @@ func (s *server) ScheduleShutdown(duration time.Duration) {
ticker.Stop()
ticker = time.NewTicker(tickDuration)
}
log.Warnf("SRVR: Server shutdown in %v", remaining)
srvrLog.Warnf("Server shutdown in %v", remaining)
}
}
}()
@ -826,7 +826,7 @@ func newServer(listenAddrs []string, db btcdb.Db, btcnet btcwire.BitcoinNet) (*s
for _, addr := range ipv4Addrs {
listener, err := net.Listen("tcp4", addr)
if err != nil {
log.Warnf("SRVR: Can't listen on %s: %v", addr,
srvrLog.Warnf("Can't listen on %s: %v", addr,
err)
continue
}
@ -836,7 +836,7 @@ func newServer(listenAddrs []string, db btcdb.Db, btcnet btcwire.BitcoinNet) (*s
for _, addr := range ipv6Addrs {
listener, err := net.Listen("tcp6", addr)
if err != nil {
log.Warnf("SRVR: Can't listen on %s: %v", addr,
srvrLog.Warnf("Can't listen on %s: %v", addr,
err)
continue
}


@ -27,7 +27,7 @@ func mainInterruptHandler() {
for {
select {
case <-interruptChannel:
log.Infof("Received SIGINT (Ctrl+C). Shutting down...")
btcdLog.Infof("Received SIGINT (Ctrl+C). Shutting down...")
for _, callback := range interruptCallbacks {
callback()
}


@ -119,7 +119,7 @@ func upgradeDataPaths() error {
// Only migrate if the old path exists and the new one doesn't.
if fileExists(oldHomePath) && !fileExists(newHomePath) {
// Create the new path.
log.Infof("Migrating application home path from '%s' to '%s'",
btcdLog.Infof("Migrating application home path from '%s' to '%s'",
oldHomePath, newHomePath)
err := os.MkdirAll(newHomePath, 0700)
if err != nil {
@ -157,7 +157,7 @@ func upgradeDataPaths() error {
return err
}
} else {
log.Warnf("Not removing '%s' since it contains files "+
btcdLog.Warnf("Not removing '%s' since it contains files "+
"not created by this application. You may "+
"want to manually move them or delete them.",
oldHomePath)