lbcd/config.go

// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/connmgr"
"github.com/btcsuite/btcd/database"
_ "github.com/btcsuite/btcd/database/ffldb"
"github.com/btcsuite/btcd/mempool"
"github.com/btcsuite/btcd/peer"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/go-socks/socks"
flags "github.com/jessevdk/go-flags"
)
const (
defaultConfigFilename = "btcd.conf"
defaultDataDirname = "data"
defaultLogLevel = "info"
defaultLogDirname = "logs"
defaultLogFilename = "btcd.log"
defaultMaxPeers = 125
defaultBanDuration = time.Hour * 24
defaultBanThreshold = 100
defaultConnectTimeout = time.Second * 30
defaultMaxRPCClients = 10
defaultMaxRPCWebsockets = 25
defaultMaxRPCConcurrentReqs = 20
defaultDbType = "ffldb"
defaultFreeTxRelayLimit = 15.0
defaultTrickleInterval = peer.DefaultTrickleInterval
defaultBlockMinSize = 0
defaultBlockMaxSize = 750000
defaultBlockMinWeight = 0
defaultBlockMaxWeight = 3000000
blockMaxSizeMin = 1000
blockMaxSizeMax = blockchain.MaxBlockBaseSize - 1000
blockMaxWeightMin = 4000
blockMaxWeightMax = blockchain.MaxBlockWeight - 4000
defaultGenerate = false
defaultMaxOrphanTransactions = 100
defaultMaxOrphanTxSize = 100000
defaultSigCacheMaxSize = 100000
sampleConfigFilename = "sample-btcd.conf"
defaultTxIndex = false
defaultAddrIndex = false
)
var (
defaultHomeDir = btcutil.AppDataDir("btcd", false)
defaultConfigFile = filepath.Join(defaultHomeDir, defaultConfigFilename)
defaultDataDir = filepath.Join(defaultHomeDir, defaultDataDirname)
knownDbTypes = database.SupportedDrivers()
defaultRPCKeyFile = filepath.Join(defaultHomeDir, "rpc.key")
defaultRPCCertFile = filepath.Join(defaultHomeDir, "rpc.cert")
defaultLogDir = filepath.Join(defaultHomeDir, defaultLogDirname)
)
// runServiceCommand is only set to a real function on Windows. It is used
// to parse and execute service commands specified via the -s flag.
var runServiceCommand func(string) error
// minUint32 is a helper function to return the minimum of two uint32s.
// This avoids a math import and the need to cast to floats.
func minUint32(a, b uint32) uint32 {
if a < b {
return a
}
return b
}
// config defines the configuration options for btcd.
//
// See loadConfig for details on the configuration load process.
type config struct {
AddCheckpoints []string `long:"addcheckpoint" description:"Add a custom checkpoint. Format: '<height>:<hash>'"`
AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"`
AddrIndex bool `long:"addrindex" description:"Maintain a full address-based transaction index which makes the searchrawtransactions RPC available"`
AgentBlacklist []string `long:"agentblacklist" description:"A comma separated list of user-agent substrings which will cause btcd to reject any peers whose user-agent contains any of the blacklisted substrings."`
AgentWhitelist []string `long:"agentwhitelist" description:"A comma separated list of user-agent substrings which will cause btcd to require all peers' user-agents to contain one of the whitelisted substrings. The blacklist is applied before the whitelist, and an empty whitelist will allow all agents that do not fail the blacklist."`
BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"`
BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."`
BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"`
BlockMinSize uint32 `long:"blockminsize" description:"Minimum block size in bytes to be used when creating a block"`
BlockMaxWeight uint32 `long:"blockmaxweight" description:"Maximum block weight to be used when creating a block"`
BlockMinWeight uint32 `long:"blockminweight" description:"Minimum block weight to be used when creating a block"`
BlockPrioritySize uint32 `long:"blockprioritysize" description:"Size in bytes for high-priority/low-fee transactions when creating a block"`
BlocksOnly bool `long:"blocksonly" description:"Do not accept transactions from remote peers."`
ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"`
ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"`
CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`
DataDir string `short:"b" long:"datadir" description:"Directory to store data"`
DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"`
DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set the log level for individual subsystems -- Use show to list available subsystems"`
DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up and then exits."`
DropCfIndex bool `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."`
DropTxIndex bool `long:"droptxindex" description:"Deletes the hash-based transaction index from the database on start up and then exits."`
ExternalIPs []string `long:"externalip" description:"Add an ip to the list of local addresses we claim to listen on to peers"`
Generate bool `long:"generate" description:"Generate (mine) bitcoins using the CPU"`
FreeTxRelayLimit float64 `long:"limitfreerelay" description:"Limit relay of transactions with no transaction fee to the given amount in thousands of bytes per minute"`
Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 8333, testnet: 18333)"`
LogDir string `long:"logdir" description:"Directory to log output."`
MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"`
MaxPeers int `long:"maxpeers" description:"Max number of inbound and outbound peers"`
MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"`
MinRelayTxFee float64 `long:"minrelaytxfee" description:"The minimum transaction fee in BTC/kB to be considered a non-zero fee."`
DisableBanning bool `long:"nobanning" description:"Disable banning of misbehaving peers"`
NoCFilters bool `long:"nocfilters" description:"Disable committed filtering (CF) support"`
DisableCheckpoints bool `long:"nocheckpoints" description:"Disable built-in checkpoints. Don't do this unless you know what you're doing."`
DisableDNSSeed bool `long:"nodnsseed" description:"Disable DNS seeding for peers"`
DisableListen bool `long:"nolisten" description:"Disable listening for incoming connections -- NOTE: Listening is automatically disabled if the --connect or --proxy options are used without also specifying listen interfaces via --listen"`
NoOnion bool `long:"noonion" description:"Disable connecting to tor hidden services"`
NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"`
NoRelayPriority bool `long:"norelaypriority" description:"Do not require free or low-fee transactions to have high priority for relaying"`
NoWinService bool `long:"nowinservice" description:"Do not start as a background service on Windows -- NOTE: This flag only works on the command line, not in the config file"`
DisableRPC bool `long:"norpc" description:"Disable built-in RPC server -- NOTE: The RPC server is disabled by default if no rpcuser/rpcpass or rpclimituser/rpclimitpass is specified"`
DisableTLS bool `long:"notls" description:"Disable TLS for the RPC server -- NOTE: This is only allowed if the RPC server is bound to localhost"`
OnionProxy string `long:"onion" description:"Connect to tor hidden services via SOCKS5 proxy (eg. 127.0.0.1:9050)"`
OnionProxyPass string `long:"onionpass" default-mask:"-" description:"Password for onion proxy server"`
OnionProxyUser string `long:"onionuser" description:"Username for onion proxy server"`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65535"`
Proxy string `long:"proxy" description:"Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)"`
ProxyPass string `long:"proxypass" default-mask:"-" description:"Password for proxy server"`
ProxyUser string `long:"proxyuser" description:"Username for proxy server"`
RegressionTest bool `long:"regtest" description:"Use the regression test network"`
RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."`
RejectReplacement bool `long:"rejectreplacement" description:"Reject transactions that attempt to replace existing transactions within the mempool through the Replace-By-Fee (RBF) signaling policy."`
RelayNonStd bool `long:"relaynonstd" description:"Relay non-standard transactions regardless of the default settings for the active network."`
RPCCert string `long:"rpccert" description:"File containing the certificate file"`
RPCKey string `long:"rpckey" description:"File containing the certificate key"`
RPCLimitPass string `long:"rpclimitpass" default-mask:"-" description:"Password for limited RPC connections"`
RPCLimitUser string `long:"rpclimituser" description:"Username for limited RPC connections"`
RPCListeners []string `long:"rpclisten" description:"Add an interface/port to listen for RPC connections (default port: 8334, testnet: 18334)"`
RPCMaxClients int `long:"rpcmaxclients" description:"Max number of RPC clients for standard connections"`
RPCMaxConcurrentReqs int `long:"rpcmaxconcurrentreqs" description:"Max number of RPC requests that may be processed concurrently"`
RPCMaxWebsockets int `long:"rpcmaxwebsockets" description:"Max number of RPC websocket connections"`
RPCQuirks bool `long:"rpcquirks" description:"Mirror some JSON-RPC quirks of Bitcoin Core -- NOTE: Discouraged unless interoperability issues need to be worked around"`
RPCPass string `short:"P" long:"rpcpass" default-mask:"-" description:"Password for RPC connections"`
RPCUser string `short:"u" long:"rpcuser" description:"Username for RPC connections"`
SigCacheMaxSize uint `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"`
SimNet bool `long:"simnet" description:"Use the simulation test network"`
TestNet3 bool `long:"testnet" description:"Use the test network"`
TorIsolation bool `long:"torisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."`
TrickleInterval time.Duration `long:"trickleinterval" description:"Minimum time between attempts to send new inventory to a connected peer"`
TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."`
Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"`
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
Whitelists []string `long:"whitelist" description:"Add an IP network or IP that will not be banned. (eg. 192.168.1.0/24 or ::1)"`
lookup func(string) ([]net.IP, error)
oniondial func(string, string, time.Duration) (net.Conn, error)
dial func(string, string, time.Duration) (net.Conn, error)
addCheckpoints []chaincfg.Checkpoint
miningAddrs []btcutil.Address
minRelayTxFee btcutil.Amount
whitelists []*net.IPNet
}
// serviceOptions defines the configuration options for the daemon as a service on
// Windows.
type serviceOptions struct {
ServiceCommand string `short:"s" long:"service" description:"Service command {install, remove, start, stop}"`
}
// cleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
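// For example, a leading ~ in "~/foo" is replaced with the parent of the
// default btcd home directory (typically the user's home directory), and
// "$SOMEVAR/foo" is expanded from the environment before cleaning.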
func cleanAndExpandPath(path string) string {
// Expand initial ~ to OS specific home directory.
if strings.HasPrefix(path, "~") {
homeDir := filepath.Dir(defaultHomeDir)
path = strings.Replace(path, "~", homeDir, 1)
}
// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
// but the variables can still be expanded via POSIX-style $VARIABLE.
return filepath.Clean(os.ExpandEnv(path))
}
// validLogLevel returns whether or not logLevel is a valid debug log level.
func validLogLevel(logLevel string) bool {
switch logLevel {
case "trace":
fallthrough
case "debug":
fallthrough
case "info":
fallthrough
case "warn":
fallthrough
case "error":
fallthrough
case "critical":
return true
}
return false
}
// supportedSubsystems returns a sorted slice of the supported subsystems for
// logging purposes.
func supportedSubsystems() []string {
// Convert the subsystemLoggers map keys to a slice.
subsystems := make([]string, 0, len(subsystemLoggers))
for subsysID := range subsystemLoggers {
subsystems = append(subsystems, subsysID)
}
// Sort the subsystems for stable display.
sort.Strings(subsystems)
return subsystems
}
// parseAndSetDebugLevels attempts to parse the specified debug level and set
// the levels accordingly. An appropriate error is returned if anything is
// invalid.
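// For example, "debug" applies the debug level to every subsystem, while a
// list such as "PEER=trace,RPCS=debug" (assuming those subsystem IDs are
// registered) sets the level for each named subsystem.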
func parseAndSetDebugLevels(debugLevel string) error {
// When the specified string doesn't have any delimiters, treat it as
// the log level for all subsystems.
if !strings.Contains(debugLevel, ",") && !strings.Contains(debugLevel, "=") {
// Validate debug log level.
if !validLogLevel(debugLevel) {
str := "The specified debug level [%v] is invalid"
return fmt.Errorf(str, debugLevel)
}
// Change the logging level for all subsystems.
setLogLevels(debugLevel)
return nil
}
// Split the specified string into subsystem/level pairs while detecting
// issues and update the log levels accordingly.
for _, logLevelPair := range strings.Split(debugLevel, ",") {
if !strings.Contains(logLevelPair, "=") {
str := "The specified debug level contains an invalid " +
"subsystem/level pair [%v]"
return fmt.Errorf(str, logLevelPair)
}
// Extract the specified subsystem and log level.
fields := strings.Split(logLevelPair, "=")
subsysID, logLevel := fields[0], fields[1]
// Validate subsystem.
if _, exists := subsystemLoggers[subsysID]; !exists {
str := "The specified subsystem [%v] is invalid -- " +
"supported subsytems %v"
return fmt.Errorf(str, subsysID, supportedSubsystems())
}
// Validate log level.
if !validLogLevel(logLevel) {
str := "The specified debug level [%v] is invalid"
return fmt.Errorf(str, logLevel)
}
setLogLevel(subsysID, logLevel)
}
return nil
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// removeDuplicateAddresses returns a new slice with all duplicate entries in
// addrs removed.
func removeDuplicateAddresses(addrs []string) []string {
result := make([]string, 0, len(addrs))
seen := map[string]struct{}{}
for _, val := range addrs {
if _, ok := seen[val]; !ok {
result = append(result, val)
seen[val] = struct{}{}
}
}
return result
}
// normalizeAddress returns addr with the passed default port appended if
// there is not already a port specified.
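// For example, normalizeAddress("127.0.0.1", "8333") returns
// "127.0.0.1:8333", while an address that already includes a port is
// returned unchanged.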
func normalizeAddress(addr, defaultPort string) string {
_, _, err := net.SplitHostPort(addr)
if err != nil {
return net.JoinHostPort(addr, defaultPort)
}
return addr
}
// normalizeAddresses returns a new slice with all the passed peer addresses
// normalized with the given default port, and all duplicates removed.
func normalizeAddresses(addrs []string, defaultPort string) []string {
for i, addr := range addrs {
addrs[i] = normalizeAddress(addr, defaultPort)
}
return removeDuplicateAddresses(addrs)
}
// newCheckpointFromStr parses checkpoints in the '<height>:<hash>' format.
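// For example, "300000:<64-character hex block hash>" produces a
// chaincfg.Checkpoint with Height 300000 and the parsed hash.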
func newCheckpointFromStr(checkpoint string) (chaincfg.Checkpoint, error) {
parts := strings.Split(checkpoint, ":")
if len(parts) != 2 {
return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+
"checkpoint %q -- use the syntax <height>:<hash>",
checkpoint)
}
height, err := strconv.ParseInt(parts[0], 10, 32)
if err != nil {
return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+
"checkpoint %q due to malformed height", checkpoint)
}
if len(parts[1]) == 0 {
return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+
"checkpoint %q due to missing hash", checkpoint)
}
hash, err := chainhash.NewHashFromStr(parts[1])
if err != nil {
return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+
"checkpoint %q due to malformed hash", checkpoint)
}
return chaincfg.Checkpoint{
Height: int32(height),
Hash: hash,
}, nil
}
// parseCheckpoints checks the checkpoint strings for valid syntax
// ('<height>:<hash>') and parses them to chaincfg.Checkpoint instances.
func parseCheckpoints(checkpointStrings []string) ([]chaincfg.Checkpoint, error) {
if len(checkpointStrings) == 0 {
return nil, nil
}
checkpoints := make([]chaincfg.Checkpoint, len(checkpointStrings))
for i, cpString := range checkpointStrings {
checkpoint, err := newCheckpointFromStr(cpString)
if err != nil {
return nil, err
}
checkpoints[i] = checkpoint
}
return checkpoints, nil
}
// fileExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// newConfigParser returns a new command line flags parser.
func newConfigParser(cfg *config, so *serviceOptions, options flags.Options) *flags.Parser {
parser := flags.NewParser(cfg, options)
if runtime.GOOS == "windows" {
parser.AddGroup("Service Options", "Service Options", so)
}
return parser
}
// loadConfig initializes and parses the config using a config file and command
// line options.
//
// The configuration proceeds as follows:
// 1) Start with a default config with sane settings
// 2) Pre-parse the command line to check for an alternative config file
// 3) Load configuration file overwriting defaults with any specified options
// 4) Parse CLI options and overwrite/add any specified options
//
// The above results in btcd functioning properly without any config settings
// while still allowing the user to override settings with config files and
// command line options. Command line options always take precedence.
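// For example, a debuglevel value set in the config file is overridden by
// a --debuglevel flag supplied on the command line.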
func loadConfig() (*config, []string, error) {
// Default config.
cfg := config{
ConfigFile: defaultConfigFile,
DebugLevel: defaultLogLevel,
MaxPeers: defaultMaxPeers,
BanDuration: defaultBanDuration,
BanThreshold: defaultBanThreshold,
RPCMaxClients: defaultMaxRPCClients,
RPCMaxWebsockets: defaultMaxRPCWebsockets,
RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs,
DataDir: defaultDataDir,
LogDir: defaultLogDir,
DbType: defaultDbType,
RPCKey: defaultRPCKeyFile,
RPCCert: defaultRPCCertFile,
MinRelayTxFee: mempool.DefaultMinRelayTxFee.ToBTC(),
FreeTxRelayLimit: defaultFreeTxRelayLimit,
TrickleInterval: defaultTrickleInterval,
BlockMinSize: defaultBlockMinSize,
BlockMaxSize: defaultBlockMaxSize,
BlockMinWeight: defaultBlockMinWeight,
BlockMaxWeight: defaultBlockMaxWeight,
BlockPrioritySize: mempool.DefaultBlockPrioritySize,
MaxOrphanTxs: defaultMaxOrphanTransactions,
SigCacheMaxSize: defaultSigCacheMaxSize,
Generate: defaultGenerate,
TxIndex: defaultTxIndex,
AddrIndex: defaultAddrIndex,
}
// Service options which are only added on Windows.
serviceOpts := serviceOptions{}
// Pre-parse the command line options to see if an alternative config
// file or the version flag was specified. Any errors aside from the
// help message error can be ignored here since they will be caught by
// the final parse below.
preCfg := cfg
preParser := newConfigParser(&preCfg, &serviceOpts, flags.HelpFlag)
_, err := preParser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
fmt.Fprintln(os.Stderr, err)
return nil, nil, err
}
}
// Show the version and exit if the version flag was specified.
appName := filepath.Base(os.Args[0])
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
usageMessage := fmt.Sprintf("Use %s -h to show usage", appName)
if preCfg.ShowVersion {
fmt.Println(appName, "version", version())
os.Exit(0)
}
// Perform service command and exit if specified. Invalid service
// commands show an appropriate error. Only runs on Windows since
// the runServiceCommand function will be nil when not on Windows.
if serviceOpts.ServiceCommand != "" && runServiceCommand != nil {
err := runServiceCommand(serviceOpts.ServiceCommand)
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
os.Exit(0)
}
// Load additional config from file.
var configFileError error
parser := newConfigParser(&cfg, &serviceOpts, flags.Default)
if !(preCfg.RegressionTest || preCfg.SimNet) || preCfg.ConfigFile !=
defaultConfigFile {
if _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) {
err := createDefaultConfigFile(preCfg.ConfigFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating a "+
"default config file: %v\n", err)
}
}
err := flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile)
if err != nil {
if _, ok := err.(*os.PathError); !ok {
fmt.Fprintf(os.Stderr, "Error parsing config "+
"file: %v\n", err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
configFileError = err
}
}
// Don't add peers from the config file when in regression test mode.
if preCfg.RegressionTest && len(cfg.AddPeers) > 0 {
cfg.AddPeers = nil
}
// Parse command line options again to ensure they take precedence.
remainingArgs, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
fmt.Fprintln(os.Stderr, usageMessage)
}
return nil, nil, err
}
// Create the home directory if it doesn't already exist.
funcName := "loadConfig"
err = os.MkdirAll(defaultHomeDir, 0700)
if err != nil {
// Show a nicer error message if it's because a symlink is
// linked to a directory that does not exist (probably because
// it's not mounted).
if e, ok := err.(*os.PathError); ok && os.IsExist(err) {
if link, lerr := os.Readlink(e.Path); lerr == nil {
str := "is symlink %s -> %s mounted?"
err = fmt.Errorf(str, e.Path, link)
}
}
str := "%s: Failed to create home directory: %v"
err := fmt.Errorf(str, funcName, err)
fmt.Fprintln(os.Stderr, err)
return nil, nil, err
}
// Multiple networks can't be selected simultaneously.
numNets := 0
// Count number of network flags passed; assign active network params
// while we're at it
if cfg.TestNet3 {
numNets++
activeNetParams = &testNet3Params
}
if cfg.RegressionTest {
numNets++
activeNetParams = &regressionNetParams
}
if cfg.SimNet {
numNets++
// Also disable dns seeding on the simulation test network.
activeNetParams = &simNetParams
cfg.DisableDNSSeed = true
}
if numNets > 1 {
str := "%s: The testnet, regtest, segnet, and simnet params " +
"can't be used together -- choose one of the four"
err := fmt.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Set the default policy for relaying non-standard transactions
// according to the default of the active network. The set
// configuration value takes precedence over the default value for the
// selected network.
relayNonStd := activeNetParams.RelayNonStdTxs
switch {
case cfg.RelayNonStd && cfg.RejectNonStd:
str := "%s: rejectnonstd and relaynonstd cannot be used " +
"together -- choose only one"
err := fmt.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
case cfg.RejectNonStd:
relayNonStd = false
case cfg.RelayNonStd:
relayNonStd = true
}
cfg.RelayNonStd = relayNonStd
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.
// All data is specific to a network, so namespacing the data directory
// means each individual piece of serialized data does not have to
// worry about changing names per network and such.
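// For example, with default settings on a POSIX system the main network
// data ends up under something like ~/.btcd/data/mainnet.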
cfg.DataDir = cleanAndExpandPath(cfg.DataDir)
cfg.DataDir = filepath.Join(cfg.DataDir, netName(activeNetParams))
// Append the network type to the log directory so it is "namespaced"
// per network in the same fashion as the data directory.
cfg.LogDir = cleanAndExpandPath(cfg.LogDir)
cfg.LogDir = filepath.Join(cfg.LogDir, netName(activeNetParams))
// Special show command to list supported subsystems and exit.
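// For example, running btcd with --debuglevel=show prints the list of
// supported subsystems and exits.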
if cfg.DebugLevel == "show" {
fmt.Println("Supported subsystems", supportedSubsystems())
os.Exit(0)
}
// Initialize log rotation. After log rotation has been initialized, the
// logger variables may be used.
initLogRotator(filepath.Join(cfg.LogDir, defaultLogFilename))
// Parse, validate, and set debug log level(s).
if err := parseAndSetDebugLevels(cfg.DebugLevel); err != nil {
err := fmt.Errorf("%s: %v", funcName, err.Error())
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Validate database type.
if !validDbType(cfg.DbType) {
str := "%s: The specified database type [%v] is invalid -- " +
"supported types %v"
err := fmt.Errorf(str, funcName, cfg.DbType, knownDbTypes)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Validate profile port number
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
str := "%s: The profile port must be between 1024 and 65535"
err := fmt.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
}
// Don't allow ban durations that are too short.
if cfg.BanDuration < time.Second {
str := "%s: The banduration option may not be less than 1s -- parsed [%v]"
err := fmt.Errorf(str, funcName, cfg.BanDuration)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Validate any given whitelisted IP addresses and networks.
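// Each entry may be either a CIDR network (e.g. 192.168.1.0/24) or a bare
// IP (e.g. ::1); bare IPs are converted to single-host /32 or /128 networks.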
if len(cfg.Whitelists) > 0 {
var ip net.IP
cfg.whitelists = make([]*net.IPNet, 0, len(cfg.Whitelists))
for _, addr := range cfg.Whitelists {
_, ipnet, err := net.ParseCIDR(addr)
if err != nil {
ip = net.ParseIP(addr)
if ip == nil {
str := "%s: The whitelist value of '%s' is invalid"
err = fmt.Errorf(str, funcName, addr)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
var bits int
if ip.To4() == nil {
// IPv6
bits = 128
} else {
bits = 32
}
ipnet = &net.IPNet{
IP: ip,
Mask: net.CIDRMask(bits, bits),
}
}
cfg.whitelists = append(cfg.whitelists, ipnet)
}
}
// --addPeer and --connect do not mix.
if len(cfg.AddPeers) > 0 && len(cfg.ConnectPeers) > 0 {
str := "%s: the --addpeer and --connect options can not be " +
"mixed"
err := fmt.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --proxy or --connect without --listen disables listening.
if (cfg.Proxy != "" || len(cfg.ConnectPeers) > 0) &&
len(cfg.Listeners) == 0 {
cfg.DisableListen = true
}
// Connect means no DNS seeding.
if len(cfg.ConnectPeers) > 0 {
cfg.DisableDNSSeed = true
}
// Add the default listener if none were specified. The default
// listener is all addresses on the listen port for the network
// we are to connect to.
if len(cfg.Listeners) == 0 {
cfg.Listeners = []string{
net.JoinHostPort("", activeNetParams.DefaultPort),
}
}
// Check to make sure limited and admin users don't have the same username
if cfg.RPCUser == cfg.RPCLimitUser && cfg.RPCUser != "" {
str := "%s: --rpcuser and --rpclimituser must not specify the " +
"same username"
err := fmt.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Check to make sure limited and admin users don't have the same password
if cfg.RPCPass == cfg.RPCLimitPass && cfg.RPCPass != "" {
str := "%s: --rpcpass and --rpclimitpass must not specify the " +
"same password"
err := fmt.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// The RPC server is disabled if no username or password is provided.
if (cfg.RPCUser == "" || cfg.RPCPass == "") &&
(cfg.RPCLimitUser == "" || cfg.RPCLimitPass == "") {
cfg.DisableRPC = true
}
if cfg.DisableRPC {
btcdLog.Infof("RPC service is disabled")
}
// Default RPC to listen on localhost only.
if !cfg.DisableRPC && len(cfg.RPCListeners) == 0 {
addrs, err := net.LookupHost("localhost")
if err != nil {
return nil, nil, err
}
cfg.RPCListeners = make([]string, 0, len(addrs))
for _, addr := range addrs {
addr = net.JoinHostPort(addr, activeNetParams.rpcPort)
cfg.RPCListeners = append(cfg.RPCListeners, addr)
}
}
if cfg.RPCMaxConcurrentReqs < 0 {
str := "%s: The rpcmaxwebsocketconcurrentrequests option may " +
"not be less than 0 -- parsed [%d]"
err := fmt.Errorf(str, funcName, cfg.RPCMaxConcurrentReqs)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Validate the minrelaytxfee.
cfg.minRelayTxFee, err = btcutil.NewAmount(cfg.MinRelayTxFee)
if err != nil {
str := "%s: invalid minrelaytxfee: %v"
err := fmt.Errorf(str, funcName, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Limit the max block size to a sane value.
if cfg.BlockMaxSize < blockMaxSizeMin || cfg.BlockMaxSize >
blockMaxSizeMax {
str := "%s: The blockmaxsize option must be in between %d " +
"and %d -- parsed [%d]"
err := fmt.Errorf(str, funcName, blockMaxSizeMin,
blockMaxSizeMax, cfg.BlockMaxSize)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Limit the max block weight to a sane value.
if cfg.BlockMaxWeight < blockMaxWeightMin ||
cfg.BlockMaxWeight > blockMaxWeightMax {
str := "%s: The blockmaxweight option must be in between %d " +
"and %d -- parsed [%d]"
err := fmt.Errorf(str, funcName, blockMaxWeightMin,
blockMaxWeightMax, cfg.BlockMaxWeight)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Limit the max orphan count to a sane value.
if cfg.MaxOrphanTxs < 0 {
str := "%s: The maxorphantx option may not be less than 0 " +
"-- parsed [%d]"
err := fmt.Errorf(str, funcName, cfg.MaxOrphanTxs)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Limit the block priority and minimum block sizes to max block size.
cfg.BlockPrioritySize = minUint32(cfg.BlockPrioritySize, cfg.BlockMaxSize)
cfg.BlockMinSize = minUint32(cfg.BlockMinSize, cfg.BlockMaxSize)
cfg.BlockMinWeight = minUint32(cfg.BlockMinWeight, cfg.BlockMaxWeight)
switch {
// If the max block size isn't set, but the max weight is, then we'll
// set the limit for the max block size to a safe limit so weight takes
// precedence.
case cfg.BlockMaxSize == defaultBlockMaxSize &&
cfg.BlockMaxWeight != defaultBlockMaxWeight:
cfg.BlockMaxSize = blockchain.MaxBlockBaseSize - 1000
// If the max block weight isn't set, but the block size is, then we'll
// scale the set weight accordingly based on the max block size value.
case cfg.BlockMaxSize != defaultBlockMaxSize &&
cfg.BlockMaxWeight == defaultBlockMaxWeight:
cfg.BlockMaxWeight = cfg.BlockMaxSize * blockchain.WitnessScaleFactor
}
// Look for illegal characters in the user agent comments.
for _, uaComment := range cfg.UserAgentComments {
if strings.ContainsAny(uaComment, "/:()") {
err := fmt.Errorf("%s: The following characters must not "+
"appear in user agent comments: '/', ':', '(', ')'",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
}
// --txindex and --droptxindex do not mix.
if cfg.TxIndex && cfg.DropTxIndex {
err := fmt.Errorf("%s: the --txindex and --droptxindex "+
"options may not be activated at the same time",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --addrindex and --dropaddrindex do not mix.
if cfg.AddrIndex && cfg.DropAddrIndex {
err := fmt.Errorf("%s: the --addrindex and --dropaddrindex "+
"options may not be activated at the same time",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --addrindex and --droptxindex do not mix.
if cfg.AddrIndex && cfg.DropTxIndex {
err := fmt.Errorf("%s: the --addrindex and --droptxindex "+
"options may not be activated at the same time "+
"because the address index relies on the transaction "+
"index",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Check mining addresses are valid and save the parsed versions.
cfg.miningAddrs = make([]btcutil.Address, 0, len(cfg.MiningAddrs))
for _, strAddr := range cfg.MiningAddrs {
addr, err := btcutil.DecodeAddress(strAddr, activeNetParams.Params)
if err != nil {
str := "%s: mining address '%s' failed to decode: %v"
err := fmt.Errorf(str, funcName, strAddr, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
if !addr.IsForNet(activeNetParams.Params) {
str := "%s: mining address '%s' is on the wrong network"
err := fmt.Errorf(str, funcName, strAddr)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
cfg.miningAddrs = append(cfg.miningAddrs, addr)
}
// Ensure there is at least one mining address when the generate flag is
// set.
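// For example (illustrative invocation), running "btcd --generate" without
// any --miningaddr=<addr> entries fails this check and the process exits.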
if cfg.Generate && len(cfg.MiningAddrs) == 0 {
str := "%s: the generate flag is set, but there are no mining " +
"addresses specified "
err := fmt.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Add default port to all listener addresses if needed and remove
// duplicate addresses.
cfg.Listeners = normalizeAddresses(cfg.Listeners,
activeNetParams.DefaultPort)
// Add default port to all rpc listener addresses if needed and remove
// duplicate addresses.
cfg.RPCListeners = normalizeAddresses(cfg.RPCListeners,
activeNetParams.rpcPort)
// Only allow TLS to be disabled if the RPC is bound to localhost
// addresses.
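// For example (illustrative values), --notls combined with
// --rpclisten=127.0.0.1:8334 passes the check below, whereas
// --rpclisten=0.0.0.0:8334 is rejected.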
if !cfg.DisableRPC && cfg.DisableTLS {
allowedTLSListeners := map[string]struct{}{
"localhost": {},
"127.0.0.1": {},
"::1": {},
}
for _, addr := range cfg.RPCListeners {
host, _, err := net.SplitHostPort(addr)
if err != nil {
str := "%s: RPC listen interface '%s' is " +
"invalid: %v"
err := fmt.Errorf(str, funcName, addr, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
if _, ok := allowedTLSListeners[host]; !ok {
str := "%s: the --notls option may not be used " +
"when binding RPC to non localhost " +
"addresses: %s"
err := fmt.Errorf(str, funcName, addr)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
}
}
// Add default port to all added peer addresses if needed and remove
// duplicate addresses.
cfg.AddPeers = normalizeAddresses(cfg.AddPeers,
activeNetParams.DefaultPort)
cfg.ConnectPeers = normalizeAddresses(cfg.ConnectPeers,
activeNetParams.DefaultPort)
// --noonion and --onion do not mix.
if cfg.NoOnion && cfg.OnionProxy != "" {
err := fmt.Errorf("%s: the --noonion and --onion options may "+
"not be activated at the same time", funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Check the checkpoints for syntax errors.
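// Each entry is expected by parseCheckpoints to be of the form
// "<height>:<hash>", e.g. --addcheckpoint=<height>:<blockhash>
// (illustrative placeholder values).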
cfg.addCheckpoints, err = parseCheckpoints(cfg.AddCheckpoints)
if err != nil {
str := "%s: Error parsing checkpoints: %v"
err := fmt.Errorf(str, funcName, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Tor stream isolation requires either proxy or onion proxy to be set.
if cfg.TorIsolation && cfg.Proxy == "" && cfg.OnionProxy == "" {
str := "%s: Tor stream isolation requires either proxy or " +
"onionproxy to be set"
err := fmt.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Setup dial and DNS resolution (lookup) functions depending on the
// specified options. The default is to use the standard
// net.DialTimeout function as well as the system DNS resolver. When a
// proxy is specified, the dial function is set to the proxy specific
// dial function and the lookup is set to use tor (unless --noonion is
// specified in which case the system DNS resolver is used).
cfg.dial = net.DialTimeout
cfg.lookup = net.LookupIP
if cfg.Proxy != "" {
_, _, err := net.SplitHostPort(cfg.Proxy)
if err != nil {
str := "%s: Proxy address '%s' is invalid: %v"
err := fmt.Errorf(str, funcName, cfg.Proxy, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// The Tor isolation flag means the proxy credentials will be
// overridden, unless an onion proxy is also configured, in which
// case that proxy's credentials are overridden instead.
torIsolation := false
if cfg.TorIsolation && cfg.OnionProxy == "" &&
(cfg.ProxyUser != "" || cfg.ProxyPass != "") {
torIsolation = true
fmt.Fprintln(os.Stderr, "Tor isolation set -- "+
"overriding specified proxy user credentials")
}
proxy := &socks.Proxy{
Addr: cfg.Proxy,
Username: cfg.ProxyUser,
Password: cfg.ProxyPass,
TorIsolation: torIsolation,
}
cfg.dial = proxy.DialTimeout
// Treat the proxy as tor and perform DNS resolution through it
// unless the --noonion flag is set or there is an
// onion-specific proxy configured.
if !cfg.NoOnion && cfg.OnionProxy == "" {
cfg.lookup = func(host string) ([]net.IP, error) {
return connmgr.TorLookupIP(host, cfg.Proxy)
}
}
}
// Setup onion address dial function depending on the specified options.
// The default is to use the same dial function selected above. However,
// when an onion-specific proxy is specified, the onion address dial
// function is set to use the onion-specific proxy while leaving the
// normal dial function as selected above. This allows .onion address
// traffic to be routed through a different proxy than normal traffic.
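// For example (illustrative values), --proxy=127.0.0.1:8888 together with
// --onion=127.0.0.1:9050 routes .onion traffic through the SOCKS proxy on
// port 9050 while all other traffic uses the proxy on port 8888.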
if cfg.OnionProxy != "" {
_, _, err := net.SplitHostPort(cfg.OnionProxy)
if err != nil {
str := "%s: Onion proxy address '%s' is invalid: %v"
err := fmt.Errorf(str, funcName, cfg.OnionProxy, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Tor isolation flag means onion proxy credentials will be
// overridden.
if cfg.TorIsolation &&
(cfg.OnionProxyUser != "" || cfg.OnionProxyPass != "") {
fmt.Fprintln(os.Stderr, "Tor isolation set -- "+
"overriding specified onionproxy user "+
"credentials ")
}
cfg.oniondial = func(network, addr string, timeout time.Duration) (net.Conn, error) {
proxy := &socks.Proxy{
Addr: cfg.OnionProxy,
Username: cfg.OnionProxyUser,
Password: cfg.OnionProxyPass,
TorIsolation: cfg.TorIsolation,
}
return proxy.DialTimeout(network, addr, timeout)
}
// When configured in bridge mode (both --onion and --proxy are
// configured), it means that the proxy configured by --proxy is
// not a tor proxy, so override the DNS resolution to use the
// onion-specific proxy.
if cfg.Proxy != "" {
cfg.lookup = func(host string) ([]net.IP, error) {
return connmgr.TorLookupIP(host, cfg.OnionProxy)
}
}
} else {
cfg.oniondial = cfg.dial
}
// Specifying --noonion means the onion address dial function results in
// an error.
if cfg.NoOnion {
cfg.oniondial = func(a, b string, t time.Duration) (net.Conn, error) {
return nil, errors.New("tor has been disabled")
}
}
// Warn about missing config file only after all other configuration is
// done. This prevents the warning on help messages and invalid
// options. Note this should go directly before the return.
if configFileError != nil {
btcdLog.Warnf("%v", configFileError)
}
return &cfg, remainingArgs, nil
}

// createDefaultConfigFile copies the sample-btcd.conf file to the given
// destination path and populates it with a randomly generated RPC username
// and password.
func createDefaultConfigFile(destinationPath string) error {
// Create the destination directory if it does not exist.
err := os.MkdirAll(filepath.Dir(destinationPath), 0700)
if err != nil {
return err
}
// We assume the sample config file is in the same directory as the binary.
path, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return err
}
sampleConfigPath := filepath.Join(path, sampleConfigFilename)
// We generate a random RPC username and password.
randomBytes := make([]byte, 20)
_, err = rand.Read(randomBytes)
if err != nil {
return err
}
generatedRPCUser := base64.StdEncoding.EncodeToString(randomBytes)
_, err = rand.Read(randomBytes)
if err != nil {
return err
}
generatedRPCPass := base64.StdEncoding.EncodeToString(randomBytes)
src, err := os.Open(sampleConfigPath)
if err != nil {
return err
}
defer src.Close()
dest, err := os.OpenFile(destinationPath,
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer dest.Close()
// We copy every line from the sample config file to the destination,
// only replacing the two lines for rpcuser and rpcpass
reader := bufio.NewReader(src)
for err != io.EOF {
var line string
line, err = reader.ReadString('\n')
if err != nil && err != io.EOF {
return err
}
if strings.Contains(line, "rpcuser=") {
line = "rpcuser=" + generatedRPCUser + "\n"
} else if strings.Contains(line, "rpcpass=") {
line = "rpcpass=" + generatedRPCPass + "\n"
}
if _, err := dest.WriteString(line); err != nil {
return err
}
}
return nil
}
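
// As an illustrative sketch, loadConfig is expected to invoke
// createDefaultConfigFile when the default configuration file does not exist
// yet, roughly along these lines, where preCfg.ConfigFile stands for the
// pre-parsed config file path:
//
//	if _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) {
//		if err := createDefaultConfigFile(preCfg.ConfigFile); err != nil {
//			fmt.Fprintf(os.Stderr, "Error creating a default "+
//				"config file: %v\n", err)
//		}
//	}
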
// btcdDial connects to the address on the named network using the appropriate
// dial function depending on the address and configuration options. For
// example, .onion addresses will be dialed using the onion specific proxy if
// one was specified, but will otherwise use the normal dial function (which
// could itself use a proxy or not).
func btcdDial(addr net.Addr) (net.Conn, error) {
if strings.Contains(addr.String(), ".onion:") {
return cfg.oniondial(addr.Network(), addr.String(),
defaultConnectTimeout)
}
return cfg.dial(addr.Network(), addr.String(), defaultConnectTimeout)
}
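
// As an illustrative sketch, btcdDial is intended to be wired into the
// connection manager as its dial function so that .onion peers are
// transparently routed through the onion proxy, roughly:
//
//	cmgr, err := connmgr.New(&connmgr.Config{
//		Dial: btcdDial,
//		// remaining fields omitted for brevity
//	})
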
// btcdLookup resolves the IP of the given host using the correct DNS lookup
// function depending on the configuration options. For example, addresses will
// be resolved using tor when the --proxy flag was specified unless --noonion
// was also specified in which case the normal system DNS resolver will be used.
//
// Any attempt to resolve a tor address (.onion) will return an error since they
// are not intended to be resolved outside of the tor proxy.
func btcdLookup(host string) ([]net.IP, error) {
if strings.HasSuffix(host, ".onion") {
return nil, fmt.Errorf("attempt to resolve tor address %s", host)
}
return cfg.lookup(host)
}
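
// As an illustrative sketch, callers resolve peer host names through
// btcdLookup rather than net.LookupIP so the proxy-aware resolver selected
// above is honored (hypothetical host name):
//
//	ips, err := btcdLookup("node.example.com")
//	if err != nil {
//		// handle the resolution failure
//	}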