// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"crypto/sha256"
	"crypto/subtle"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/big"
	"math/rand"
	"net"
	"net/http"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/websocket"
	"github.com/lbryio/lbcd/addrmgr"
	"github.com/lbryio/lbcd/blockchain"
	"github.com/lbryio/lbcd/blockchain/indexers"
	"github.com/lbryio/lbcd/btcec"
	"github.com/lbryio/lbcd/btcjson"
	"github.com/lbryio/lbcd/chaincfg"
	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/database"
	"github.com/lbryio/lbcd/mempool"
	"github.com/lbryio/lbcd/mining"
	"github.com/lbryio/lbcd/mining/cpuminer"
	"github.com/lbryio/lbcd/peer"
	"github.com/lbryio/lbcd/txscript"
	"github.com/lbryio/lbcd/wire"
	btcutil "github.com/lbryio/lbcutil"
)

// API version constants
const (
	jsonrpcSemverString = "1.3.0"
	jsonrpcSemverMajor  = 1
	jsonrpcSemverMinor  = 3
	jsonrpcSemverPatch  = 0
)

const (
	// rpcAuthTimeoutSeconds is the number of seconds a connection to the
	// RPC server is allowed to stay open without authenticating before it
	// is closed.
	rpcAuthTimeoutSeconds = 10

	// uint256Size is the number of bytes needed to represent an unsigned
	// 256-bit integer.
	uint256Size = 32

	// gbtNonceRange is two 32-bit big-endian hexadecimal integers which
	// represent the valid ranges of nonces returned by the getblocktemplate
	// RPC.
	gbtNonceRange = "00000000ffffffff"

	// gbtRegenerateSeconds is the number of seconds that must pass before
	// a new template is generated when the previous block hash has not
	// changed and there have been changes to the available transactions
	// in the memory pool.
	gbtRegenerateSeconds = 60

	// maxProtocolVersion is the max protocol version the server supports.
	maxProtocolVersion = 70002
)

var (
	// gbtMutableFields are the manipulations the server allows to be made
	// to block templates generated by the getblocktemplate RPC. It is
	// declared here to avoid the overhead of creating the slice on every
	// invocation for constant data.
	gbtMutableFields = []string{
		"time", "transactions/add", "prevblock", "coinbase/append",
	}

	// gbtCoinbaseAux describes additional data that miners should include
	// in the coinbase signature script. It is declared here to avoid the
	// overhead of creating a new object on every invocation for constant
	// data.
	gbtCoinbaseAux = &btcjson.GetBlockTemplateResultAux{
		Flags: hex.EncodeToString(builderScript(txscript.
			NewScriptBuilder().
			AddData([]byte(mining.CoinbaseFlags)))),
	}

	// gbtCapabilities describes additional capabilities returned with a
	// block template generated by the getblocktemplate RPC. It is
	// declared here to avoid the overhead of creating the slice on every
	// invocation for constant data.
	gbtCapabilities = []string{"proposal"}

	// JSON 2.0 batched request prefix
	batchedRequestPrefix = []byte("[")
)

// Errors
var (
	// ErrRPCUnimplemented is an error returned to RPC clients when the
	// provided command is recognized, but not implemented.
	ErrRPCUnimplemented = &btcjson.RPCError{
		Code:    btcjson.ErrRPCUnimplemented,
		Message: "Command unimplemented",
	}

	// ErrRPCNoWallet is an error returned to RPC clients when the provided
	// command is recognized as a wallet command.
	ErrRPCNoWallet = &btcjson.RPCError{
		Code:    btcjson.ErrRPCNoWallet,
		Message: "This implementation does not implement wallet commands",
	}
)

type commandHandler func(*rpcServer, interface{}, <-chan struct{}) (interface{}, error)

// rpcHandlers maps RPC command strings to appropriate handler functions.
// This is set by init because help references rpcHandlers and thus causes
// a dependency loop.
var rpcHandlers map[string]commandHandler
var rpcHandlersBeforeInit = map[string]commandHandler{
	"addnode":                handleAddNode,
	"createrawtransaction":   handleCreateRawTransaction,
	"debuglevel":             handleDebugLevel,
	"decoderawtransaction":   handleDecodeRawTransaction,
	"decodescript":           handleDecodeScript,
	"estimatefee":            handleEstimateFee,
	"estimatesmartfee":       handleEstimateSmartFee,
	"generate":               handleGenerate,
	"generatetoaddress":      handleGenerateToAddress,
	"getaddednodeinfo":       handleGetAddedNodeInfo,
	"getbestblock":           handleGetBestBlock,
	"getbestblockhash":       handleGetBestBlockHash,
	"getblock":               handleGetBlock,
	"getblockchaininfo":      handleGetBlockChainInfo,
	"getblockcount":          handleGetBlockCount,
	"getblockhash":           handleGetBlockHash,
	"getblockheader":         handleGetBlockHeader,
	"getchaintips":           handleGetChainTips,
	"getblocktemplate":       handleGetBlockTemplate,
	"getcfilter":             handleGetCFilter,
	"getcfilterheader":       handleGetCFilterHeader,
	"getconnectioncount":     handleGetConnectionCount,
	"getcurrentnet":          handleGetCurrentNet,
	"getdifficulty":          handleGetDifficulty,
	"getgenerate":            handleGetGenerate,
	"gethashespersec":        handleGetHashesPerSec,
	"getheaders":             handleGetHeaders,
	"getinfo":                handleGetInfo,
	"getmempoolinfo":         handleGetMempoolInfo,
	"getmininginfo":          handleGetMiningInfo,
	"getnettotals":           handleGetNetTotals,
	"getnetworkhashps":       handleGetNetworkHashPS,
	"getnetworkinfo":         handleGetNetworkInfo,
	"getnodeaddresses":       handleGetNodeAddresses,
	"getpeerinfo":            handleGetPeerInfo,
	"getrawmempool":          handleGetRawMempool,
	"getrawtransaction":      handleGetRawTransaction,
	"gettxout":               handleGetTxOut,
	"help":                   handleHelp,
	"invalidateblock":        handleInvalidateBlock,
	"node":                   handleNode,
	"ping":                   handlePing,
	"reconsiderblock":        handleReconsiderBlock,
	"searchrawtransactions":  handleSearchRawTransactions,
	"sendrawtransaction":     handleSendRawTransaction,
	"setgenerate":            handleSetGenerate,
	"signmessagewithprivkey": handleSignMessageWithPrivKey,
	"stop":                   handleStop,
	"submitblock":            handleSubmitBlock,
	"uptime":                 handleUptime,
	"validateaddress":        handleValidateAddress,
	"verifychain":            handleVerifyChain,
	"verifymessage":          handleVerifyMessage,
	"version":                handleVersion,
}

// list of commands that we recognize, but for which btcd has no support because
// it lacks support for wallet functionality. For these commands the user
// should ask a connected instance of btcwallet.
var rpcAskWallet = map[string]struct{}{
	"addmultisigaddress":     {},
	"backupwallet":           {},
	"createencryptedwallet":  {},
	"createmultisig":         {},
	"dumpprivkey":            {},
	"dumpwallet":             {},
	"encryptwallet":          {},
	"getaccount":             {},
	"getaccountaddress":      {},
	"getaddressesbyaccount":  {},
	"getbalance":             {},
	"getnewaddress":          {},
	"getrawchangeaddress":    {},
	"getreceivedbyaccount":   {},
	"getreceivedbyaddress":   {},
	"gettransaction":         {},
	"gettxoutsetinfo":        {},
	"getunconfirmedbalance":  {},
	"getwalletinfo":          {},
	"importprivkey":          {},
	"importwallet":           {},
	"keypoolrefill":          {},
	"listaccounts":           {},
	"listaddressgroupings":   {},
	"listlockunspent":        {},
	"listreceivedbyaccount":  {},
	"listreceivedbyaddress":  {},
	"listsinceblock":         {},
	"listtransactions":       {},
	"listunspent":            {},
	"lockunspent":            {},
	"move":                   {},
	"sendfrom":               {},
	"sendmany":               {},
	"sendtoaddress":          {},
	"setaccount":             {},
	"settxfee":               {},
	"signmessage":            {},
	"signrawtransaction":     {},
	"walletlock":             {},
	"walletpassphrase":       {},
	"walletpassphrasechange": {},
}

// Commands that are currently unimplemented, but should ultimately be.
var rpcUnimplemented = map[string]struct{}{
	"estimatepriority": {},
	"getchaintips":     {},
	"getmempoolentry":  {},
	"getwork":          {},
	"preciousblock":    {},
}

// Commands that are available to a limited user
var rpcLimited = map[string]struct{}{
	// Websockets commands
	"loadtxfilter":          {},
	"notifyblocks":          {},
	"notifynewtransactions": {},
	"notifyreceived":        {},
	"notifyspent":           {},
	"rescan":                {},
	"rescanblocks":          {},
	"session":               {},

	// Websockets AND HTTP/S commands
	"help": {},

	// HTTP/S-only commands
	"createrawtransaction":  {},
	"decoderawtransaction":  {},
	"decodescript":          {},
	"estimatefee":           {},
	"getbestblock":          {},
	"getbestblockhash":      {},
	"getblock":              {},
	"getblockcount":         {},
	"getblockhash":          {},
	"getblockheader":        {},
	"getcfilter":            {},
	"getcfilterheader":      {},
	"getcurrentnet":         {},
	"getdifficulty":         {},
	"getheaders":            {},
	"getinfo":               {},
	"getnettotals":          {},
	"getnetworkhashps":      {},
	"getrawmempool":         {},
	"getrawtransaction":     {},
	"gettxout":              {},
	"searchrawtransactions": {},
	"sendrawtransaction":    {},
	"submitblock":           {},
	"uptime":                {},
	"validateaddress":       {},
	"verifymessage":         {},
	"version":               {},
}

// builderScript is a convenience function which is used for hard-coded scripts
// built with the script builder. Any errors are converted to a panic since it
// is only, and must only, be used with hard-coded, and therefore, known good,
// scripts.
func builderScript(builder *txscript.ScriptBuilder) []byte {
	script, err := builder.Script()
	if err != nil {
		panic(err)
	}
	return script
}

// internalRPCError is a convenience function to convert an internal error to
// an RPC error with the appropriate code set. It also logs the error to the
// RPC server subsystem since internal errors really should not occur. The
// context parameter is only used in the log message and may be empty if it's
// not needed.
func internalRPCError(errStr, context string) *btcjson.RPCError {
	logStr := errStr
	if context != "" {
		logStr = context + ": " + errStr
	}
	rpcsLog.Error(logStr)
	return btcjson.NewRPCError(btcjson.ErrRPCInternal.Code, errStr)
}

// rpcDecodeHexError is a convenience function for returning a nicely formatted
// RPC error which indicates the provided hex string failed to decode.
func rpcDecodeHexError(gotHex string) *btcjson.RPCError {
	return btcjson.NewRPCError(btcjson.ErrRPCDecodeHexString,
		fmt.Sprintf("Argument must be hexadecimal string (not %q)",
			gotHex))
}

// rpcNoTxInfoError is a convenience function for returning a nicely formatted
// RPC error which indicates there is no information available for the provided
// transaction hash.
func rpcNoTxInfoError(txHash *chainhash.Hash) *btcjson.RPCError {
	return btcjson.NewRPCError(btcjson.ErrRPCNoTxInfo,
		fmt.Sprintf("No information available about transaction %v",
			txHash))
}

// gbtWorkState houses state that is used in between multiple RPC invocations to
// getblocktemplate.
type gbtWorkState struct {
	sync.Mutex
	lastTxUpdate  time.Time
	lastGenerated time.Time
	prevHash      *chainhash.Hash
	minTimestamp  time.Time
	template      *mining.BlockTemplate
	notifyMap     map[chainhash.Hash]map[int64]chan struct{}
	timeSource    blockchain.MedianTimeSource
}

// newGbtWorkState returns a new instance of a gbtWorkState with all internal
// fields initialized and ready to use.
func newGbtWorkState(timeSource blockchain.MedianTimeSource) *gbtWorkState {
	return &gbtWorkState{
		notifyMap:  make(map[chainhash.Hash]map[int64]chan struct{}),
		timeSource: timeSource,
	}
}

// handleUnimplemented is the handler for commands that should ultimately be
// supported but are not yet implemented.
func handleUnimplemented(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	return nil, ErrRPCUnimplemented
}

// handleAskWallet is the handler for commands that are recognized as valid, but
// are unable to answer correctly since it involves wallet state.
// These commands will be implemented in btcwallet.
func handleAskWallet(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	return nil, ErrRPCNoWallet
}

// handleAddNode handles addnode commands.
func handleAddNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.AddNodeCmd)

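	// Ensure the supplied address carries a port, falling back to the
	// chain's default port when one was not given.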
	addr := normalizeAddress(c.Addr, s.cfg.ChainParams.DefaultPort)
	var err error
	switch c.SubCmd {
	case "add":
		err = s.cfg.ConnMgr.Connect(addr, true)
	case "remove":
		err = s.cfg.ConnMgr.RemoveByAddr(addr)
	case "onetry":
		err = s.cfg.ConnMgr.Connect(addr, false)
	default:
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: "invalid subcommand for addnode",
		}
	}

	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: err.Error(),
		}
	}

	// no data returned unless an error.
	return nil, nil
}

// handleNode handles node commands.
func handleNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.NodeCmd)

	var addr string
	var nodeID uint64
	var errN, err error
	params := s.cfg.ChainParams
	switch c.SubCmd {
	case "disconnect":
		// If we have a valid uint disconnect by node id. Otherwise,
		// attempt to disconnect by address, returning an error if a
		// valid IP address is not supplied.
		if nodeID, errN = strconv.ParseUint(c.Target, 10, 32); errN == nil {
			err = s.cfg.ConnMgr.DisconnectByID(int32(nodeID))
		} else {
			if _, _, errP := net.SplitHostPort(c.Target); errP == nil || net.ParseIP(c.Target) != nil {
				addr = normalizeAddress(c.Target, params.DefaultPort)
				err = s.cfg.ConnMgr.DisconnectByAddr(addr)
			} else {
				return nil, &btcjson.RPCError{
					Code:    btcjson.ErrRPCInvalidParameter,
					Message: "invalid address or node ID",
				}
			}
		}
		if err != nil && peerExists(s.cfg.ConnMgr, addr, int32(nodeID)) {
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCMisc,
				Message: "can't disconnect a permanent peer, use remove",
			}
		}

	case "remove":
		// If we have a valid uint disconnect by node id. Otherwise,
		// attempt to disconnect by address, returning an error if a
		// valid IP address is not supplied.
		if nodeID, errN = strconv.ParseUint(c.Target, 10, 32); errN == nil {
			err = s.cfg.ConnMgr.RemoveByID(int32(nodeID))
		} else {
			if _, _, errP := net.SplitHostPort(c.Target); errP == nil || net.ParseIP(c.Target) != nil {
				addr = normalizeAddress(c.Target, params.DefaultPort)
				err = s.cfg.ConnMgr.RemoveByAddr(addr)
			} else {
				return nil, &btcjson.RPCError{
					Code:    btcjson.ErrRPCInvalidParameter,
					Message: "invalid address or node ID",
				}
			}
		}
		if err != nil && peerExists(s.cfg.ConnMgr, addr, int32(nodeID)) {
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCMisc,
				Message: "can't remove a temporary peer, use disconnect",
			}
		}

	case "connect":
		addr = normalizeAddress(c.Target, params.DefaultPort)

		// Default to temporary connections.
		subCmd := "temp"
		if c.ConnectSubCmd != nil {
			subCmd = *c.ConnectSubCmd
		}

		switch subCmd {
		case "perm", "temp":
			err = s.cfg.ConnMgr.Connect(addr, subCmd == "perm")
		default:
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCInvalidParameter,
				Message: "invalid subcommand for node connect",
			}
		}
	default:
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: "invalid subcommand for node",
		}
	}

	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: err.Error(),
		}
	}

	// no data returned unless an error.
	return nil, nil
}

// peerExists determines if a certain peer is currently connected given
// information about all currently connected peers. Peer existence is
// determined using either a target address or node id.
func peerExists(connMgr rpcserverConnManager, addr string, nodeID int32) bool {
	for _, p := range connMgr.ConnectedPeers() {
		if p.ToPeer().ID() == nodeID || p.ToPeer().Addr() == addr {
			return true
		}
	}
	return false
}

// messageToHex serializes a message to the wire protocol encoding using the
// latest protocol version and returns a hex-encoded string of the result.
func messageToHex(msg wire.Message) (string, error) {
	var buf bytes.Buffer
	if err := msg.BtcEncode(&buf, maxProtocolVersion, wire.WitnessEncoding); err != nil {
		context := fmt.Sprintf("Failed to encode msg of type %T", msg)
		return "", internalRPCError(err.Error(), context)
	}

	return hex.EncodeToString(buf.Bytes()), nil
}

// handleCreateRawTransaction handles createrawtransaction commands.
func handleCreateRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.CreateRawTransactionCmd)

	// Validate the locktime, if given.
	if c.LockTime != nil &&
		(*c.LockTime < 0 || *c.LockTime > int64(wire.MaxTxInSequenceNum)) {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: "Locktime out of range",
		}
	}

	// Add all transaction inputs to a new transaction after performing
	// some validity checks.
	mtx := wire.NewMsgTx(wire.TxVersion)
	for _, input := range c.Inputs {
		txHash, err := chainhash.NewHashFromStr(input.Txid)
		if err != nil {
			return nil, rpcDecodeHexError(input.Txid)
		}

		prevOut := wire.NewOutPoint(txHash, input.Vout)
		txIn := wire.NewTxIn(prevOut, []byte{}, nil)
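		// A requested lock time only takes effect when at least one
		// input has a non-final sequence, so lower the sequence below
		// the maximum whenever a non-zero lock time was supplied.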
		if c.LockTime != nil && *c.LockTime != 0 {
			txIn.Sequence = wire.MaxTxInSequenceNum - 1
		}
		mtx.AddTxIn(txIn)
	}

	// Add all transaction outputs to the transaction after performing
	// some validity checks.
	params := s.cfg.ChainParams
	for encodedAddr, amount := range c.Amounts {
		// Ensure amount is in the valid range for monetary amounts.
		if amount <= 0 || amount*btcutil.SatoshiPerBitcoin > btcutil.MaxSatoshi {
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCType,
				Message: "Invalid amount",
			}
		}

		// Decode the provided address.
		addr, err := btcutil.DecodeAddress(encodedAddr, params)
		if err != nil {
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCInvalidAddressOrKey,
				Message: "Invalid address or key: " + err.Error(),
			}
		}

		// Ensure the address is one of the supported types and that
		// the network encoded with the address matches the network the
		// server is currently on.
		switch addr.(type) {
		case *btcutil.AddressPubKeyHash:
		case *btcutil.AddressScriptHash:
		default:
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCInvalidAddressOrKey,
				Message: "Invalid address or key",
			}
		}
		if !addr.IsForNet(params) {
			return nil, &btcjson.RPCError{
				Code: btcjson.ErrRPCInvalidAddressOrKey,
				Message: "Invalid address: " + encodedAddr +
					" is for the wrong network",
			}
		}

		// Create a new script which pays to the provided address.
		pkScript, err := txscript.PayToAddrScript(addr)
		if err != nil {
			context := "Failed to generate pay-to-address script"
			return nil, internalRPCError(err.Error(), context)
		}

		// Convert the amount to satoshi.
		satoshi, err := btcutil.NewAmount(amount)
		if err != nil {
			context := "Failed to convert amount"
			return nil, internalRPCError(err.Error(), context)
		}

		txOut := wire.NewTxOut(int64(satoshi), pkScript)
		mtx.AddTxOut(txOut)
	}

	// Set the Locktime, if given.
	if c.LockTime != nil {
		mtx.LockTime = uint32(*c.LockTime)
	}

	// Return the serialized and hex-encoded transaction. Note that this
	// is intentionally not directly returning because the first return
	// value is a string and it would result in returning an empty string to
	// the client instead of nothing (nil) in the case of an error.
	mtxHex, err := messageToHex(mtx)
	if err != nil {
		return nil, err
	}
	return mtxHex, nil
}

// handleDebugLevel handles debuglevel commands.
func handleDebugLevel(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.DebugLevelCmd)

	// Special show command to list supported subsystems.
	if c.LevelSpec == "show" {
		return fmt.Sprintf("Supported subsystems %v",
			supportedSubsystems()), nil
	}

	err := parseAndSetDebugLevels(c.LevelSpec)
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParams.Code,
			Message: err.Error(),
		}
	}

	return "Done.", nil
}

// witnessToHex formats the passed witness stack as a slice of hex-encoded
// strings to be used in a JSON response.
func witnessToHex(witness wire.TxWitness) []string {
	// Ensure nil is returned when there are no entries versus an empty
	// slice so it can properly be omitted as necessary.
	if len(witness) == 0 {
		return nil
	}

	result := make([]string, 0, len(witness))
	for _, wit := range witness {
		result = append(result, hex.EncodeToString(wit))
	}

	return result
}

// createVinList returns a slice of JSON objects for the inputs of the passed
// transaction.
func createVinList(mtx *wire.MsgTx) []btcjson.Vin {
	// Coinbase transactions only have a single txin by definition.
	vinList := make([]btcjson.Vin, len(mtx.TxIn))
	if blockchain.IsCoinBaseTx(mtx) {
		txIn := mtx.TxIn[0]
		vinList[0].Coinbase = hex.EncodeToString(txIn.SignatureScript)
		vinList[0].Sequence = txIn.Sequence
		vinList[0].Witness = witnessToHex(txIn.Witness)
		return vinList
	}

	for i, txIn := range mtx.TxIn {
		// The disassembled string will contain [error] inline
		// if the script doesn't fully parse, so ignore the
		// error here.
		disbuf, _ := txscript.DisasmString(txIn.SignatureScript)

		vinEntry := &vinList[i]
		vinEntry.Txid = txIn.PreviousOutPoint.Hash.String()
		vinEntry.Vout = txIn.PreviousOutPoint.Index
		vinEntry.Sequence = txIn.Sequence
		vinEntry.ScriptSig = &btcjson.ScriptSig{
			Asm: disbuf,
			Hex: hex.EncodeToString(txIn.SignatureScript),
		}

		if mtx.HasWitness() {
			vinEntry.Witness = witnessToHex(txIn.Witness)
		}
	}

	return vinList
}

// createVoutList returns a slice of JSON objects for the outputs of the passed
// transaction.
func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params, filterAddrMap map[string]struct{}) []btcjson.Vout {
	voutList := make([]btcjson.Vout, 0, len(mtx.TxOut))
	for i, v := range mtx.TxOut {
		// The disassembled string will contain [error] inline if the
		// script doesn't fully parse, so ignore the error here.
		disbuf, _ := txscript.DisasmString(v.PkScript)

		script := txscript.StripClaimScriptPrefix(v.PkScript)

		// Ignore the error here since an error means the script
		// couldn't parse and there is no additional information about
		// it anyways.
		scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(script, chainParams)

		// Encode the addresses while checking if the address passes the
		// filter when needed.
		passesFilter := len(filterAddrMap) == 0
		encodedAddrs := make([]string, len(addrs))
		for j, addr := range addrs {
			encodedAddr := addr.EncodeAddress()
			encodedAddrs[j] = encodedAddr

			// No need to check the map again if the filter already
			// passes.
			if passesFilter {
				continue
			}
			if _, exists := filterAddrMap[encodedAddr]; exists {
				passesFilter = true
			}
		}

		if !passesFilter {
			continue
		}

		var vout btcjson.Vout
		vout.N = uint32(i)
		vout.Value = btcutil.Amount(v.Value).ToBTC()
		vout.ScriptPubKey.Addresses = encodedAddrs
		vout.ScriptPubKey.Asm = disbuf
		vout.ScriptPubKey.Hex = hex.EncodeToString(v.PkScript)
		vout.ScriptPubKey.ReqSigs = int32(reqSigs)

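		// StripClaimScriptPrefix returns a shorter script only when the
		// output carried a claim prefix, so a length difference marks a
		// claim-related output; the class of the remaining script is
		// then reported as the subtype.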
		if len(script) < len(v.PkScript) {
			vout.ScriptPubKey.IsClaim = v.PkScript[0] == txscript.OP_CLAIMNAME || v.PkScript[0] == txscript.OP_UPDATECLAIM
			vout.ScriptPubKey.IsSupport = v.PkScript[0] == txscript.OP_SUPPORTCLAIM
			vout.ScriptPubKey.SubType = scriptClass.String()
			vout.ScriptPubKey.Type = txscript.ScriptClass.String(0)
		} else {
			vout.ScriptPubKey.Type = scriptClass.String()
		}

		// TODO here: isclaim, issupport, subtype,

		voutList = append(voutList, vout)
	}

	return voutList
}

// createTxRawResult converts the passed transaction and associated parameters
// to a raw transaction JSON object.
func createTxRawResult(chainParams *chaincfg.Params, mtx *wire.MsgTx,
	txHash string, blkHeader *wire.BlockHeader, blkHash string,
	blkHeight int32, chainHeight int32) (*btcjson.TxRawResult, error) {

	mtxHex, err := messageToHex(mtx)
	if err != nil {
		return nil, err
	}

	txReply := &btcjson.TxRawResult{
		Hex:      mtxHex,
		Txid:     txHash,
		Hash:     mtx.WitnessHash().String(),
		Size:     int32(mtx.SerializeSize()),
		Vsize:    int32(mempool.GetTxVirtualSize(btcutil.NewTx(mtx))),
		Weight:   int32(blockchain.GetTransactionWeight(btcutil.NewTx(mtx))),
		Vin:      createVinList(mtx),
		Vout:     createVoutList(mtx, chainParams, nil),
		Version:  uint32(mtx.Version),
		LockTime: mtx.LockTime,
	}

	if blkHeader != nil {
		// This is not a typo, they are identical in bitcoind as well.
		txReply.Time = blkHeader.Timestamp.Unix()
		txReply.Blocktime = blkHeader.Timestamp.Unix()
		txReply.BlockHash = blkHash
		txReply.Confirmations = uint64(1 + chainHeight - blkHeight)
	}

	return txReply, nil
}

// handleDecodeRawTransaction handles decoderawtransaction commands.
func handleDecodeRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.DecodeRawTransactionCmd)

	// Deserialize the transaction.
	hexStr := c.HexTx
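	// Pad an odd-length hex string with a leading zero so that
	// hex.DecodeString accepts it.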
	if len(hexStr)%2 != 0 {
		hexStr = "0" + hexStr
	}
	serializedTx, err := hex.DecodeString(hexStr)
	if err != nil {
		return nil, rpcDecodeHexError(hexStr)
	}
	var mtx wire.MsgTx
	err = mtx.Deserialize(bytes.NewReader(serializedTx))
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCDeserialization,
			Message: "TX decode failed: " + err.Error(),
		}
	}

	// Create and return the result.
	txReply := btcjson.TxRawDecodeResult{
		Txid:     mtx.TxHash().String(),
		Version:  mtx.Version,
		Locktime: mtx.LockTime,
		Vin:      createVinList(&mtx),
		Vout:     createVoutList(&mtx, s.cfg.ChainParams, nil),
	}
	return txReply, nil
}

// handleDecodeScript handles decodescript commands.
func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.DecodeScriptCmd)

	// Convert the hex script to bytes.
	hexStr := c.HexScript
	if len(hexStr)%2 != 0 {
		hexStr = "0" + hexStr
	}
	script, err := hex.DecodeString(hexStr)
	if err != nil {
		return nil, rpcDecodeHexError(hexStr)
	}

	// The disassembled string will contain [error] inline if the script
	// doesn't fully parse, so ignore the error here.
	disbuf, _ := txscript.DisasmString(script)

	// Get information about the script.
	// Ignore the error here since an error means the script couldn't parse
	// and there is no additional information about it anyways.
	scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(script,
		s.cfg.ChainParams)
	addresses := make([]string, len(addrs))
	for i, addr := range addrs {
		addresses[i] = addr.EncodeAddress()
	}

	// Convert the script itself to a pay-to-script-hash address.
	p2sh, err := btcutil.NewAddressScriptHash(script, s.cfg.ChainParams)
	if err != nil {
		context := "Failed to convert script to pay-to-script-hash"
		return nil, internalRPCError(err.Error(), context)
	}

	// Generate and return the reply.
	reply := btcjson.DecodeScriptResult{
		Asm:       disbuf,
		ReqSigs:   int32(reqSigs),
		Type:      scriptClass.String(),
		Addresses: addresses,
	}
	if scriptClass != txscript.ScriptHashTy {
		reply.P2sh = p2sh.EncodeAddress()
	}
	return reply, nil
}

// handleEstimateFee handles estimatefee commands.
func handleEstimateFee(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.EstimateFeeCmd)

	if s.cfg.FeeEstimator == nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInternal.Code,
			Message: "Fee estimation disabled",
		}
	}

	if c.NumBlocks <= 0 {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: "Parameter NumBlocks must be positive",
		}
	}

	feeRate, err := s.cfg.FeeEstimator.EstimateFee(uint32(c.NumBlocks))
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: err.Error(),
		}
	}

	// Convert to satoshis per kb.
	return float64(feeRate), nil
}

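// handleEstimateSmartFee handles estimatesmartfee commands. No smart fee
// estimator is implemented, so the request is served by falling back to the
// legacy estimatefee handler with the requested confirmation target.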
func handleEstimateSmartFee(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.EstimateSmartFeeCmd)

	rpcsLog.Debugf("EstimateSmartFee is not implemented; falling back to EstimateFee. Requested mode: %s", c.EstimateMode)

	return handleEstimateFee(s, &btcjson.EstimateFeeCmd{NumBlocks: c.ConfTarget}, closeChan)
}

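// handleGenerate handles generate commands.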
func handleGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	// Respond with an error if there are no addresses to pay the
	// created blocks to.
	if len(cfg.miningAddrs) == 0 {
		return nil, &btcjson.RPCError{
			Code: btcjson.ErrRPCInternal.Code,
			Message: "No payment addresses specified " +
				"via --miningaddr",
		}
	}

	// Respond with an error if there's virtually 0 chance of mining a block
	// with the CPU.
	if !s.cfg.ChainParams.GenerateSupported {
		return nil, &btcjson.RPCError{
			Code: btcjson.ErrRPCDifficulty,
			Message: fmt.Sprintf("No support for `generate` on "+
				"the current network, %s, as it's unlikely to "+
				"be possible to mine a block with the CPU.",
				s.cfg.ChainParams.Net),
		}
	}

	c := cmd.(*btcjson.GenerateCmd)

	// Respond with an error if the client is requesting 0 blocks to be generated.
	if c.NumBlocks == 0 {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInternal.Code,
			Message: "Please request a nonzero number of blocks to generate.",
		}
	}

	// Create a reply
	reply := make([]string, c.NumBlocks)

	blockHashes, err := s.cfg.CPUMiner.GenerateNBlocks(c.NumBlocks, nil)
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInternal.Code,
			Message: err.Error(),
		}
	}

	// Mine the correct number of blocks, assigning the hex representation of the
	// hash of each one to its place in the reply.
	for i, hash := range blockHashes {
		reply[i] = hash.String()
	}

	return reply, nil
}

// handleGenerateToAddress handles generatetoaddress commands.
func handleGenerateToAddress(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.GenerateToAddressCmd)
	payToAddr, err := btcutil.DecodeAddress(c.Address, s.cfg.ChainParams)

	// Respond with an error if there are no addresses to pay the
	// created blocks to.
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: "No payment addresses specified ",
		}
	}
	// cfg.miningAddrs = append(cfg.miningAddrs, maddr)

	// Respond with an error if there's virtually 0 chance of mining a block
	// with the CPU.
	if !s.cfg.ChainParams.GenerateSupported {
		return nil, &btcjson.RPCError{
			Code: btcjson.ErrRPCDifficulty,
			Message: fmt.Sprintf("No support for `generatetoaddress` on "+
				"the current network, %s, as it's unlikely to "+
				"be possible to mine a block with the CPU.",
				s.cfg.ChainParams.Net),
		}
	}

	// Respond with an error if the client is requesting 0 blocks to be generated.
	if c.NumBlocks == 0 {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInternal.Code,
			Message: "Please request a nonzero number of blocks to generate.",
		}
	}

	// Create a reply
	reply := make([]string, c.NumBlocks)

	blockHashes, err := s.cfg.CPUMiner.GenerateNBlocks(uint32(c.NumBlocks), payToAddr)
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInternal.Code,
			Message: err.Error(),
		}
	}

	// Mine the correct number of blocks, assigning the hex representation of the
	// hash of each one to its place in the reply.
	for i, hash := range blockHashes {
		reply[i] = hash.String()
	}

	return reply, nil
}

// handleGetAddedNodeInfo handles getaddednodeinfo commands.
func handleGetAddedNodeInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.GetAddedNodeInfoCmd)

	// Retrieve a list of persistent (added) peers from the server and
	// filter the list of peers per the specified address (if any).
	peers := s.cfg.ConnMgr.PersistentPeers()
	if c.Node != nil {
		node := *c.Node
		found := false
		for i, peer := range peers {
			if peer.ToPeer().Addr() == node {
				peers = peers[i : i+1]
				found = true
			}
		}
		if !found {
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCClientNodeNotAdded,
				Message: "Node has not been added",
			}
		}
	}

	// Without the dns flag, the result is just a slice of the addresses as
	// strings.
	if !c.DNS {
		results := make([]string, 0, len(peers))
		for _, peer := range peers {
			results = append(results, peer.ToPeer().Addr())
		}
		return results, nil
	}

	// With the dns flag, the result is an array of JSON objects which
	// include the result of DNS lookups for each peer.
	results := make([]*btcjson.GetAddedNodeInfoResult, 0, len(peers))
	for _, rpcPeer := range peers {
		// Set the "address" of the peer which could be an ip address
		// or a domain name.
		peer := rpcPeer.ToPeer()
		var result btcjson.GetAddedNodeInfoResult
		result.AddedNode = peer.Addr()
		result.Connected = btcjson.Bool(peer.Connected())

		// Split the address into host and port portions so we can do
		// a DNS lookup against the host. When no port is specified in
		// the address, just use the address as the host.
- Callbacks for all messages that are required to implement a full node
are registered
- Logic necessary to serve data and behave as a full node is now in the
callback registered with the peer
Finally, the following peer-related things have been improved as a part
of this refactor:
- Don't log or send reject message due to peer disconnects
- Remove trace logs that aren't particularly helpful
- Finish an old TODO to switch the queue WaitGroup over to a channel
- Improve various comments and fix some code consistency cases
- Improve a few logging bits
- Implement a most-recently-used nonce tracking for detecting self
connections and generate a unique nonce for each peer
2015-10-02 08:03:20 +02:00
|
|
|
host, _, err := net.SplitHostPort(peer.Addr())
|
2014-01-01 17:17:40 +01:00
|
|
|
if err != nil {
|
peer: Refactor peer code into its own package.
This commit introduces package peer which contains peer related features
refactored from peer.go.
The following is an overview of the features the package provides:
- Provides a basic concurrent safe bitcoin peer for handling bitcoin
communications via the peer-to-peer protocol
- Full duplex reading and writing of bitcoin protocol messages
- Automatic handling of the initial handshake process including protocol
version negotiation
- Automatic periodic keep-alive pinging and pong responses
- Asynchronous message queueing of outbound messages with optional
channel for notification when the message is actually sent
- Inventory message batching and send trickling with known inventory
detection and avoidance
- Ability to wait for shutdown/disconnect
- Flexible peer configuration
- Caller is responsible for creating outgoing connections and listening
for incoming connections so they have flexibility to establish
connections as they see fit (proxies, etc.)
- User agent name and version
- Bitcoin network
- Service support signalling (full nodes, bloom filters, etc.)
- Maximum supported protocol version
- Ability to register callbacks for handling bitcoin protocol messages
- Proper handling of bloom filter related commands when the caller does
not specify the related flag to signal support
- Disconnects the peer when the protocol version is high enough
- Does not invoke the related callbacks for older protocol versions
- Snapshottable peer statistics such as the total number of bytes read
and written, the remote address, user agent, and negotiated protocol
version
- Helper functions for pushing addresses, getblocks, getheaders, and
reject messages
- These could all be sent manually via the standard message output
function, but the helpers provide additional nice functionality such
as duplicate filtering and address randomization
- Full documentation with example usage
- Test coverage
In addition to the addition of the new package, btcd has been refactored
to make use of the new package by extending the basic peer it provides to
work with the blockmanager and server to act as a full node. The
following is a broad overview of the changes to integrate the package:
- The server is responsible for all connection management including
persistent peers and banning
- Callbacks for all messages that are required to implement a full node
are registered
- Logic necessary to serve data and behave as a full node is now in the
callback registered with the peer
Finally, the following peer-related things have been improved as a part
of this refactor:
- Don't log or send reject message due to peer disconnects
- Remove trace logs that aren't particularly helpful
- Finish an old TODO to switch the queue WaitGroup over to a channel
- Improve various comments and fix some code consistency cases
- Improve a few logging bits
- Implement a most-recently-used nonce tracking for detecting self
connections and generate a unique nonce for each peer
2015-10-02 08:03:20 +02:00
|
|
|
host = peer.Addr()
|
2014-01-01 17:17:40 +01:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
var ipList []string
|
2016-12-06 16:42:25 +01:00
|
|
|
switch {
|
|
|
|
case net.ParseIP(host) != nil, strings.HasSuffix(host, ".onion"):
|
|
|
|
ipList = make([]string, 1)
|
|
|
|
ipList[0] = host
|
|
|
|
default:
|
|
|
|
// Do a DNS lookup for the address. If the lookup fails, just
|
|
|
|
// use the host.
|
|
|
|
ips, err := btcdLookup(host)
|
|
|
|
if err != nil {
|
|
|
|
ipList = make([]string, 1)
|
|
|
|
ipList[0] = host
|
|
|
|
break
|
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
ipList = make([]string, 0, len(ips))
|
|
|
|
for _, ip := range ips {
|
|
|
|
ipList = append(ipList, ip.String())
|
2014-01-01 17:17:40 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// Add the addresses and connection info to the result.
|
|
|
|
addrs := make([]btcjson.GetAddedNodeInfoResultAddr, 0, len(ipList))
|
|
|
|
for _, ip := range ipList {
|
|
|
|
var addr btcjson.GetAddedNodeInfoResultAddr
|
|
|
|
addr.Address = ip
|
|
|
|
addr.Connected = "false"
|
|
|
|
if ip == host && peer.Connected() {
|
peer: Refactor peer code into its own package.
This commit introduces package peer which contains peer related features
refactored from peer.go.
The following is an overview of the features the package provides:
- Provides a basic concurrent safe bitcoin peer for handling bitcoin
communications via the peer-to-peer protocol
- Full duplex reading and writing of bitcoin protocol messages
- Automatic handling of the initial handshake process including protocol
version negotiation
- Automatic periodic keep-alive pinging and pong responses
- Asynchronous message queueing of outbound messages with optional
channel for notification when the message is actually sent
- Inventory message batching and send trickling with known inventory
detection and avoidance
- Ability to wait for shutdown/disconnect
- Flexible peer configuration
- Caller is responsible for creating outgoing connections and listening
for incoming connections so they have flexibility to establish
connections as they see fit (proxies, etc.)
- User agent name and version
- Bitcoin network
- Service support signalling (full nodes, bloom filters, etc.)
- Maximum supported protocol version
- Ability to register callbacks for handling bitcoin protocol messages
- Proper handling of bloom filter related commands when the caller does
not specify the related flag to signal support
- Disconnects the peer when the protocol version is high enough
- Does not invoke the related callbacks for older protocol versions
- Snapshottable peer statistics such as the total number of bytes read
and written, the remote address, user agent, and negotiated protocol
version
- Helper functions for pushing addresses, getblocks, getheaders, and
reject messages
- These could all be sent manually via the standard message output
function, but the helpers provide additional nice functionality such
as duplicate filtering and address randomization
- Full documentation with example usage
- Test coverage
In addition to the addition of the new package, btcd has been refactored
to make use of the new package by extending the basic peer it provides to
work with the blockmanager and server to act as a full node. The
following is a broad overview of the changes to integrate the package:
- The server is responsible for all connection management including
persistent peers and banning
- Callbacks for all messages that are required to implement a full node
are registered
- Logic necessary to serve data and behave as a full node is now in the
callback registered with the peer
Finally, the following peer-related things have been improved as a part
of this refactor:
- Don't log or send reject message due to peer disconnects
- Remove trace logs that aren't particularly helpful
- Finish an old TODO to switch the queue WaitGroup over to a channel
- Improve various comments and fix some code consistency cases
- Improve a few logging bits
- Implement a most-recently-used nonce tracking for detecting self
connections and generate a unique nonce for each peer
2015-10-02 08:03:20 +02:00
|
|
|
addr.Connected = directionString(peer.Inbound())
|
2014-01-01 17:17:40 +01:00
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
addrs = append(addrs, addr)
|
2014-01-01 17:17:40 +01:00
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
result.Addresses = &addrs
|
|
|
|
results = append(results, &result)
|
2014-01-01 17:17:40 +01:00
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
return results, nil
|
2014-01-01 17:17:40 +01:00
|
|
|
}

// handleGetBestBlock implements the getbestblock command.
func handleGetBestBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	// All other "get block" commands give either the height, the hash, or
	// both but require the block hash as input. This gets both for the
	// best block.
	best := s.cfg.Chain.BestSnapshot()
	result := &btcjson.GetBestBlockResult{
		Hash:   best.Hash.String(),
		Height: best.Height,
	}
	return result, nil
}

// handleGetBestBlockHash implements the getbestblockhash command.
func handleGetBestBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	best := s.cfg.Chain.BestSnapshot()
	return best.Hash.String(), nil
}

// getDifficultyRatio returns the proof-of-work difficulty as a multiple of the
// minimum difficulty using the passed bits field from the header of a block.
func getDifficultyRatio(bits uint32, params *chaincfg.Params) float64 {
	// The minimum difficulty is the max possible proof-of-work limit bits
	// converted back to a number. Note this is not the same as the proof of
	// work limit directly because the block difficulty is encoded in a block
	// with the compact form which loses precision.
	max := blockchain.CompactToBig(params.PowLimitBits)
	target := blockchain.CompactToBig(bits)

	difficulty := new(big.Rat).SetFrac(max, target)
	outString := difficulty.FloatString(8)
	diff, err := strconv.ParseFloat(outString, 64)
	if err != nil {
		rpcsLog.Errorf("Cannot get difficulty: %v", err)
		return 0
	}
	return diff
}
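
// As a rough illustration (values assumed for the example rather than taken
// from any particular chain): with a proof-of-work limit of 0x1d00ffff and a
// header bits value of 0x1c7fffff, max/target evaluates to roughly 2.0, i.e.
// the block was about twice as hard to produce as a minimum-difficulty block.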

// handleGetBlock implements the getblock command.
func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.GetBlockCmd)

	// Load the raw block bytes from the database.
	hash, err := chainhash.NewHashFromStr(c.Hash)
	if err != nil {
		return nil, rpcDecodeHexError(c.Hash)
	}
	var blkBytes []byte
	err = s.cfg.DB.View(func(dbTx database.Tx) error {
		var err error
		blkBytes, err = dbTx.FetchBlock(hash)
		return err
	})
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCBlockNotFound,
			Message: "Block not found",
		}
	}

	// If verbosity is 0, return the serialized block as a hex encoded string.
	if c.Verbosity != nil && *c.Verbosity == 0 {
		return hex.EncodeToString(blkBytes), nil
	}

	// Otherwise, generate the JSON object and return it.

	// Deserialize the block.
	blk, err := btcutil.NewBlockFromBytes(blkBytes)
	if err != nil {
		context := "Failed to deserialize block"
		return nil, internalRPCError(err.Error(), context)
	}

	// Get the block height from chain.
	blockHeight, err := s.cfg.Chain.BlockHeightByHash(hash)
	if err != nil {
		context := "Failed to obtain block height"
		return nil, internalRPCError(err.Error(), context)
	}
	blk.SetHeight(blockHeight)
	best := s.cfg.Chain.BestSnapshot()

	// Get next block hash unless there are none.
	var nextHashString string
	if blockHeight < best.Height {
		nextHash, err := s.cfg.Chain.BlockHashByHeight(blockHeight + 1)
		if err != nil {
			context := "No next block"
			return nil, internalRPCError(err.Error(), context)
		}
		nextHashString = nextHash.String()
	}

	params := s.cfg.ChainParams
	blockHeader := &blk.MsgBlock().Header
	var prevHashString string
	if blockHeight > 0 {
		prevHashString = blockHeader.PrevBlock.String()
	}

	base := btcjson.GetBlockVerboseResultBase{
		Hash:          c.Hash,
		Version:       blockHeader.Version,
		VersionHex:    fmt.Sprintf("%08x", blockHeader.Version),
		MerkleRoot:    blockHeader.MerkleRoot.String(),
		PreviousHash:  prevHashString,
		Nonce:         blockHeader.Nonce,
		Time:          blockHeader.Timestamp.Unix(),
		Confirmations: int64(1 + best.Height - blockHeight),
		Height:        int64(blockHeight),
		Size:          int32(len(blkBytes)),
		StrippedSize:  int32(blk.MsgBlock().SerializeSizeStripped()),
		Weight:        int32(blockchain.GetBlockWeight(blk)),
		Bits:          strconv.FormatInt(int64(blockHeader.Bits), 16),
		Difficulty:    getDifficultyRatio(blockHeader.Bits, params),
		NextHash:      nextHashString,
		ClaimTrie:     blockHeader.ClaimTrie.String(),
	}

	if *c.Verbosity == 1 {
		transactions := blk.Transactions()
		txNames := make([]string, len(transactions))
		for i, tx := range transactions {
			txNames[i] = tx.Hash().String()
		}

		base.TxCount = len(txNames)
		blockReply := btcjson.GetBlockVerboseResult{
			GetBlockVerboseResultBase: base,
			Tx:                        txNames,
		}
		return blockReply, nil
	}

	txns := blk.Transactions()
	rawTxns := make([]btcjson.TxRawResult, len(txns))
	for i, tx := range txns {
		rawTxn, err := createTxRawResult(params, tx.MsgTx(),
			tx.Hash().String(), blockHeader, hash.String(),
			blockHeight, best.Height)
		if err != nil {
			return nil, err
		}
		rawTxns[i] = *rawTxn
	}
	base.TxCount = len(rawTxns)
	blockReply := btcjson.GetBlockVerboseTxResult{
		GetBlockVerboseResultBase: base,
		Tx:                        rawTxns,
	}

	return blockReply, nil
}
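
// An illustrative getblock request (field values are hypothetical) looks like:
//
//	{"jsonrpc":"1.0","id":1,"method":"getblock","params":["<block hash>", 2]}
//
// A verbosity of 0 returns the serialized block as a hex string, 1 returns the
// verbose object with transaction hashes only, and any other value returns the
// verbose object with fully decoded transactions.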

// softForkStatus converts a ThresholdState state into a human readable string
// corresponding to the particular state.
func softForkStatus(state blockchain.ThresholdState) (string, error) {
	switch state {
	case blockchain.ThresholdDefined:
		return "defined", nil
	case blockchain.ThresholdStarted:
		return "started", nil
	case blockchain.ThresholdLockedIn:
		return "lockedin", nil
	case blockchain.ThresholdActive:
		return "active", nil
	case blockchain.ThresholdFailed:
		return "failed", nil
	default:
		return "", fmt.Errorf("unknown deployment state: %v", state)
	}
}

// handleGetBlockChainInfo implements the getblockchaininfo command.
func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	// Obtain a snapshot of the current best known blockchain state. We'll
	// populate the response to this call primarily from this snapshot.
	params := s.cfg.ChainParams
	chain := s.cfg.Chain
	chainSnapshot := chain.BestSnapshot()

	chainInfo := &btcjson.GetBlockChainInfoResult{
		Chain:         params.Name,
		Blocks:        chainSnapshot.Height,
		Headers:       chainSnapshot.Height,
		BestBlockHash: chainSnapshot.Hash.String(),
		Difficulty:    getDifficultyRatio(chainSnapshot.Bits, params),
		MedianTime:    chainSnapshot.MedianTime.Unix(),
		Pruned:        false,
		SoftForks: &btcjson.SoftForks{
			Bip9SoftForks: make(map[string]*btcjson.Bip9SoftForkDescription),
		},
	}

	// Next, populate the response with information describing the current
	// status of soft-forks deployed via the super-majority block
	// signalling mechanism.
	height := chainSnapshot.Height
	chainInfo.SoftForks.SoftForks = []*btcjson.SoftForkDescription{
		{
			ID:      "bip34",
			Version: 2,
			Reject: struct {
				Status bool `json:"status"`
			}{
				Status: height >= params.BIP0034Height,
			},
		},
		{
			ID:      "bip66",
			Version: 3,
			Reject: struct {
				Status bool `json:"status"`
			}{
				Status: height >= params.BIP0066Height,
			},
		},
		{
			ID:      "bip65",
			Version: 4,
			Reject: struct {
				Status bool `json:"status"`
			}{
				Status: height >= params.BIP0065Height,
			},
		},
	}

	// Finally, query the BIP0009 version bits state for all currently
	// defined BIP0009 soft-fork deployments.
	for deployment, deploymentDetails := range params.Deployments {
		// Map the integer deployment ID into a human readable
		// fork-name.
		var forkName string
		switch deployment {
		case chaincfg.DeploymentTestDummy:
			forkName = "dummy"

		case chaincfg.DeploymentCSV:
			forkName = "csv"

		case chaincfg.DeploymentSegwit:
			forkName = "segwit"

		case chaincfg.DeploymentTaproot:
			forkName = "taproot"

		default:
			return nil, &btcjson.RPCError{
				Code: btcjson.ErrRPCInternal.Code,
				Message: fmt.Sprintf("Unknown deployment %v "+
					"detected", deployment),
			}
		}

		// Query the chain for the current status of the deployment as
		// identified by its deployment ID.
		deploymentStatus, err := chain.ThresholdState(uint32(deployment))
		if err != nil {
			context := "Failed to obtain deployment status"
			return nil, internalRPCError(err.Error(), context)
		}

		// Attempt to convert the current deployment status into a
		// human readable string. If the status is unrecognized, then a
		// non-nil error is returned.
		statusString, err := softForkStatus(deploymentStatus)
		if err != nil {
			return nil, &btcjson.RPCError{
				Code: btcjson.ErrRPCInternal.Code,
				Message: fmt.Sprintf("unknown deployment status: %v",
					deploymentStatus),
			}
		}

		// Finally, populate the soft-fork description with all the
		// information gathered above.
		chainInfo.SoftForks.Bip9SoftForks[forkName] = &btcjson.Bip9SoftForkDescription{
			Status:     strings.ToLower(statusString),
			Bit:        deploymentDetails.BitNumber,
			StartTime2: int64(deploymentDetails.StartTime),
			Timeout:    int64(deploymentDetails.ExpireTime),
			Since:      deploymentDetails.ForceActiveAt,
		}
	}

	return chainInfo, nil
}
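
// Each entry added to Bip9SoftForks above is keyed by the short fork name
// ("dummy", "csv", "segwit", or "taproot") and carries the Status, Bit,
// StartTime2, Timeout, and Since fields of btcjson.Bip9SoftForkDescription,
// all derived from the deployment parameters and the chain's threshold state.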

// handleGetBlockCount implements the getblockcount command.
func handleGetBlockCount(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	best := s.cfg.Chain.BestSnapshot()
	return int64(best.Height), nil
}

// handleGetBlockHash implements the getblockhash command.
func handleGetBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.GetBlockHashCmd)
	hash, err := s.cfg.Chain.BlockHashByHeight(int32(c.Index))
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCOutOfRange,
			Message: "Block number out of range",
		}
	}

	return hash.String(), nil
}

// handleGetBlockHeader implements the getblockheader command.
func handleGetBlockHeader(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.GetBlockHeaderCmd)

	// Fetch the header from chain.
	hash, err := chainhash.NewHashFromStr(c.Hash)
	if err != nil {
		return nil, rpcDecodeHexError(c.Hash)
	}
	blockHeader, err := s.cfg.Chain.HeaderByHash(hash)
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCBlockNotFound,
			Message: "Block not found",
		}
	}

	// When the verbose flag isn't set, simply return the serialized block
	// header as a hex-encoded string.
	if c.Verbose != nil && !*c.Verbose {
		var headerBuf bytes.Buffer
		err := blockHeader.Serialize(&headerBuf)
		if err != nil {
			context := "Failed to serialize block header"
			return nil, internalRPCError(err.Error(), context)
		}
		return hex.EncodeToString(headerBuf.Bytes()), nil
	}

	// The verbose flag is set, so generate the JSON object and return it.

	// Get the block height from chain.
	blockHeight, err := s.cfg.Chain.BlockHeightByHash(hash)
	if err != nil {
		context := "Failed to obtain block height"
		return nil, internalRPCError(err.Error(), context)
	}
	best := s.cfg.Chain.BestSnapshot()

	// Get next block hash unless there are none.
	var nextHashString string
	if blockHeight < best.Height {
		nextHash, err := s.cfg.Chain.BlockHashByHeight(blockHeight + 1)
		if err != nil {
			context := "No next block"
			return nil, internalRPCError(err.Error(), context)
		}
		nextHashString = nextHash.String()
	}

	params := s.cfg.ChainParams
	blockHeaderReply := btcjson.GetBlockHeaderVerboseResult{
		Hash:          c.Hash,
		Confirmations: int64(1 + best.Height - blockHeight),
		Height:        blockHeight,
		Version:       blockHeader.Version,
		VersionHex:    fmt.Sprintf("%08x", blockHeader.Version),
		MerkleRoot:    blockHeader.MerkleRoot.String(),
		NextHash:      nextHashString,
		PreviousHash:  blockHeader.PrevBlock.String(),
		Nonce:         uint64(blockHeader.Nonce),
		Time:          blockHeader.Timestamp.Unix(),
		Bits:          strconv.FormatInt(int64(blockHeader.Bits), 16),
		Difficulty:    getDifficultyRatio(blockHeader.Bits, params),
	}
	return blockHeaderReply, nil
}
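
// An illustrative getblockheader request (values hypothetical) looks like:
//
//	{"jsonrpc":"1.0","id":1,"method":"getblockheader","params":["<block hash>", false]}
//
// With verbose set to false the serialized header is returned as a hex string;
// when verbose is omitted or true, the JSON object built above is returned.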

// handleGetChainTips implements the getchaintips command.
func handleGetChainTips(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	tips := s.cfg.Chain.ChainTips()
	results := make([]btcjson.GetChainTipsResult, 0, len(tips))
	for _, tip := range tips {
		results = append(results, btcjson.GetChainTipsResult(tip))
	}
	return results, nil
}

// encodeTemplateID encodes the passed details into an ID that can be used to
// uniquely identify a block template.
func encodeTemplateID(prevHash *chainhash.Hash, lastGenerated time.Time) string {
	return fmt.Sprintf("%s-%d", prevHash.String(), lastGenerated.Unix())
}
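
// For example (illustrative values only), a template generated at Unix time
// 1610000000 produces an ID of the form
// "<64-hex-character previous block hash>-1610000000", which decodeTemplateID
// below splits back into its two components.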

// decodeTemplateID decodes an ID that is used to uniquely identify a block
// template. This is mainly used as a mechanism to track when to update clients
// that are using long polling for block templates. The ID consists of the
// previous block hash for the associated template and the time the associated
// template was generated.
func decodeTemplateID(templateID string) (*chainhash.Hash, int64, error) {
	fields := strings.Split(templateID, "-")
	if len(fields) != 2 {
		return nil, 0, errors.New("invalid longpollid format")
	}

	prevHash, err := chainhash.NewHashFromStr(fields[0])
	if err != nil {
		return nil, 0, errors.New("invalid longpollid format")
	}
	lastGenerated, err := strconv.ParseInt(fields[1], 10, 64)
	if err != nil {
		return nil, 0, errors.New("invalid longpollid format")
	}

	return prevHash, lastGenerated, nil
}

// notifyLongPollers notifies any channels that have been registered to be
// notified when block templates are stale.
//
// This function MUST be called with the state locked.
func (state *gbtWorkState) notifyLongPollers(latestHash *chainhash.Hash, lastGenerated time.Time) {
	// Notify anything that is waiting for a block template update from a
	// hash which is not the hash of the tip of the best chain since their
	// work is now invalid.
	for hash, channels := range state.notifyMap {
		if !hash.IsEqual(latestHash) {
			for _, c := range channels {
				close(c)
			}
			delete(state.notifyMap, hash)
		}
	}

	// Return now if the provided last generated timestamp has not been
	// initialized.
	if lastGenerated.IsZero() {
		return
	}

	// Return now if there is nothing registered for updates to the current
	// best block hash.
	channels, ok := state.notifyMap[*latestHash]
	if !ok {
		return
	}

	// Notify anything that is waiting for a block template update from a
	// block template generated before the most recently generated block
	// template.
	lastGeneratedUnix := lastGenerated.Unix()
	for lastGen, c := range channels {
		if lastGen < lastGeneratedUnix {
			close(c)
			delete(channels, lastGen)
		}
	}

	// Remove the entry altogether if there are no more registered
	// channels.
	if len(channels) == 0 {
		delete(state.notifyMap, *latestHash)
	}
}

// NotifyBlockConnected uses the newly-connected block to notify any long poll
// clients with a new block template when their existing block template is
// stale due to the newly connected block.
func (state *gbtWorkState) NotifyBlockConnected(blockHash *chainhash.Hash) {
	go func() {
		state.Lock()
		defer state.Unlock()

		state.notifyLongPollers(blockHash, state.lastTxUpdate)
	}()
}

// NotifyMempoolTx uses the new last updated time for the transaction memory
// pool to notify any long poll clients with a new block template when their
// existing block template is stale due to enough time passing and the contents
// of the memory pool changing.
func (state *gbtWorkState) NotifyMempoolTx(lastUpdated time.Time) {
	go func() {
		state.Lock()
		defer state.Unlock()

		// No need to notify anything if no block templates have been generated
		// yet.
		if state.prevHash == nil || state.lastGenerated.IsZero() {
			return
		}

		if time.Now().After(state.lastGenerated.Add(time.Second *
			gbtRegenerateSeconds)) {

			state.notifyLongPollers(state.prevHash, lastUpdated)
		}
	}()
}

// templateUpdateChan returns a channel that will be closed once the block
// template associated with the passed previous hash and last generated time
// is stale. The function will return existing channels for duplicate
// parameters which allows multiple clients to wait for the same block template
// without requiring a different channel for each client.
//
// This function MUST be called with the state locked.
func (state *gbtWorkState) templateUpdateChan(prevHash *chainhash.Hash, lastGenerated int64) chan struct{} {
	// Either get the current list of channels waiting for updates about
	// changes to block template for the previous hash or create a new one.
	channels, ok := state.notifyMap[*prevHash]
	if !ok {
		m := make(map[int64]chan struct{})
		state.notifyMap[*prevHash] = m
		channels = m
	}

	// Get the current channel associated with the time the block template
	// was last generated or create a new one.
	c, ok := channels[lastGenerated]
	if !ok {
		c = make(chan struct{})
		channels[lastGenerated] = c
	}

	return c
}
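
// For reference, the usage above implies notifyMap has the shape (assuming the
// gbtWorkState declaration elsewhere in this file matches how it is used
// here):
//
//	notifyMap map[chainhash.Hash]map[int64]chan struct{}
//
// keyed first by the previous block hash a template builds on and then by the
// Unix time the template was generated, with one channel shared by every
// client waiting on that same template.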

// updateBlockTemplate creates or updates a block template for the work state.
// A new block template will be generated when the current best block has
// changed or the transactions in the memory pool have been updated and it has
// been long enough since the last template was generated. Otherwise, the
// timestamp for the existing block template is updated (and possibly the
// difficulty on testnet per the consensus rules). Finally, if the
// useCoinbaseValue flag is false and the existing block template does not
// already contain a valid payment address, the block template will be updated
// with a randomly selected payment address from the list of configured
// addresses.
//
// This function MUST be called with the state locked.
func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bool) error {
	generator := s.cfg.Generator
	lastTxUpdate := generator.TxSource().LastUpdated()
	if lastTxUpdate.IsZero() {
		lastTxUpdate = time.Now()
	}

	// Generate a new block template when the current best block has
	// changed or the transactions in the memory pool have been updated and
	// it has been at least gbtRegenerateSeconds since the last template was
	// generated.
	var msgBlock *wire.MsgBlock
	var targetDifficulty string
	latestHash := &s.cfg.Chain.BestSnapshot().Hash
	template := state.template
	if template == nil || state.prevHash == nil ||
		!state.prevHash.IsEqual(latestHash) ||
		(state.lastTxUpdate != lastTxUpdate &&
			time.Now().After(state.lastGenerated.Add(time.Second*
				gbtRegenerateSeconds))) {

		// Reset the previous best hash the block template was generated
		// against so any errors below cause the next invocation to try
		// again.
		state.prevHash = nil

		// Choose a payment address at random if the caller requests a
		// full coinbase as opposed to only the pertinent details needed
		// to create their own coinbase.
		var payAddr btcutil.Address
		if !useCoinbaseValue {
			payAddr = cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))]
		}

		// Create a new block template that has a coinbase which anyone
		// can redeem. This is only acceptable because the returned
		// block template doesn't include the coinbase, so the caller
		// will ultimately create their own coinbase which pays to the
		// appropriate address(es).
		blkTemplate, err := generator.NewBlockTemplate(payAddr)
		if err != nil {
			return internalRPCError("Failed to create new block "+
				"template: "+err.Error(), "")
		}
		template = blkTemplate
		msgBlock = template.Block
		targetDifficulty = fmt.Sprintf("%064x",
			blockchain.CompactToBig(msgBlock.Header.Bits))

		// Get the minimum allowed timestamp for the block based on the
		// median timestamp of the last several blocks per the chain
		// consensus rules.
		best := s.cfg.Chain.BestSnapshot()
		minTimestamp := mining.MinimumMedianTime(best)

		// Update work state to ensure another block template isn't
		// generated until needed.
		state.template = template
		state.lastGenerated = time.Now()
		state.lastTxUpdate = lastTxUpdate
		state.prevHash = latestHash
		state.minTimestamp = minTimestamp

		rpcsLog.Debugf("Generated block template (timestamp %v, "+
			"target %s, merkle root %s)",
			msgBlock.Header.Timestamp, targetDifficulty,
			msgBlock.Header.MerkleRoot)

		// Notify any clients that are long polling about the new
		// template.
		state.notifyLongPollers(latestHash, lastTxUpdate)
	} else {
		// At this point, there is a saved block template and another
		// request for a template was made, but either the available
		// transactions haven't changed or it hasn't been long enough to
		// trigger a new block template to be generated. So, update the
		// existing block template.

		// When the caller requires a full coinbase as opposed to only
		// the pertinent details needed to create their own coinbase,
		// add a payment address to the output of the coinbase of the
		// template if it doesn't already have one. Since this requires
		// mining addresses to be specified via the config, an error is
		// returned if none have been specified.
		if !useCoinbaseValue && !template.ValidPayAddress {
			// Choose a payment address at random.
			payToAddr := cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))]

			// Update the block coinbase output of the template to
			// pay to the randomly selected payment address.
			pkScript, err := txscript.PayToAddrScript(payToAddr)
			if err != nil {
				context := "Failed to create pay-to-addr script"
				return internalRPCError(err.Error(), context)
			}
			template.Block.Transactions[0].TxOut[0].PkScript = pkScript
			template.ValidPayAddress = true

			// Update the merkle root.
			block := btcutil.NewBlock(template.Block)
			merkles := blockchain.BuildMerkleTreeStore(block.Transactions(), false)
			template.Block.Header.MerkleRoot = *merkles[len(merkles)-1]
		}

		// Set locals for convenience.
		msgBlock = template.Block
		targetDifficulty = fmt.Sprintf("%064x",
			blockchain.CompactToBig(msgBlock.Header.Bits))

		// Update the time of the block template to the current time
		// while accounting for the median time of the past several
		// blocks per the chain consensus rules.
		generator.UpdateBlockTime(msgBlock)
		msgBlock.Header.Nonce = 0

		rpcsLog.Debugf("Updated block template (timestamp %v, "+
			"target %s)", msgBlock.Header.Timestamp,
			targetDifficulty)
	}

	return nil
}

// blockTemplateResult returns the current block template associated with the
// state as a btcjson.GetBlockTemplateResult that is ready to be encoded to JSON
// and returned to the caller.
//
// This function MUST be called with the state locked.
func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld *bool) (*btcjson.GetBlockTemplateResult, error) {
	// Ensure the timestamps are still in valid range for the template.
	// This should really only ever happen if the local clock is changed
	// after the template is generated, but it's important to avoid serving
	// invalid block templates.
	template := state.template
	msgBlock := template.Block
	header := &msgBlock.Header
	adjustedTime := state.timeSource.AdjustedTime()
	maxTime := adjustedTime.Add(time.Second * blockchain.MaxTimeOffsetSeconds)
	if header.Timestamp.After(maxTime) {
		return nil, &btcjson.RPCError{
			Code: btcjson.ErrRPCOutOfRange,
			Message: fmt.Sprintf("The template time is after the "+
				"maximum allowed time for a block - template "+
				"time %v, maximum time %v", adjustedTime,
				maxTime),
		}
	}

	// Convert each transaction in the block template to a template result
	// transaction. The result does not include the coinbase, so notice
	// the adjustments to the various lengths and indices.
	numTx := len(msgBlock.Transactions)
	transactions := make([]btcjson.GetBlockTemplateResultTx, 0, numTx-1)
	txIndex := make(map[chainhash.Hash]int64, numTx)
	for i, tx := range msgBlock.Transactions {
		txID := tx.TxHash()
		txIndex[txID] = int64(i)

		// Skip the coinbase transaction.
		if i == 0 {
			continue
		}

		// Create an array of 1-based indices to transactions that come
		// before this one in the transactions list which this one
		// depends on. This is necessary since the created block must
		// ensure proper ordering of the dependencies. A map is used
		// before creating the final array to prevent duplicate entries
		// when multiple inputs reference the same transaction.
		dependsMap := make(map[int64]struct{})
		for _, txIn := range tx.TxIn {
			if idx, ok := txIndex[txIn.PreviousOutPoint.Hash]; ok {
				dependsMap[idx] = struct{}{}
			}
		}
		depends := make([]int64, 0, len(dependsMap))
		for idx := range dependsMap {
			depends = append(depends, idx)
		}

		// Serialize the transaction for later conversion to hex.
		txBuf := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize()))
		if err := tx.Serialize(txBuf); err != nil {
			context := "Failed to serialize transaction"
			return nil, internalRPCError(err.Error(), context)
		}

		bTx := btcutil.NewTx(tx)
		resultTx := btcjson.GetBlockTemplateResultTx{
			Data:    hex.EncodeToString(txBuf.Bytes()),
			TxID:    txID.String(),
			Hash:    tx.WitnessHash().String(),
			Depends: depends,
			Fee:     template.Fees[i],
			SigOps:  template.SigOpCosts[i],
			Weight:  blockchain.GetTransactionWeight(bTx),
		}
		transactions = append(transactions, resultTx)
	}

	// Generate the block template reply. Note that the following mutations
	// are implied by the inclusion or omission of fields:
	//  Including MinTime -> time/decrement
	//  Omitting CoinbaseTxn -> coinbase, generation
	targetDifficulty := fmt.Sprintf("%064x", blockchain.CompactToBig(header.Bits))
	templateID := encodeTemplateID(state.prevHash, state.lastGenerated)
	reply := btcjson.GetBlockTemplateResult{
		Bits:          strconv.FormatInt(int64(header.Bits), 16),
		CurTime:       header.Timestamp.Unix(),
		Height:        int64(template.Height),
		PreviousHash:  header.PrevBlock.String(),
		WeightLimit:   blockchain.MaxBlockWeight,
		SigOpLimit:    blockchain.MaxBlockSigOpsCost,
		SizeLimit:     wire.MaxBlockPayload,
		Transactions:  transactions,
		Version:       header.Version,
		LongPollID:    templateID,
		SubmitOld:     submitOld,
		Target:        targetDifficulty,
		MinTime:       state.minTimestamp.Unix(),
		MaxTime:       maxTime.Unix(),
		Mutable:       gbtMutableFields,
		NonceRange:    gbtNonceRange,
		Capabilities:  gbtCapabilities,
		ClaimTrieHash: header.ClaimTrie.String(),
	}

	// If the generated block template includes transactions with witness
	// data, then include the witness commitment in the GBT result.
	if template.WitnessCommitment != nil {
		reply.DefaultWitnessCommitment = hex.EncodeToString(template.WitnessCommitment)
		reply.Rules = append(reply.Rules, "!segwit")
	}

	if useCoinbaseValue {
		reply.CoinbaseAux = gbtCoinbaseAux
		reply.CoinbaseValue = &msgBlock.Transactions[0].TxOut[0].Value
	} else {
		// Ensure the template has a valid payment address associated
		// with it when a full coinbase is requested.
		if !template.ValidPayAddress {
			return nil, &btcjson.RPCError{
				Code: btcjson.ErrRPCInternal.Code,
				Message: "A coinbase transaction has been " +
					"requested, but the server has not " +
					"been configured with any payment " +
					"addresses via --miningaddr",
			}
		}

		// Serialize the transaction for conversion to hex.
		tx := msgBlock.Transactions[0]
		txBuf := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize()))
		if err := tx.Serialize(txBuf); err != nil {
			context := "Failed to serialize transaction"
			return nil, internalRPCError(err.Error(), context)
		}

		resultTx := btcjson.GetBlockTemplateResultTx{
			Data:    hex.EncodeToString(txBuf.Bytes()),
			Hash:    tx.TxHash().String(),
			Depends: []int64{},
			Fee:     template.Fees[0],
			SigOps:  template.SigOpCosts[0],
		}

		reply.CoinbaseTxn = &resultTx
	}

	return &reply, nil
}
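
// The LongPollID assigned above is the value a mining client echoes back in a
// later getblocktemplate request; handleGetBlockTemplateLongPoll below decodes
// it to decide whether the client's current template is already stale.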
|
|
|
|
|
2016-02-22 11:35:54 +01:00
|
|
|
// handleGetBlockTemplateLongPoll is a helper for handleGetBlockTemplateRequest
// which deals with handling long polling for block templates.  When a caller
// sends a request with a long poll ID that was previously returned, a response
// is not sent until the caller should stop working on the previous block
// template in favor of the new one.  In particular, this is the case when the
// old block template is no longer valid due to a solution already being found
// and added to the block chain, or new transactions have shown up and some
// time has passed without finding a solution.
//
// See https://en.bitcoin.it/wiki/BIP_0022 for more details.
func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbaseValue bool, closeChan <-chan struct{}) (interface{}, error) {
	state := s.gbtWorkState
	state.Lock()
	// The state unlock is intentionally not deferred here since it needs to
	// be manually unlocked before waiting for a notification about block
	// template changes.

	if err := state.updateBlockTemplate(s, useCoinbaseValue); err != nil {
		state.Unlock()
		return nil, err
	}

	// Just return the current block template if the long poll ID provided by
	// the caller is invalid.
	prevHash, lastGenerated, err := decodeTemplateID(longPollID)
	if err != nil {
		result, err := state.blockTemplateResult(useCoinbaseValue, nil)
		if err != nil {
			state.Unlock()
			return nil, err
		}

		state.Unlock()
		return result, nil
	}

	// Return the block template now if the specific block template
	// identified by the long poll ID no longer matches the current block
	// template as this means the provided template is stale.
	prevTemplateHash := &state.template.Block.Header.PrevBlock
	if !prevHash.IsEqual(prevTemplateHash) ||
		lastGenerated != state.lastGenerated.Unix() {

		// Include whether or not it is valid to submit work against the
		// old block template depending on whether or not a solution has
		// already been found and added to the block chain.
		submitOld := prevHash.IsEqual(prevTemplateHash)
		result, err := state.blockTemplateResult(useCoinbaseValue,
			&submitOld)
		if err != nil {
			state.Unlock()
			return nil, err
		}

		state.Unlock()
		return result, nil
	}

	// Register the previous hash and last generated time for notifications.
	// Get a channel that will be notified when the template associated with
	// the provided ID is stale and a new block template should be returned to
	// the caller.
	longPollChan := state.templateUpdateChan(prevHash, lastGenerated)
	state.Unlock()

	select {
	// When the client closes before it's time to send a reply, just return
	// now so the goroutine doesn't hang around.
	case <-closeChan:
		return nil, ErrClientQuit

	// Wait until signal received to send the reply.
	case <-longPollChan:
		// Fallthrough
	}

	// Get the latest block template.
	state.Lock()
	defer state.Unlock()

	if err := state.updateBlockTemplate(s, useCoinbaseValue); err != nil {
		return nil, err
	}

	// Include whether or not it is valid to submit work against the old
	// block template depending on whether or not a solution has already
	// been found and added to the block chain.
	submitOld := prevHash.IsEqual(&state.template.Block.Header.PrevBlock)
	result, err := state.blockTemplateResult(useCoinbaseValue, &submitOld)
	if err != nil {
		return nil, err
	}

	return result, nil
}

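// Illustrative flow (not part of the original source): a miner first issues a
// plain getblocktemplate call, remembers the longpollid in the reply, and then
// re-issues the request echoing that ID, e.g. with a hypothetical params
// object such as
//
//	{"mode":"template","longpollid":"<id from the previous reply>"}
//
// That second call only returns once the referenced template has gone stale,
// and the submitold field in the new reply tells the miner whether work built
// on the old template is still worth submitting.
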
// handleGetBlockTemplateRequest is a helper for handleGetBlockTemplate which
// deals with generating and returning block templates to the caller.  It
// handles both long poll requests as specified by BIP 0022 as well as regular
// requests.  In addition, it detects the capabilities reported by the caller
// in regards to whether or not it supports creating its own coinbase (the
// coinbasetxn and coinbasevalue capabilities) and modifies the returned block
// template accordingly.
func handleGetBlockTemplateRequest(s *rpcServer, request *btcjson.TemplateRequest, closeChan <-chan struct{}) (interface{}, error) {
	// Extract the relevant passed capabilities and restrict the result to
	// either a coinbase value or a coinbase transaction object depending on
	// the request.  Default to only providing a coinbase value.
	useCoinbaseValue := true
	if request != nil {
		var hasCoinbaseValue, hasCoinbaseTxn bool
		for _, capability := range request.Capabilities {
			switch capability {
			case "coinbasetxn":
				hasCoinbaseTxn = true
			case "coinbasevalue":
				hasCoinbaseValue = true
			}
		}

		if hasCoinbaseTxn && !hasCoinbaseValue {
			useCoinbaseValue = false
		}
	}

	// When a coinbase transaction has been requested, respond with an error
	// if there are no addresses to pay the created block template to.
	if !useCoinbaseValue && len(cfg.miningAddrs) == 0 {
		return nil, &btcjson.RPCError{
			Code: btcjson.ErrRPCInternal.Code,
			Message: "A coinbase transaction has been requested, " +
				"but the server has not been configured with " +
				"any payment addresses via --miningaddr",
		}
	}

	// Return an error if there are no peers connected since there is no
	// way to relay a found block or receive transactions to work on.
	// However, allow this state when running in the regression test or
	// simulation test mode.
	if !(cfg.RegressionTest || cfg.SimNet) &&
		s.cfg.ConnMgr.ConnectedCount() == 0 {

		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCClientNotConnected,
			Message: "Bitcoin is not connected",
		}
	}

	// No point in generating or accepting work before the chain is synced.
	currentHeight := s.cfg.Chain.BestSnapshot().Height
	if currentHeight != 0 && !s.cfg.SyncMgr.IsCurrent() {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCClientInInitialDownload,
			Message: "Bitcoin is downloading blocks...",
		}
	}

	// When a long poll ID was provided, this is a long poll request by the
	// client to be notified when the block template referenced by the ID
	// should be replaced with a new one.
	if request != nil && request.LongPollID != "" {
		return handleGetBlockTemplateLongPoll(s, request.LongPollID,
			useCoinbaseValue, closeChan)
	}

	// Protect concurrent access when updating block templates.
	state := s.gbtWorkState
	state.Lock()
	defer state.Unlock()

	// Get and return a block template.  A new block template will be
	// generated when the current best block has changed or the transactions
	// in the memory pool have been updated and it has been at least five
	// seconds since the last template was generated.  Otherwise, the
	// timestamp for the existing block template is updated (and possibly
	// the difficulty on testnet per the consensus rules).
	if err := state.updateBlockTemplate(s, useCoinbaseValue); err != nil {
		return nil, err
	}
	return state.blockTemplateResult(useCoinbaseValue, nil)
}

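// Illustrative request (hypothetical values): a miner that wants the server to
// build the coinbase transaction for it advertises only the coinbasetxn
// capability, e.g.
//
//	{"mode":"template","capabilities":["coinbasetxn"]}
//
// which flips useCoinbaseValue to false above, so the reply carries a full
// CoinbaseTxn and the server must have been started with --miningaddr.
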
// chainErrToGBTErrString converts an error returned from btcchain to a string
// which matches the reasons and format described in BIP0022 for rejection
// reasons.
func chainErrToGBTErrString(err error) string {
	// When the passed error is not a RuleError, just return a generic
	// rejected string with the error text.
	ruleErr, ok := err.(blockchain.RuleError)
	if !ok {
		return "rejected: " + err.Error()
	}

	switch ruleErr.ErrorCode {
	case blockchain.ErrDuplicateBlock:
		return "duplicate"
	case blockchain.ErrBlockTooBig:
		return "bad-blk-length"
	case blockchain.ErrBlockWeightTooHigh:
		return "bad-blk-weight"
	case blockchain.ErrBlockVersionTooOld:
		return "bad-version"
	case blockchain.ErrInvalidTime:
		return "bad-time"
	case blockchain.ErrTimeTooOld:
		return "time-too-old"
	case blockchain.ErrTimeTooNew:
		return "time-too-new"
	case blockchain.ErrDifficultyTooLow:
		return "bad-diffbits"
	case blockchain.ErrUnexpectedDifficulty:
		return "bad-diffbits"
	case blockchain.ErrHighHash:
		return "high-hash"
	case blockchain.ErrBadMerkleRoot:
		return "bad-txnmrklroot"
	case blockchain.ErrBadCheckpoint:
		return "bad-checkpoint"
	case blockchain.ErrForkTooOld:
		return "fork-too-old"
	case blockchain.ErrCheckpointTimeTooOld:
		return "checkpoint-time-too-old"
	case blockchain.ErrNoTransactions:
		return "bad-txns-none"
	case blockchain.ErrNoTxInputs:
		return "bad-txns-noinputs"
	case blockchain.ErrNoTxOutputs:
		return "bad-txns-nooutputs"
	case blockchain.ErrTxTooBig:
		return "bad-txns-size"
	case blockchain.ErrBadTxOutValue:
		return "bad-txns-outputvalue"
	case blockchain.ErrDuplicateTxInputs:
		return "bad-txns-dupinputs"
	case blockchain.ErrBadTxInput:
		return "bad-txns-badinput"
	case blockchain.ErrMissingTxOut:
		return "bad-txns-missinginput"
	case blockchain.ErrUnfinalizedTx:
		return "bad-txns-unfinalizedtx"
	case blockchain.ErrDuplicateTx:
		return "bad-txns-duplicate"
	case blockchain.ErrOverwriteTx:
		return "bad-txns-overwrite"
	case blockchain.ErrImmatureSpend:
		return "bad-txns-maturity"
	case blockchain.ErrSpendTooHigh:
		return "bad-txns-highspend"
	case blockchain.ErrBadFees:
		return "bad-txns-fees"
	case blockchain.ErrTooManySigOps:
		return "high-sigops"
	case blockchain.ErrFirstTxNotCoinbase:
		return "bad-txns-nocoinbase"
	case blockchain.ErrMultipleCoinbases:
		return "bad-txns-multicoinbase"
	case blockchain.ErrBadCoinbaseScriptLen:
		return "bad-cb-length"
	case blockchain.ErrBadCoinbaseValue:
		return "bad-cb-value"
	case blockchain.ErrMissingCoinbaseHeight:
		return "bad-cb-height"
	case blockchain.ErrBadCoinbaseHeight:
		return "bad-cb-height"
	case blockchain.ErrScriptMalformed:
		return "bad-script-malformed"
	case blockchain.ErrScriptValidation:
		return "bad-script-validate"
	case blockchain.ErrUnexpectedWitness:
		return "unexpected-witness"
	case blockchain.ErrInvalidWitnessCommitment:
		return "bad-witness-nonce-size"
	case blockchain.ErrWitnessCommitmentMismatch:
		return "bad-witness-merkle-match"
	case blockchain.ErrPreviousBlockUnknown:
		return "prev-blk-not-found"
	case blockchain.ErrInvalidAncestorBlock:
		return "bad-prevblk"
	case blockchain.ErrPrevBlockNotBest:
		return "inconclusive-not-best-prvblk"
	}

	return "rejected: " + err.Error()
}

// handleGetBlockTemplateProposal is a helper for handleGetBlockTemplate which
// deals with block proposals.
//
// See https://en.bitcoin.it/wiki/BIP_0023 for more details.
func handleGetBlockTemplateProposal(s *rpcServer, request *btcjson.TemplateRequest) (interface{}, error) {
	hexData := request.Data
	if hexData == "" {
		return false, &btcjson.RPCError{
			Code: btcjson.ErrRPCType,
			Message: fmt.Sprintf("Data must contain the " +
				"hex-encoded serialized block that is being " +
				"proposed"),
		}
	}

	// Ensure the provided data is sane and deserialize the proposed block.
	if len(hexData)%2 != 0 {
		hexData = "0" + hexData
	}
	dataBytes, err := hex.DecodeString(hexData)
	if err != nil {
		return false, &btcjson.RPCError{
			Code: btcjson.ErrRPCDeserialization,
			Message: fmt.Sprintf("Data must be "+
				"hexadecimal string (not %q)", hexData),
		}
	}
	var msgBlock wire.MsgBlock
	if err := msgBlock.Deserialize(bytes.NewReader(dataBytes)); err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCDeserialization,
			Message: "Block decode failed: " + err.Error(),
		}
	}
	block := btcutil.NewBlock(&msgBlock)

	// Ensure the block is building from the expected previous block.
	expectedPrevHash := s.cfg.Chain.BestSnapshot().Hash
	prevHash := &block.MsgBlock().Header.PrevBlock
	if !expectedPrevHash.IsEqual(prevHash) {
		return "bad-prevblk", nil
	}

	if err := s.cfg.Chain.CheckConnectBlockTemplate(block); err != nil {
		if _, ok := err.(blockchain.RuleError); !ok {
			errStr := fmt.Sprintf("Failed to process block proposal: %v", err)
			rpcsLog.Error(errStr)
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCVerify,
				Message: errStr,
			}
		}

		rpcsLog.Infof("Rejected block proposal: %v", err)
		return chainErrToGBTErrString(err), nil
	}

	return nil, nil
}

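// Illustrative proposal (hypothetical data): a miner can sanity check a
// candidate block before mining on it by sending
//
//	{"mode":"proposal","data":"<hex-encoded serialized block>"}
//
// A null result means the block would connect to the current best chain;
// otherwise the reply is a BIP0022 rejection reason such as "bad-prevblk" or
// one of the strings produced by chainErrToGBTErrString above.
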
// handleGetBlockTemplate implements the getblocktemplate command.
//
// See https://en.bitcoin.it/wiki/BIP_0022 and
// https://en.bitcoin.it/wiki/BIP_0023 for more details.
func handleGetBlockTemplate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.GetBlockTemplateCmd)
	request := c.Request

	// Set the default mode and override it if supplied.
	mode := "template"
	if request != nil && request.Mode != "" {
		mode = request.Mode
	}

	switch mode {
	case "template":
		return handleGetBlockTemplateRequest(s, request, closeChan)
	case "proposal":
		return handleGetBlockTemplateProposal(s, request)
	}

	return nil, &btcjson.RPCError{
		Code:    btcjson.ErrRPCInvalidParameter,
		Message: "Invalid mode",
	}
}

// handleGetCFilter implements the getcfilter command.
func handleGetCFilter(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	if s.cfg.CfIndex == nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCNoCFIndex,
			Message: "The CF index must be enabled for this command",
		}
	}

	c := cmd.(*btcjson.GetCFilterCmd)
	hash, err := chainhash.NewHashFromStr(c.Hash)
	if err != nil {
		return nil, rpcDecodeHexError(c.Hash)
	}

	filterBytes, err := s.cfg.CfIndex.FilterByBlockHash(hash, c.FilterType)
	if err != nil {
		rpcsLog.Debugf("Could not find committed filter for %v: %v",
			hash, err)
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCBlockNotFound,
			Message: "Block not found",
		}
	}

	rpcsLog.Debugf("Found committed filter for %v", hash)
	return hex.EncodeToString(filterBytes), nil
}

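// Illustrative usage (hypothetical values): the command takes the hash of the
// block whose committed filter is wanted plus a numeric filter type defined by
// the wire package, and returns the filter itself as a hex string, e.g.
//
//	getcfilter "<block hash>" 0
//
// where 0 is assumed here to denote the regular (basic) filter type.
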
// handleGetCFilterHeader implements the getcfilterheader command.
func handleGetCFilterHeader(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	if s.cfg.CfIndex == nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCNoCFIndex,
			Message: "The CF index must be enabled for this command",
		}
	}

	c := cmd.(*btcjson.GetCFilterHeaderCmd)
	hash, err := chainhash.NewHashFromStr(c.Hash)
	if err != nil {
		return nil, rpcDecodeHexError(c.Hash)
	}

	headerBytes, err := s.cfg.CfIndex.FilterHeaderByBlockHash(hash, c.FilterType)
	if len(headerBytes) > 0 {
		rpcsLog.Debugf("Found header of committed filter for %v", hash)
	} else {
		rpcsLog.Debugf("Could not find header of committed filter for %v: %v",
			hash, err)
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCBlockNotFound,
			Message: "Block not found",
		}
	}

	hash.SetBytes(headerBytes)
	return hash.String(), nil
}

// handleGetConnectionCount implements the getconnectioncount command.
func handleGetConnectionCount(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	return s.cfg.ConnMgr.ConnectedCount(), nil
}

// handleGetCurrentNet implements the getcurrentnet command.
func handleGetCurrentNet(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	return s.cfg.ChainParams.Net, nil
}

// handleGetDifficulty implements the getdifficulty command.
func handleGetDifficulty(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	best := s.cfg.Chain.BestSnapshot()
	return getDifficultyRatio(best.Bits, s.cfg.ChainParams), nil
}

// handleGetGenerate implements the getgenerate command.
func handleGetGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	return s.cfg.CPUMiner.IsMining(), nil
}

// handleGetHashesPerSec implements the gethashespersec command.
func handleGetHashesPerSec(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	return int64(s.cfg.CPUMiner.HashesPerSecond()), nil
}

// handleGetHeaders implements the getheaders command.
//
// NOTE: This is a btcsuite extension originally ported from
// github.com/decred/dcrd.
func handleGetHeaders(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.GetHeadersCmd)

	// Fetch the requested headers from chain while respecting the provided
	// block locators and stop hash.
	blockLocators := make([]*chainhash.Hash, len(c.BlockLocators))
	for i := range c.BlockLocators {
		blockLocator, err := chainhash.NewHashFromStr(c.BlockLocators[i])
		if err != nil {
			return nil, rpcDecodeHexError(c.BlockLocators[i])
		}
		blockLocators[i] = blockLocator
	}
	var hashStop chainhash.Hash
	if c.HashStop != "" {
		err := chainhash.Decode(&hashStop, c.HashStop)
		if err != nil {
			return nil, rpcDecodeHexError(c.HashStop)
		}
	}
	headers := s.cfg.SyncMgr.LocateHeaders(blockLocators, &hashStop)

	// Return the serialized block headers as hex-encoded strings.
	hexBlockHeaders := make([]string, len(headers))
	var buf bytes.Buffer
	for i, h := range headers {
		err := h.Serialize(&buf)
		if err != nil {
			return nil, internalRPCError(err.Error(),
				"Failed to serialize block header")
		}
		hexBlockHeaders[i] = hex.EncodeToString(buf.Bytes())
		buf.Reset()
	}
	return hexBlockHeaders, nil
}

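// Illustrative note (not part of the original source): callers normally pass
// BlockLocators built from their own chain, newest hash first and ending at
// the genesis hash, plus an optional HashStop.  The sync manager returns the
// headers that follow the first locator hash it recognizes (up to the wire
// protocol's per-message header limit), and each header is handed back to the
// caller serialized and hex encoded as shown above.
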
// handleGetInfo implements the getinfo command. We only return the fields
// that are not related to wallet functionality.
func handleGetInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	best := s.cfg.Chain.BestSnapshot()
	ret := &btcjson.InfoChainResult{
		Version:         int32(1000000*appMajor + 10000*appMinor + 100*appPatch),
		ProtocolVersion: int32(maxProtocolVersion),
		Blocks:          best.Height,
		TimeOffset:      int64(s.cfg.TimeSource.Offset().Seconds()),
		Connections:     s.cfg.ConnMgr.ConnectedCount(),
		Proxy:           cfg.Proxy,
		Difficulty:      getDifficultyRatio(best.Bits, s.cfg.ChainParams),
		TestNet:         cfg.TestNet3,
		RelayFee:        cfg.minRelayTxFee.ToBTC(),
	}

	return ret, nil
}

// handleGetMempoolInfo implements the getmempoolinfo command.
func handleGetMempoolInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	mempoolTxns := s.cfg.TxMemPool.TxDescs()

	var numBytes int64
	for _, txD := range mempoolTxns {
		numBytes += int64(txD.Tx.MsgTx().SerializeSize())
	}

	ret := &btcjson.GetMempoolInfoResult{
		Size:  int64(len(mempoolTxns)),
		Bytes: numBytes,
	}

	return ret, nil
}

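// Illustrative reply (hypothetical values): with 42 transactions totalling
// 18,000 serialized bytes in the pool, the marshalled result would look
// roughly like
//
//	{"size":42,"bytes":18000}
//
// with the exact JSON field names coming from btcjson.GetMempoolInfoResult.
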
// handleGetMiningInfo implements the getmininginfo command. We only return the
// fields that are not related to wallet functionality.
func handleGetMiningInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	// Create a default getnetworkhashps command to use defaults and make
	// use of the existing getnetworkhashps handler.
	gnhpsCmd := btcjson.NewGetNetworkHashPSCmd(nil, nil)
	networkHashesPerSecIface, err := handleGetNetworkHashPS(s, gnhpsCmd,
		closeChan)
	if err != nil {
		return nil, err
	}
	networkHashesPerSec, ok := networkHashesPerSecIface.(int64)
	if !ok {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInternal.Code,
			Message: "networkHashesPerSec is not an int64",
		}
	}

	best := s.cfg.Chain.BestSnapshot()
	result := btcjson.GetMiningInfoResult{
		Blocks:             int64(best.Height),
		CurrentBlockSize:   best.BlockSize,
		CurrentBlockWeight: best.BlockWeight,
		CurrentBlockTx:     best.NumTxns,
		Difficulty:         getDifficultyRatio(best.Bits, s.cfg.ChainParams),
		Generate:           s.cfg.CPUMiner.IsMining(),
		GenProcLimit:       s.cfg.CPUMiner.NumWorkers(),
		HashesPerSec:       s.cfg.CPUMiner.HashesPerSecond(),
		NetworkHashPS:      float64(networkHashesPerSec),
		PooledTx:           uint64(s.cfg.TxMemPool.Count()),
		TestNet:            cfg.TestNet3,
	}
	return &result, nil
}

// handleGetNetTotals implements the getnettotals command.
func handleGetNetTotals(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	totalBytesRecv, totalBytesSent := s.cfg.ConnMgr.NetTotals()
	reply := &btcjson.GetNetTotalsResult{
		TotalBytesRecv: totalBytesRecv,
		TotalBytesSent: totalBytesSent,
		TimeMillis:     time.Now().UTC().UnixNano() / int64(time.Millisecond),
	}
	return reply, nil
}

// handleGetNetworkHashPS implements the getnetworkhashps command.
func handleGetNetworkHashPS(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	// Note: All valid error return paths should return an int64.
	// Literal zeros are inferred as int, and won't coerce to int64
	// because the return value is an interface{}.

	c := cmd.(*btcjson.GetNetworkHashPSCmd)

	// When the passed height is too high or zero, just return 0 now
	// since we can't reasonably calculate the number of network hashes
	// per second from invalid values.  When it's negative, use the current
	// best block height.
	best := s.cfg.Chain.BestSnapshot()
	endHeight := int32(-1)
	if c.Height != nil {
		endHeight = int32(*c.Height)
	}
	if endHeight > best.Height || endHeight == 0 {
		return int64(0), nil
	}
	if endHeight < 0 {
		endHeight = best.Height
	}

	// Calculate the number of blocks per retarget interval based on the
	// chain parameters.
	blocksPerRetarget := int32(s.cfg.ChainParams.TargetTimespan /
		s.cfg.ChainParams.TargetTimePerBlock)

	// Calculate the starting block height based on the passed number of
	// blocks.  When the passed value is negative, use the last block the
	// difficulty changed as the starting height.  Also make sure the
	// starting height is not before the beginning of the chain.
	numBlocks := int32(120)
	if c.Blocks != nil {
		numBlocks = int32(*c.Blocks)
	}
	var startHeight int32
	if numBlocks <= 0 {
		startHeight = endHeight - ((endHeight % blocksPerRetarget) + 1)
	} else {
		startHeight = endHeight - numBlocks
	}
	if startHeight < 0 {
		startHeight = 0
	}
	rpcsLog.Debugf("Calculating network hashes per second from %d to %d",
		startHeight, endHeight)

	// Find the min and max block timestamps as well as calculate the total
	// amount of work that happened between the start and end blocks.
	var minTimestamp, maxTimestamp time.Time
	totalWork := big.NewInt(0)
	for curHeight := startHeight; curHeight <= endHeight; curHeight++ {
		hash, err := s.cfg.Chain.BlockHashByHeight(curHeight)
		if err != nil {
			context := "Failed to fetch block hash"
			return nil, internalRPCError(err.Error(), context)
		}

		// Fetch the header from chain.
		header, err := s.cfg.Chain.HeaderByHash(hash)
		if err != nil {
			context := "Failed to fetch block header"
			return nil, internalRPCError(err.Error(), context)
		}

		if curHeight == startHeight {
			minTimestamp = header.Timestamp
			maxTimestamp = minTimestamp
		} else {
			totalWork.Add(totalWork, blockchain.CalcWork(header.Bits))

			if minTimestamp.After(header.Timestamp) {
				minTimestamp = header.Timestamp
			}
			if maxTimestamp.Before(header.Timestamp) {
				maxTimestamp = header.Timestamp
			}
		}
	}

	// Calculate the difference in seconds between the min and max block
	// timestamps and avoid division by zero in the case where there is no
	// time difference.
	timeDiff := int64(maxTimestamp.Sub(minTimestamp) / time.Second)
	if timeDiff == 0 {
		return int64(0), nil
	}

	hashesPerSec := new(big.Int).Div(totalWork, big.NewInt(timeDiff))
	return hashesPerSec.Int64(), nil
}

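// Worked example (illustrative numbers only): if the blocks between
// startHeight and endHeight contribute a totalWork of 6.0e15 hashes and their
// minimum and maximum timestamps are 600 seconds apart, the reported rate is
// 6.0e15 / 600 = 1.0e13 hashes per second.  Note that the first block in the
// window only anchors the timestamps; its work is intentionally not added to
// totalWork.
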
// handleGetNodeAddresses implements the getnodeaddresses command.
func handleGetNodeAddresses(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	c := cmd.(*btcjson.GetNodeAddressesCmd)

	count := int32(1)
	if c.Count != nil {
		count = *c.Count
		if count <= 0 {
			return nil, &btcjson.RPCError{
				Code:    btcjson.ErrRPCInvalidParameter,
				Message: "Address count out of range",
			}
		}
	}

	nodes := s.cfg.ConnMgr.NodeAddresses()
	if n := int32(len(nodes)); n < count {
		count = n
	}

	addresses := make([]*btcjson.GetNodeAddressesResult, 0, count)
	for _, node := range nodes[:count] {
		address := &btcjson.GetNodeAddressesResult{
			Time:     node.Timestamp.Unix(),
			Services: uint64(node.Services),
			Address:  node.IP.String(),
			Port:     node.Port,
		}
		addresses = append(addresses, address)
	}

	return addresses, nil
}

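// Illustrative reply (hypothetical values): asking for two addresses might
// yield entries along the lines of
//
//	[{"time":1650000000,"services":1033,"address":"203.0.113.5","port":9246}, ...]
//
// where every value above is made up; the real entries come from the address
// manager's list of known peers and the JSON field names are defined by
// btcjson.GetNodeAddressesResult.
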
// handleGetNetworkInfo implements the getnetworkinfo command.
func handleGetNetworkInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	ver := wire.MsgVersion{}
	_ = ver.AddUserAgent(userAgentName, userAgentVersion, cfg.UserAgentComments...)

	var localAddrs []btcjson.LocalAddressesResult
	var ipv4Reachable, ipv6Reachable bool
	for _, addr := range s.cfg.AddrMgr.LocalAddresses() {
		localAddrs = append(localAddrs, btcjson.LocalAddressesResult{
			Address: addr.NA.IP.String(),
			Port:    addr.NA.Port,
			Score:   int32(addr.Score),
		})
		if addr.NA.IP.To4() != nil {
			ipv4Reachable = true
		} else {
			ipv6Reachable = true
		}
	}

	onionProxy := cfg.Proxy
	if cfg.OnionProxy != "" {
		onionProxy = cfg.OnionProxy
	}

	var warnings string
	unknownRulesWarned := s.cfg.Chain.GetWarnings()
	if unknownRulesWarned {
		warnings = "Warning: Unknown new rules activated! "
	}

	var timeOffset int64
	if !s.cfg.SyncMgr.IsCurrent() {
		ss := s.cfg.Chain.BestSnapshot()
		bestHeader, err := s.cfg.Chain.HeaderByHash(&ss.Hash)
		if err != nil {
			return nil, err
		}
		timeOffset = int64(time.Since(bestHeader.Timestamp).Seconds())
	}

	reply := &btcjson.GetNetworkInfoResult{
		ProtocolVersion: int32(wire.ProtocolVersion),
		Version:         versionNumeric(),
		Connections:     s.cfg.ConnMgr.ConnectedCount(),
		IncrementalFee:  cfg.MinRelayTxFee,
		LocalAddresses:  localAddrs,
		LocalRelay:      !cfg.BlocksOnly,
		LocalServices:   s.cfg.Services.String(),
		NetworkActive:   true,
		Networks: []btcjson.NetworksResult{
			{
				Name:      "ipv4",
				Reachable: ipv4Reachable,
				Proxy:     cfg.Proxy,
			},
			{
				Name:      "ipv6",
				Reachable: ipv6Reachable,
				Proxy:     cfg.Proxy,
			},
			{
				Name:                      "onion",
				ProxyRandomizeCredentials: cfg.TorIsolation,
				Proxy:                     onionProxy,
				Reachable:                 cfg.Proxy != "" || cfg.OnionProxy != "",
			},
		},
		RelayFee:   cfg.MinRelayTxFee,
		SubVersion: ver.UserAgent,
		TimeOffset: timeOffset,
		Warnings:   warnings,
	}
	return reply, nil
}

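// Descriptive note (not part of the original source): TimeOffset here is not a
// clock offset; while the node is still syncing it reports how many seconds
// old the best known block header is, and it stays zero once the sync manager
// considers the chain current.
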
// handleGetPeerInfo implements the getpeerinfo command.
func handleGetPeerInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
	peers := s.cfg.ConnMgr.ConnectedPeers()
	syncPeerID := s.cfg.SyncMgr.SyncPeerID()
	infos := make([]*btcjson.GetPeerInfoResult, 0, len(peers))
	for _, p := range peers {
		statsSnap := p.ToPeer().StatsSnapshot()
		info := &btcjson.GetPeerInfoResult{
			ID:             statsSnap.ID,
			Addr:           statsSnap.Addr,
			AddrLocal:      p.ToPeer().LocalAddr().String(),
			Services:       fmt.Sprintf("%08d", uint64(statsSnap.Services)),
			RelayTxes:      !p.IsTxRelayDisabled(),
			LastSend:       statsSnap.LastSend.Unix(),
			LastRecv:       statsSnap.LastRecv.Unix(),
			BytesSent:      statsSnap.BytesSent,
			BytesRecv:      statsSnap.BytesRecv,
			ConnTime:       statsSnap.ConnTime.Unix(),
			PingTime:       float64(statsSnap.LastPingMicros),
			TimeOffset:     statsSnap.TimeOffset,
			Version:        statsSnap.Version,
			SubVer:         statsSnap.UserAgent,
			Inbound:        statsSnap.Inbound,
			StartingHeight: statsSnap.StartingHeight,
			CurrentHeight:  statsSnap.LastBlock,
			BanScore:       int32(p.BanScore()),
			FeeFilter:      p.FeeFilter(),
			SyncNode:       statsSnap.ID == syncPeerID,
		}
		if p.ToPeer().LastPingNonce() != 0 {
			wait := float64(time.Since(statsSnap.LastPingTime).Nanoseconds())
			// We actually want microseconds.
			info.PingWait = wait / 1000
		}
		infos = append(infos, info)
	}
	return infos, nil
}

// handleGetRawMempool implements the getrawmempool command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleGetRawMempool(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2013-12-11 18:32:16 +01:00
|
|
|
c := cmd.(*btcjson.GetRawMempoolCmd)
|
2016-04-02 23:58:01 +02:00
|
|
|
mp := s.cfg.TxMemPool
|
2013-12-11 18:32:16 +01:00
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
if c.Verbose != nil && *c.Verbose {
|
2016-08-19 18:08:37 +02:00
|
|
|
return mp.RawMempoolVerbose(), nil
|
2013-12-11 18:32:16 +01:00
|
|
|
}
|
2013-12-25 19:28:54 +01:00
|
|
|
|
|
|
|
// The response is simply an array of the transaction hashes if the
|
|
|
|
// verbose flag is not set.
|
2016-08-19 18:08:37 +02:00
|
|
|
descs := mp.TxDescs()
|
2013-12-11 18:32:16 +01:00
|
|
|
hashStrings := make([]string, len(descs))
|
2013-12-17 15:02:35 +01:00
|
|
|
for i := range hashStrings {
|
2016-08-08 21:04:33 +02:00
|
|
|
hashStrings[i] = descs[i].Tx.Hash().String()
|
2013-10-29 16:42:34 +01:00
|
|
|
}
|
2013-12-11 18:32:16 +01:00
|
|
|
|
2013-10-29 16:42:34 +01:00
|
|
|
return hashStrings, nil
|
|
|
|
}
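The handler above switches between two response shapes based solely on the optional verbose flag of the decoded command: the object map produced by RawMempoolVerbose, or a bare array of transaction hash strings. The following sketch shows the JSON-RPC request body that selects each shape; the helper name is illustrative only and it assumes nothing beyond the encoding/json package already imported by this file.
// buildGetRawMempoolRequest is an illustrative sketch of the request body
// decoded into btcjson.GetRawMempoolCmd above. With verbose=false the reply
// is a plain array of transaction hashes; with verbose=true it is the
// verbose object map.
func buildGetRawMempoolRequest(verbose bool) ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"jsonrpc": "1.0",
		"id":      1,
		"method":  "getrawmempool",
		"params":  []interface{}{verbose},
	})
}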
|
|
|
|
|
|
|
|
// handleGetRawTransaction implements the getrawtransaction command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2013-10-29 16:42:34 +01:00
|
|
|
c := cmd.(*btcjson.GetRawTransactionCmd)
|
2013-11-12 20:03:23 +01:00
|
|
|
|
2016-08-08 21:04:33 +02:00
|
|
|
// Convert the provided transaction hash hex to a Hash.
|
|
|
|
txHash, err := chainhash.NewHashFromStr(c.Txid)
|
2013-12-27 06:22:41 +01:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, rpcDecodeHexError(c.Txid)
|
2013-12-27 06:22:41 +01:00
|
|
|
}
|
|
|
|
|
2015-08-26 06:03:18 +02:00
|
|
|
verbose := false
|
|
|
|
if c.Verbose != nil {
|
|
|
|
verbose = *c.Verbose != 0
|
|
|
|
}
|
|
|
|
|
2016-02-19 05:51:18 +01:00
|
|
|
// Try to fetch the transaction from the memory pool and if that fails,
|
|
|
|
// try the block database.
|
|
|
|
var mtx *wire.MsgTx
|
2016-08-08 21:04:33 +02:00
|
|
|
var blkHash *chainhash.Hash
|
2016-02-19 05:51:18 +01:00
|
|
|
var blkHeight int32
|
2016-04-02 23:58:01 +02:00
|
|
|
tx, err := s.cfg.TxMemPool.FetchTransaction(txHash)
|
2013-12-08 20:57:14 +01:00
|
|
|
if err != nil {
|
2016-04-02 23:58:01 +02:00
|
|
|
if s.cfg.TxIndex == nil {
|
2016-02-19 05:51:18 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCNoTxInfo,
|
|
|
|
Message: "The transaction index must be " +
|
|
|
|
"enabled to query the blockchain " +
|
|
|
|
"(specify --txindex)",
|
|
|
|
}
|
2014-07-03 17:30:46 +02:00
|
|
|
}
|
2013-10-29 01:43:09 +01:00
|
|
|
|
2016-02-19 05:51:18 +01:00
|
|
|
// Look up the location of the transaction.
|
2016-04-02 23:58:01 +02:00
|
|
|
blockRegion, err := s.cfg.TxIndex.TxBlockRegion(txHash)
|
2013-12-27 06:22:41 +01:00
|
|
|
if err != nil {
|
2016-02-19 05:51:18 +01:00
|
|
|
context := "Failed to retrieve transaction location"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
if blockRegion == nil {
|
|
|
|
return nil, rpcNoTxInfoError(txHash)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load the raw transaction bytes from the database.
|
|
|
|
var txBytes []byte
|
2016-04-02 23:58:01 +02:00
|
|
|
err = s.cfg.DB.View(func(dbTx database.Tx) error {
|
2016-02-19 05:51:18 +01:00
|
|
|
var err error
|
|
|
|
txBytes, err = dbTx.FetchBlockRegion(blockRegion)
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, rpcNoTxInfoError(txHash)
|
|
|
|
}
|
|
|
|
|
|
|
|
// When the verbose flag isn't set, simply return the serialized
|
|
|
|
// transaction as a hex-encoded string. This is done here to
|
|
|
|
// avoid deserializing it only to reserialize it again later.
|
|
|
|
if !verbose {
|
|
|
|
return hex.EncodeToString(txBytes), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Grab the block height.
|
|
|
|
blkHash = blockRegion.Hash
|
2016-04-02 23:58:01 +02:00
|
|
|
blkHeight, err = s.cfg.Chain.BlockHeightByHash(blkHash)
|
2016-02-19 05:51:18 +01:00
|
|
|
if err != nil {
|
|
|
|
context := "Failed to retrieve block height"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deserialize the transaction
|
|
|
|
var msgTx wire.MsgTx
|
|
|
|
err = msgTx.Deserialize(bytes.NewReader(txBytes))
|
|
|
|
if err != nil {
|
|
|
|
context := "Failed to deserialize transaction"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
mtx = &msgTx
|
|
|
|
} else {
|
|
|
|
// When the verbose flag isn't set, simply return the
|
|
|
|
// network-serialized transaction as a hex-encoded string.
|
|
|
|
if !verbose {
|
|
|
|
// Note that this is intentionally not directly
|
|
|
|
// returning because the first return value is a
|
|
|
|
// string and it would result in returning an empty
|
|
|
|
// string to the client instead of nothing (nil) in the
|
|
|
|
// case of an error.
|
|
|
|
mtxHex, err := messageToHex(tx.MsgTx())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return mtxHex, nil
|
2013-12-27 06:22:41 +01:00
|
|
|
}
|
2016-02-19 05:51:18 +01:00
|
|
|
|
|
|
|
mtx = tx.MsgTx()
|
2013-12-27 06:22:41 +01:00
|
|
|
}
|
|
|
|
|
2015-08-26 06:03:18 +02:00
|
|
|
// The verbose flag is set, so generate the JSON object and return it.
|
2016-02-19 05:51:18 +01:00
|
|
|
var blkHeader *wire.BlockHeader
|
|
|
|
var blkHashStr string
|
|
|
|
var chainHeight int32
|
|
|
|
if blkHash != nil {
|
2017-01-31 07:36:15 +01:00
|
|
|
// Fetch the header from chain.
|
2018-08-02 01:50:56 +02:00
|
|
|
header, err := s.cfg.Chain.HeaderByHash(blkHash)
|
2016-02-19 05:51:18 +01:00
|
|
|
if err != nil {
|
|
|
|
context := "Failed to fetch block header"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
|
|
|
|
blkHeader = &header
|
|
|
|
blkHashStr = blkHash.String()
|
2016-04-02 23:58:01 +02:00
|
|
|
chainHeight = s.cfg.Chain.BestSnapshot().Height
|
2016-02-19 05:51:18 +01:00
|
|
|
}
|
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
rawTxn, err := createTxRawResult(s.cfg.ChainParams, mtx, txHash.String(),
|
|
|
|
blkHeader, blkHashStr, blkHeight, chainHeight)
|
2013-12-08 20:57:14 +01:00
|
|
|
if err != nil {
|
2014-12-22 04:48:40 +01:00
|
|
|
return nil, err
|
2013-12-08 20:57:14 +01:00
|
|
|
}
|
|
|
|
return *rawTxn, nil
|
|
|
|
}
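Note that the verbose parameter of getrawtransaction is numeric rather than boolean (the handler tests *c.Verbose != 0), and that the non-verbose database path returns the stored bytes without ever deserializing them. A minimal sketch of that fast path follows; the helper name is hypothetical and only the bytes, hex, and wire packages already imported by this file are assumed.
// rawTxOrMsg mirrors the branch above: when verbose output was not requested
// the serialized bytes are hex-encoded and returned directly, avoiding a
// deserialize/reserialize round trip; otherwise the bytes are deserialized
// into a wire.MsgTx for createTxRawResult.
func rawTxOrMsg(txBytes []byte, verbose bool) (string, *wire.MsgTx, error) {
	if !verbose {
		return hex.EncodeToString(txBytes), nil, nil
	}
	var msgTx wire.MsgTx
	if err := msgTx.Deserialize(bytes.NewReader(txBytes)); err != nil {
		return "", nil, err
	}
	return "", &msgTx, nil
}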
|
|
|
|
|
2014-07-05 02:42:45 +02:00
|
|
|
// handleGetTxOut handles gettxout commands.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2014-07-05 02:42:45 +02:00
|
|
|
c := cmd.(*btcjson.GetTxOutCmd)
|
|
|
|
|
2016-08-08 21:04:33 +02:00
|
|
|
// Convert the provided transaction hash hex to a Hash.
|
|
|
|
txHash, err := chainhash.NewHashFromStr(c.Txid)
|
2014-07-05 02:42:45 +02:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, rpcDecodeHexError(c.Txid)
|
2014-07-05 02:42:45 +02:00
|
|
|
}
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// If requested and the tx is available in the mempool try to fetch it
|
|
|
|
// from there, otherwise attempt to fetch from the block database.
|
2016-08-08 21:04:33 +02:00
|
|
|
var bestBlockHash string
|
2015-08-08 04:20:49 +02:00
|
|
|
var confirmations int32
|
2015-08-26 06:03:18 +02:00
|
|
|
var value int64
|
|
|
|
var pkScript []byte
|
|
|
|
var isCoinbase bool
|
2015-02-21 05:34:57 +01:00
|
|
|
includeMempool := true
|
|
|
|
if c.IncludeMempool != nil {
|
|
|
|
includeMempool = *c.IncludeMempool
|
|
|
|
}
|
|
|
|
// TODO: This is racy. It should attempt to fetch it directly and check
|
|
|
|
// the error.
|
2016-04-02 23:58:01 +02:00
|
|
|
if includeMempool && s.cfg.TxMemPool.HaveTransaction(txHash) {
|
|
|
|
tx, err := s.cfg.TxMemPool.FetchTransaction(txHash)
|
2014-07-05 02:42:45 +02:00
|
|
|
if err != nil {
|
2016-02-19 05:51:18 +01:00
|
|
|
return nil, rpcNoTxInfoError(txHash)
|
2014-07-05 02:42:45 +02:00
|
|
|
}
|
2015-08-26 06:03:18 +02:00
|
|
|
|
|
|
|
mtx := tx.MsgTx()
|
|
|
|
if c.Vout > uint32(len(mtx.TxOut)-1) {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
2015-08-26 06:03:18 +02:00
|
|
|
Code: btcjson.ErrRPCInvalidTxVout,
|
2016-10-28 06:21:58 +02:00
|
|
|
Message: "Output index number (vout) does not " +
|
2015-08-26 06:03:18 +02:00
|
|
|
"exist for transaction.",
|
2015-02-21 05:34:57 +01:00
|
|
|
}
|
2014-07-05 02:42:45 +02:00
|
|
|
}
|
|
|
|
|
2015-08-26 06:03:18 +02:00
|
|
|
txOut := mtx.TxOut[c.Vout]
|
|
|
|
if txOut == nil {
|
|
|
|
errStr := fmt.Sprintf("Output index: %d for txid: %s "+
|
|
|
|
"does not exist", c.Vout, txHash)
|
|
|
|
return nil, internalRPCError(errStr, "")
|
|
|
|
}
|
2014-07-05 02:42:45 +02:00
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
best := s.cfg.Chain.BestSnapshot()
|
2016-08-08 21:04:33 +02:00
|
|
|
bestBlockHash = best.Hash.String()
|
2015-08-26 06:03:18 +02:00
|
|
|
confirmations = 0
|
|
|
|
value = txOut.Value
|
|
|
|
pkScript = txOut.PkScript
|
|
|
|
isCoinbase = blockchain.IsCoinBaseTx(mtx)
|
|
|
|
} else {
|
multi: Rework utxoset/view to use outpoints.
This modifies the utxoset in the database and related UtxoViewpoint to
store and work with unspent transaction outputs on a per-output basis
instead of at a transaction level. This was inspired by similar recent
changes in Bitcoin Core.
The primary motivation is to simplify the code, pave the way for a
utxo cache, and generally focus on optimizing runtime performance.
The tradeoff is that this approach does somewhat increase the size of
the serialized utxoset since it means that the transaction hash is
duplicated for each output as a part of the key and some additional
details such as whether the containing transaction is a coinbase and the
block height it was a part of are duplicated in each output.
However, in practice, the size difference isn't all that large, disk
space is relatively cheap, certainly cheaper than memory, and it is much
more important to provide more efficient runtime operation since that is
the ultimate purpose of the daemon.
While performing this conversion, it also simplifies the code to remove
the transaction version information from the utxoset as well as the
spend journal. The logic for only serializing it under certain
circumstances is complicated and it isn't actually used anywhere aside
from the gettxout RPC where it also isn't used by anything important
either. Consequently, this also removes the version field of the
gettxout RPC result.
The utxos in the database are automatically migrated to the new format
with this commit and it is possible to interrupt and resume the
migration process.
Finally, it also updates the tests for the new format and adds a new
function to the tests to convert the old test data to the new format for
convenience. The data has already been converted and updated in the
commit.
An overview of the changes are as follows:
- Remove transaction version from both spent and unspent output entries
- Update utxo serialization format to exclude the version
- Modify the spend journal serialization format
- The old version field is now reserved and always stores zero and
ignores it when reading
- This allows old entries to be used by new code without having to
migrate the entire spend journal
- Remove version field from gettxout RPC result
- Convert UtxoEntry to represent a specific utxo instead of a
transaction with all remaining utxos
- Optimize for memory usage with an eye towards a utxo cache
- Combine details such as whether the txout was contained in a
coinbase, is spent, and is modified into a single packed field of
bit flags
- Align entry fields to eliminate extra padding since ultimately
there will be a lot of these in memory
- Introduce a free list for serializing an outpoint to the database
key format to significantly reduce pressure on the GC
- Update all related functions that previously dealt with transaction
hashes to accept outpoints instead
- Update all callers accordingly
- Only add individually requested outputs from the mempool when
constructing a mempool view
- Modify the spend journal to always store the block height and coinbase
information with every spent txout
- Introduce code to handle fetching the missing information from
another utxo from the same transaction in the event an old style
entry is encountered
- Make use of a database cursor with seek to do this much more
efficiently than testing every possible output
- Always decompress data loaded from the database now that a utxo entry
only consists of a specific output
- Introduce upgrade code to migrate the utxo set to the new format
- Store versions of the utxoset and spend journal buckets
- Allow migration process to be interrupted and resumed
- Update all tests to expect the correct encodings, remove tests that no
longer apply, and add new ones for the new expected behavior
- Convert old tests for the legacy utxo format deserialization code to
test the new function that is used during upgrade
- Update the utxostore test data and add function that was used to
convert it
- Introduce a few new functions on UtxoViewpoint
- AddTxOut for adding an individual txout versus all of them
- addTxOut to handle the common code between the new AddTxOut and
existing AddTxOuts
- RemoveEntry for removing an individual txout
- fetchEntryByHash for fetching any remaining utxo for a given
transaction hash
2017-09-03 09:59:15 +02:00
|
|
|
out := wire.OutPoint{Hash: *txHash, Index: c.Vout}
|
|
|
|
entry, err := s.cfg.Chain.FetchUtxoEntry(out)
|
2014-07-05 02:42:45 +02:00
|
|
|
if err != nil {
|
2016-02-19 05:51:18 +01:00
|
|
|
return nil, rpcNoTxInfoError(txHash)
|
2014-07-05 02:42:45 +02:00
|
|
|
}
|
|
|
|
|
2015-08-26 06:03:18 +02:00
|
|
|
// To match the behavior of the reference client, return nil
|
|
|
|
// (JSON null) if the transaction output is spent by another
|
|
|
|
// transaction already in the main chain. Mined transactions
|
|
|
|
// that are spent by a mempool transaction are not affected by
|
|
|
|
// this.
|
2017-09-03 09:59:15 +02:00
|
|
|
if entry == nil || entry.IsSpent() {
|
2015-08-26 06:03:18 +02:00
|
|
|
return nil, nil
|
2015-02-21 05:34:57 +01:00
|
|
|
}
|
2014-07-05 02:42:45 +02:00
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
best := s.cfg.Chain.BestSnapshot()
|
2016-08-08 21:04:33 +02:00
|
|
|
bestBlockHash = best.Hash.String()
|
2015-08-26 06:03:18 +02:00
|
|
|
confirmations = 1 + best.Height - entry.BlockHeight()
|
2017-09-03 09:59:15 +02:00
|
|
|
value = entry.Amount()
|
|
|
|
pkScript = entry.PkScript()
|
2015-08-26 06:03:18 +02:00
|
|
|
isCoinbase = entry.IsCoinBase()
|
2014-07-24 00:08:25 +02:00
|
|
|
}
|
|
|
|
|
2014-07-05 02:42:45 +02:00
|
|
|
// Disassemble script into single line printable format.
|
|
|
|
// The disassembled string will contain [error] inline if the script
|
|
|
|
// doesn't fully parse, so ignore the error here.
|
2015-08-26 06:03:18 +02:00
|
|
|
disbuf, _ := txscript.DisasmString(pkScript)
|
2014-07-05 02:42:45 +02:00
|
|
|
|
2021-07-23 18:13:31 +02:00
|
|
|
script := txscript.StripClaimScriptPrefix(pkScript)
|
|
|
|
|
2014-07-05 02:42:45 +02:00
|
|
|
// Get further info about the script.
|
|
|
|
// Ignore the error here since an error means the script couldn't parse
|
|
|
|
// and there is no additional information about it anyways.
|
2021-07-23 18:13:31 +02:00
|
|
|
scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(script,
|
2016-04-02 23:58:01 +02:00
|
|
|
s.cfg.ChainParams)
|
2014-07-05 02:42:45 +02:00
|
|
|
addresses := make([]string, len(addrs))
|
|
|
|
for i, addr := range addrs {
|
|
|
|
addresses[i] = addr.EncodeAddress()
|
|
|
|
}
|
|
|
|
|
|
|
|
txOutReply := &btcjson.GetTxOutResult{
|
2016-08-08 21:04:33 +02:00
|
|
|
BestBlock: bestBlockHash,
|
2015-08-08 04:20:49 +02:00
|
|
|
Confirmations: int64(confirmations),
|
2015-08-26 06:03:18 +02:00
|
|
|
Value: btcutil.Amount(value).ToBTC(),
|
2014-07-05 02:42:45 +02:00
|
|
|
ScriptPubKey: btcjson.ScriptPubKeyResult{
|
|
|
|
Asm: disbuf,
|
2015-08-26 06:03:18 +02:00
|
|
|
Hex: hex.EncodeToString(pkScript),
|
2014-07-05 02:42:45 +02:00
|
|
|
ReqSigs: int32(reqSigs),
|
|
|
|
Addresses: addresses,
|
|
|
|
},
|
2015-08-26 06:03:18 +02:00
|
|
|
Coinbase: isCoinbase,
|
2014-07-05 02:42:45 +02:00
|
|
|
}
|
2021-07-23 18:13:31 +02:00
|
|
|
|
|
|
|
if len(script) < len(pkScript) {
|
|
|
|
txOutReply.ScriptPubKey.IsClaim = pkScript[0] == txscript.OP_CLAIMNAME || pkScript[0] == txscript.OP_UPDATECLAIM
|
|
|
|
txOutReply.ScriptPubKey.IsSupport = pkScript[0] == txscript.OP_SUPPORTCLAIM
|
|
|
|
txOutReply.ScriptPubKey.SubType = scriptClass.String()
|
|
|
|
txOutReply.ScriptPubKey.Type = txscript.ScriptClass.String(0)
|
|
|
|
} else {
|
|
|
|
txOutReply.ScriptPubKey.Type = scriptClass.String()
|
|
|
|
}
|
|
|
|
|
2014-07-05 02:42:45 +02:00
|
|
|
return txOutReply, nil
|
|
|
|
}
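The claim handling above relies on a simple invariant: StripClaimScriptPrefix only shortens a script that actually begins with a claim prefix, so len(script) < len(pkScript) identifies LBRY claim outputs and the first opcode distinguishes their kind. A hedged restatement of that check as a standalone helper (the name is illustrative and not used elsewhere in this file):
// classifyClaimPrefix reports whether a pkScript carries a claim/update
// prefix or a support prefix, mirroring the IsClaim/IsSupport assignments in
// handleGetTxOut above.
func classifyClaimPrefix(pkScript []byte) (isClaim, isSupport bool) {
	script := txscript.StripClaimScriptPrefix(pkScript)
	if len(pkScript) == 0 || len(script) >= len(pkScript) {
		return false, false
	}
	switch pkScript[0] {
	case txscript.OP_CLAIMNAME, txscript.OP_UPDATECLAIM:
		return true, false
	case txscript.OP_SUPPORTCLAIM:
		return false, true
	}
	return false, false
}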
|
|
|
|
|
2021-10-28 22:29:03 +02:00
|
|
|
// handleInvalidateBlock implements the invalidateblock command
|
|
|
|
func handleInvalidateBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
|
|
|
c := cmd.(*btcjson.InvalidateBlockCmd)
|
|
|
|
|
|
|
|
hash, err := chainhash.NewHashFromStr(c.BlockHash)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, s.cfg.Chain.InvalidateBlock(hash)
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleReconsiderBlock implements the reconsiderblock command
|
|
|
|
func handleReconsiderBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
|
|
|
c := cmd.(*btcjson.ReconsiderBlockCmd)
|
|
|
|
|
|
|
|
hash, err := chainhash.NewHashFromStr(c.BlockHash)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, s.cfg.Chain.ReconsiderBlock(hash)
|
|
|
|
}
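The two handlers above are thin wrappers: each parses a hex block hash and delegates to the chain instance. A sketch of how they pair up, assuming only the same InvalidateBlock and ReconsiderBlock methods used above (the helper itself is illustrative and not part of the RPC surface):
// reorgOutAndBack invalidates a block, forcing the chain to reorganize away
// from it, and then clears the invalid status again so the block is eligible
// for reconnection.
func reorgOutAndBack(s *rpcServer, hashStr string) error {
	hash, err := chainhash.NewHashFromStr(hashStr)
	if err != nil {
		return err
	}
	if err := s.cfg.Chain.InvalidateBlock(hash); err != nil {
		return err
	}
	return s.cfg.Chain.ReconsiderBlock(hash)
}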
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// handleHelp implements the help command.
|
|
|
|
func handleHelp(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
|
|
|
c := cmd.(*btcjson.HelpCmd)
|
2014-01-16 15:45:17 +01:00
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// Provide a usage overview of all commands when no specific command
|
|
|
|
// was specified.
|
|
|
|
var command string
|
|
|
|
if c.Command != nil {
|
|
|
|
command = *c.Command
|
2014-01-16 15:45:17 +01:00
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
if command == "" {
|
|
|
|
usage, err := s.helpCacher.rpcUsage(false)
|
|
|
|
if err != nil {
|
|
|
|
context := "Failed to generate RPC usage"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
2014-01-16 15:45:17 +01:00
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
return usage, nil
|
2014-01-16 15:45:17 +01:00
|
|
|
}
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// Check that the command asked for is supported and implemented. Only
|
|
|
|
// search the main list of handlers since help should not be provided
|
|
|
|
// for commands that are unimplemented or related to wallet
|
2014-01-16 15:45:17 +01:00
|
|
|
// functionality.
|
2015-02-21 05:34:57 +01:00
|
|
|
if _, ok := rpcHandlers[command]; !ok {
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidParameter,
|
|
|
|
Message: "Unknown command: " + command,
|
|
|
|
}
|
2014-01-16 15:45:17 +01:00
|
|
|
}
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// Get the help for the command.
|
|
|
|
help, err := s.helpCacher.rpcMethodHelp(command)
|
|
|
|
if err != nil {
|
|
|
|
context := "Failed to generate help"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
return help, nil
|
2014-01-16 15:45:17 +01:00
|
|
|
}
|
|
|
|
|
2014-01-21 16:03:15 +01:00
|
|
|
// handlePing implements the ping command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handlePing(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2014-01-21 16:03:15 +01:00
|
|
|
// Ask server to ping \o_
|
2015-02-05 22:16:39 +01:00
|
|
|
nonce, err := wire.RandomUint64()
|
2014-01-21 16:03:15 +01:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, internalRPCError("Not sending ping - failed to "+
|
|
|
|
"generate nonce: "+err.Error(), "")
|
2014-01-21 16:03:15 +01:00
|
|
|
}
|
2016-04-02 23:58:01 +02:00
|
|
|
s.cfg.ConnMgr.BroadcastMessage(wire.NewMsgPing(nonce))
|
2014-01-21 16:03:15 +01:00
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2016-02-19 05:51:18 +01:00
|
|
|
// retrievedTx represents a transaction that was either loaded from the
|
|
|
|
// transaction memory pool or from the database. When a transaction is loaded
|
|
|
|
// from the database, it is loaded with the raw serialized bytes while the
|
|
|
|
// mempool has the fully deserialized structure. This structure therefore will
|
|
|
|
// have one of the two fields set depending on where it was retrieved from.
|
|
|
|
// This is mainly done for efficiency to avoid extra serialization steps when
|
|
|
|
// possible.
|
|
|
|
type retrievedTx struct {
|
|
|
|
txBytes []byte
|
2016-08-08 21:04:33 +02:00
|
|
|
blkHash *chainhash.Hash // Only set when transaction is in a block.
|
2016-02-19 05:51:18 +01:00
|
|
|
tx *btcutil.Tx
|
|
|
|
}
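The struct's two payload fields are mutually exclusive, and the code later in this file branches on which one is set. A small illustrative helper capturing that convention (not used by the handlers themselves):
// msgTxFromRetrieved returns the deserialized transaction for a retrievedTx:
// mempool entries already carry a btcutil.Tx, while database entries carry
// only the raw bytes and are deserialized on demand.
func msgTxFromRetrieved(rtx *retrievedTx) (*wire.MsgTx, error) {
	if rtx.tx != nil {
		return rtx.tx.MsgTx(), nil
	}
	var msgTx wire.MsgTx
	if err := msgTx.Deserialize(bytes.NewReader(rtx.txBytes)); err != nil {
		return nil, err
	}
	return &msgTx, nil
}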
|
|
|
|
|
|
|
|
// fetchInputTxos fetches the outpoints from all transactions referenced by the
|
|
|
|
// inputs to the passed transaction by checking the transaction mempool first
|
|
|
|
// then the transaction index for those already mined into blocks.
|
|
|
|
func fetchInputTxos(s *rpcServer, tx *wire.MsgTx) (map[wire.OutPoint]wire.TxOut, error) {
|
2016-04-02 23:58:01 +02:00
|
|
|
mp := s.cfg.TxMemPool
|
2016-02-19 05:51:18 +01:00
|
|
|
originOutputs := make(map[wire.OutPoint]wire.TxOut)
|
|
|
|
for txInIndex, txIn := range tx.TxIn {
|
|
|
|
// Attempt to fetch and use the referenced transaction from the
|
|
|
|
// memory pool.
|
|
|
|
origin := &txIn.PreviousOutPoint
|
|
|
|
originTx, err := mp.FetchTransaction(&origin.Hash)
|
|
|
|
if err == nil {
|
|
|
|
txOuts := originTx.MsgTx().TxOut
|
|
|
|
if origin.Index >= uint32(len(txOuts)) {
|
|
|
|
errStr := fmt.Sprintf("unable to find output "+
|
|
|
|
"%v referenced from transaction %s:%d",
|
2016-08-08 21:04:33 +02:00
|
|
|
origin, tx.TxHash(), txInIndex)
|
2016-02-19 05:51:18 +01:00
|
|
|
return nil, internalRPCError(errStr, "")
|
|
|
|
}
|
|
|
|
|
|
|
|
originOutputs[*origin] = *txOuts[origin.Index]
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Look up the location of the transaction.
|
2016-04-02 23:58:01 +02:00
|
|
|
blockRegion, err := s.cfg.TxIndex.TxBlockRegion(&origin.Hash)
|
2016-02-19 05:51:18 +01:00
|
|
|
if err != nil {
|
|
|
|
context := "Failed to retrieve transaction location"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
if blockRegion == nil {
|
|
|
|
return nil, rpcNoTxInfoError(&origin.Hash)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load the raw transaction bytes from the database.
|
|
|
|
var txBytes []byte
|
2016-04-02 23:58:01 +02:00
|
|
|
err = s.cfg.DB.View(func(dbTx database.Tx) error {
|
2016-02-19 05:51:18 +01:00
|
|
|
var err error
|
|
|
|
txBytes, err = dbTx.FetchBlockRegion(blockRegion)
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, rpcNoTxInfoError(&origin.Hash)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deserialize the transaction
|
|
|
|
var msgTx wire.MsgTx
|
|
|
|
err = msgTx.Deserialize(bytes.NewReader(txBytes))
|
|
|
|
if err != nil {
|
|
|
|
context := "Failed to deserialize transaction"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add the referenced output to the map.
|
|
|
|
if origin.Index >= uint32(len(msgTx.TxOut)) {
|
|
|
|
errStr := fmt.Sprintf("unable to find output %v "+
|
|
|
|
"referenced from transaction %s:%d", origin,
|
2016-08-08 21:04:33 +02:00
|
|
|
tx.TxHash(), txInIndex)
|
2016-02-19 05:51:18 +01:00
|
|
|
return nil, internalRPCError(errStr, "")
|
|
|
|
}
|
|
|
|
originOutputs[*origin] = *msgTx.TxOut[origin.Index]
|
|
|
|
}
|
|
|
|
|
|
|
|
return originOutputs, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// createVinListPrevOut returns a slice of JSON objects for the inputs of the
|
|
|
|
// passed transaction.
|
|
|
|
func createVinListPrevOut(s *rpcServer, mtx *wire.MsgTx, chainParams *chaincfg.Params, vinExtra bool, filterAddrMap map[string]struct{}) ([]btcjson.VinPrevOut, error) {
|
|
|
|
// Coinbase transactions only have a single txin by definition.
|
|
|
|
if blockchain.IsCoinBaseTx(mtx) {
|
|
|
|
// Only include the transaction if the filter map is empty
|
|
|
|
// because a coinbase input has no addresses and so would never
|
|
|
|
// match a non-empty filter.
|
|
|
|
if len(filterAddrMap) != 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
txIn := mtx.TxIn[0]
|
|
|
|
vinList := make([]btcjson.VinPrevOut, 1)
|
|
|
|
vinList[0].Coinbase = hex.EncodeToString(txIn.SignatureScript)
|
|
|
|
vinList[0].Sequence = txIn.Sequence
|
|
|
|
return vinList, nil
|
|
|
|
}
|
|
|
|
|
2016-10-28 06:21:58 +02:00
|
|
|
// Use a dynamically sized list to accommodate the address filter.
|
2016-02-19 05:51:18 +01:00
|
|
|
vinList := make([]btcjson.VinPrevOut, 0, len(mtx.TxIn))
|
|
|
|
|
|
|
|
// Lookup all of the referenced transaction outputs needed to populate
|
|
|
|
// the previous output information if requested.
|
|
|
|
var originOutputs map[wire.OutPoint]wire.TxOut
|
|
|
|
if vinExtra || len(filterAddrMap) > 0 {
|
|
|
|
var err error
|
|
|
|
originOutputs, err = fetchInputTxos(s, mtx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, txIn := range mtx.TxIn {
|
|
|
|
// The disassembled string will contain [error] inline
|
|
|
|
// if the script doesn't fully parse, so ignore the
|
|
|
|
// error here.
|
|
|
|
disbuf, _ := txscript.DisasmString(txIn.SignatureScript)
|
|
|
|
|
|
|
|
// Create the basic input entry without the additional optional
|
|
|
|
// previous output details which will be added later if
|
|
|
|
// requested and available.
|
|
|
|
prevOut := &txIn.PreviousOutPoint
|
|
|
|
vinEntry := btcjson.VinPrevOut{
|
|
|
|
Txid: prevOut.Hash.String(),
|
|
|
|
Vout: prevOut.Index,
|
|
|
|
Sequence: txIn.Sequence,
|
|
|
|
ScriptSig: &btcjson.ScriptSig{
|
|
|
|
Asm: disbuf,
|
|
|
|
Hex: hex.EncodeToString(txIn.SignatureScript),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2016-10-19 04:23:27 +02:00
|
|
|
if len(txIn.Witness) != 0 {
|
2017-08-30 03:40:19 +02:00
|
|
|
vinEntry.Witness = witnessToHex(txIn.Witness)
|
2016-10-19 04:23:27 +02:00
|
|
|
}
|
|
|
|
|
2016-02-19 05:51:18 +01:00
|
|
|
// Add the entry to the list now if it already passed the filter
|
|
|
|
// since the previous output might not be available.
|
|
|
|
passesFilter := len(filterAddrMap) == 0
|
|
|
|
if passesFilter {
|
|
|
|
vinList = append(vinList, vinEntry)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Only populate previous output information if requested and
|
|
|
|
// available.
|
|
|
|
if len(originOutputs) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
originTxOut, ok := originOutputs[*prevOut]
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ignore the error here since an error means the script
|
|
|
|
// couldn't parse and there is no additional information about
|
|
|
|
// it anyways.
|
2021-07-23 18:13:31 +02:00
|
|
|
class, addrs, _, _ := txscript.ExtractPkScriptAddrs(
|
2016-02-19 05:51:18 +01:00
|
|
|
originTxOut.PkScript, chainParams)
|
|
|
|
|
|
|
|
// Encode the addresses while checking if the address passes the
|
|
|
|
// filter when needed.
|
|
|
|
encodedAddrs := make([]string, len(addrs))
|
|
|
|
for j, addr := range addrs {
|
|
|
|
encodedAddr := addr.EncodeAddress()
|
|
|
|
encodedAddrs[j] = encodedAddr
|
|
|
|
|
|
|
|
// No need to check the map again if the filter already
|
|
|
|
// passes.
|
|
|
|
if passesFilter {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if _, exists := filterAddrMap[encodedAddr]; exists {
|
|
|
|
passesFilter = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ignore the entry if it doesn't pass the filter.
|
|
|
|
if !passesFilter {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add entry to the list if it wasn't already done above.
|
|
|
|
if len(filterAddrMap) != 0 {
|
|
|
|
vinList = append(vinList, vinEntry)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the entry with previous output information if
|
|
|
|
// requested.
|
|
|
|
if vinExtra {
|
|
|
|
vinListEntry := &vinList[len(vinList)-1]
|
|
|
|
vinListEntry.PrevOut = &btcjson.PrevOut{
|
|
|
|
Addresses: encodedAddrs,
|
|
|
|
Value: btcutil.Amount(originTxOut.Value).ToBTC(),
|
2021-07-23 18:13:31 +02:00
|
|
|
IsClaim: class == txscript.NonStandardTy &&
|
|
|
|
(originTxOut.PkScript[0] == txscript.OP_CLAIMNAME || originTxOut.PkScript[0] == txscript.OP_UPDATECLAIM),
|
|
|
|
IsSupport: class == txscript.NonStandardTy && originTxOut.PkScript[0] == txscript.OP_SUPPORTCLAIM,
|
2016-02-19 05:51:18 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return vinList, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// fetchMempoolTxnsForAddress queries the address index for all unconfirmed
|
|
|
|
// transactions that involve the provided address. The results will be limited
|
|
|
|
// by the number to skip and the number requested.
|
|
|
|
func fetchMempoolTxnsForAddress(s *rpcServer, addr btcutil.Address, numToSkip, numRequested uint32) ([]*btcutil.Tx, uint32) {
|
|
|
|
// There are no entries to return when there are fewer available than the
|
|
|
|
// number being skipped.
|
2016-04-02 23:58:01 +02:00
|
|
|
mpTxns := s.cfg.AddrIndex.UnconfirmedTxnsForAddress(addr)
|
2016-02-19 05:51:18 +01:00
|
|
|
numAvailable := uint32(len(mpTxns))
|
|
|
|
if numToSkip > numAvailable {
|
|
|
|
return nil, numAvailable
|
|
|
|
}
|
|
|
|
|
|
|
|
// Filter the available entries based on the number to skip and number
|
|
|
|
// requested.
|
|
|
|
rangeEnd := numToSkip + numRequested
|
|
|
|
if rangeEnd > numAvailable {
|
|
|
|
rangeEnd = numAvailable
|
|
|
|
}
|
|
|
|
return mpTxns[numToSkip:rangeEnd], numToSkip
|
|
|
|
}
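The skip/limit arithmetic above clamps the requested window to what is actually available. A worked restatement with an illustrative helper name: with 7 available entries, numToSkip=5 and numRequested=10 yield the window [5:7], returning two entries and reporting five skipped; a skip larger than the pool returns nothing and reports everything as skipped.
// windowForAddress recomputes the slice bounds used by
// fetchMempoolTxnsForAddress along with the skipped count it reports.
func windowForAddress(numAvailable, numToSkip, numRequested uint32) (start, end, skipped uint32) {
	if numToSkip > numAvailable {
		return 0, 0, numAvailable
	}
	rangeEnd := numToSkip + numRequested
	if rangeEnd > numAvailable {
		rangeEnd = numAvailable
	}
	return numToSkip, rangeEnd, numToSkip
}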
|
|
|
|
|
|
|
|
// handleSearchRawTransactions implements the searchrawtransactions command.
|
|
|
|
func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
|
|
|
// Respond with an error if the address index is not enabled.
|
2016-04-02 23:58:01 +02:00
|
|
|
addrIndex := s.cfg.AddrIndex
|
2016-02-19 05:51:18 +01:00
|
|
|
if addrIndex == nil {
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCMisc,
|
|
|
|
Message: "Address index must be enabled (--addrindex)",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Override the flag for including extra previous output information in
|
|
|
|
// each input if needed.
|
|
|
|
c := cmd.(*btcjson.SearchRawTransactionsCmd)
|
|
|
|
vinExtra := false
|
|
|
|
if c.VinExtra != nil {
|
|
|
|
vinExtra = *c.VinExtra != 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// Including the extra previous output information requires the
|
|
|
|
// transaction index. Currently the address index relies on the
|
|
|
|
// transaction index, so this check is redundant, but it's better to be
|
|
|
|
// safe in case the address index is ever changed to not rely on it.
|
2016-04-02 23:58:01 +02:00
|
|
|
if vinExtra && s.cfg.TxIndex == nil {
|
2016-02-19 05:51:18 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCMisc,
|
|
|
|
Message: "Transaction index must be enabled (--txindex)",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt to decode the supplied address.
|
2017-08-13 18:45:00 +02:00
|
|
|
params := s.cfg.ChainParams
|
|
|
|
addr, err := btcutil.DecodeAddress(c.Address, params)
|
2016-02-19 05:51:18 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidAddressOrKey,
|
|
|
|
Message: "Invalid address or key: " + err.Error(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Override the default number of requested entries if needed. Also,
|
|
|
|
// just return now if the number of requested entries is zero to avoid
|
|
|
|
// extra work.
|
|
|
|
numRequested := 100
|
|
|
|
if c.Count != nil {
|
|
|
|
numRequested = *c.Count
|
|
|
|
if numRequested < 0 {
|
|
|
|
numRequested = 1
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if numRequested == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Override the default number of entries to skip if needed.
|
|
|
|
var numToSkip int
|
|
|
|
if c.Skip != nil {
|
|
|
|
numToSkip = *c.Skip
|
|
|
|
if numToSkip < 0 {
|
|
|
|
numToSkip = 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Override the reverse flag if needed.
|
|
|
|
var reverse bool
|
|
|
|
if c.Reverse != nil {
|
|
|
|
reverse = *c.Reverse
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add transactions from mempool first if client asked for reverse
|
|
|
|
// order. Otherwise, they will be added last (as needed depending on
|
|
|
|
// the requested counts).
|
|
|
|
//
|
|
|
|
// NOTE: This code doesn't sort by dependency. This might be something
|
|
|
|
// to do in the future for the client's convenience, or leave it to the
|
|
|
|
// client.
|
|
|
|
numSkipped := uint32(0)
|
|
|
|
addressTxns := make([]retrievedTx, 0, numRequested)
|
|
|
|
if reverse {
|
|
|
|
// Transactions in the mempool are not in a block header yet,
|
|
|
|
// so the block header field in the retrieved transaction struct
|
|
|
|
// is left nil.
|
|
|
|
mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr,
|
|
|
|
uint32(numToSkip), uint32(numRequested))
|
|
|
|
numSkipped += mpSkipped
|
|
|
|
for _, tx := range mpTxns {
|
|
|
|
addressTxns = append(addressTxns, retrievedTx{tx: tx})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch transactions from the database in the desired order if more are
|
|
|
|
// needed.
|
|
|
|
if len(addressTxns) < numRequested {
|
2016-04-02 23:58:01 +02:00
|
|
|
err = s.cfg.DB.View(func(dbTx database.Tx) error {
|
2016-02-19 05:51:18 +01:00
|
|
|
regions, dbSkipped, err := addrIndex.TxRegionsForAddress(
|
|
|
|
dbTx, addr, uint32(numToSkip)-numSkipped,
|
|
|
|
uint32(numRequested-len(addressTxns)), reverse)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load the raw transaction bytes from the database.
|
|
|
|
serializedTxns, err := dbTx.FetchBlockRegions(regions)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add the transaction and the hash of the block it is
|
|
|
|
// contained in to the list. Note that the transaction
|
|
|
|
// is left serialized here since the caller might have
|
|
|
|
// requested non-verbose output and hence there would be
|
|
|
|
// no point in deserializing it just to reserialize it
|
|
|
|
// later.
|
|
|
|
for i, serializedTx := range serializedTxns {
|
|
|
|
addressTxns = append(addressTxns, retrievedTx{
|
|
|
|
txBytes: serializedTx,
|
|
|
|
blkHash: regions[i].Hash,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
numSkipped += dbSkipped
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
context := "Failed to load address index entries"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add transactions from mempool last if client did not request reverse
|
|
|
|
// order and the number of results is still under the number requested.
|
|
|
|
if !reverse && len(addressTxns) < numRequested {
|
|
|
|
// Transactions in the mempool are not in a block header yet,
|
|
|
|
// so the block header field in the retrieved transaction struct
|
|
|
|
// is left nil.
|
|
|
|
mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr,
|
|
|
|
uint32(numToSkip)-numSkipped, uint32(numRequested-
|
|
|
|
len(addressTxns)))
|
|
|
|
numSkipped += mpSkipped
|
|
|
|
for _, tx := range mpTxns {
|
|
|
|
addressTxns = append(addressTxns, retrievedTx{tx: tx})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Address has never been used if neither source yielded any results.
|
|
|
|
if len(addressTxns) == 0 {
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCNoTxInfo,
|
|
|
|
Message: "No information available about address",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Serialize all of the transactions to hex.
|
|
|
|
hexTxns := make([]string, len(addressTxns))
|
|
|
|
for i := range addressTxns {
|
|
|
|
// Simply encode the raw bytes to hex when the retrieved
|
|
|
|
// transaction is already in serialized form.
|
|
|
|
rtx := &addressTxns[i]
|
|
|
|
if rtx.txBytes != nil {
|
|
|
|
hexTxns[i] = hex.EncodeToString(rtx.txBytes)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Serialize the transaction first and convert to hex when the
|
|
|
|
// retrieved transaction is the deserialized structure.
|
|
|
|
hexTxns[i], err = messageToHex(rtx.tx.MsgTx())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// When not in verbose mode, simply return a list of serialized txns.
|
|
|
|
if c.Verbose != nil && *c.Verbose == 0 {
|
|
|
|
return hexTxns, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Normalize the provided filter addresses (if any) to ensure there are
|
|
|
|
// no duplicates.
|
|
|
|
filterAddrMap := make(map[string]struct{})
|
|
|
|
if c.FilterAddrs != nil && len(*c.FilterAddrs) > 0 {
|
|
|
|
for _, addr := range *c.FilterAddrs {
|
|
|
|
filterAddrMap[addr] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The verbose flag is set, so generate the JSON object and return it.
|
2016-04-02 23:58:01 +02:00
|
|
|
best := s.cfg.Chain.BestSnapshot()
|
2016-02-19 05:51:18 +01:00
|
|
|
srtList := make([]btcjson.SearchRawTransactionsResult, len(addressTxns))
|
|
|
|
for i := range addressTxns {
|
|
|
|
// The deserialized transaction is needed, so deserialize the
|
|
|
|
// retrieved transaction if it's in serialized form (which will
|
|
|
|
// be the case when it was looked up from the database).
|
|
|
|
// Otherwise, use the existing deserialized transaction.
|
|
|
|
rtx := &addressTxns[i]
|
|
|
|
var mtx *wire.MsgTx
|
|
|
|
if rtx.tx == nil {
|
|
|
|
// Deserialize the transaction.
|
|
|
|
mtx = new(wire.MsgTx)
|
|
|
|
err := mtx.Deserialize(bytes.NewReader(rtx.txBytes))
|
|
|
|
if err != nil {
|
|
|
|
context := "Failed to deserialize transaction"
|
|
|
|
return nil, internalRPCError(err.Error(),
|
|
|
|
context)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
mtx = rtx.tx.MsgTx()
|
|
|
|
}
|
|
|
|
|
|
|
|
result := &srtList[i]
|
|
|
|
result.Hex = hexTxns[i]
|
2016-08-08 21:04:33 +02:00
|
|
|
result.Txid = mtx.TxHash().String()
|
2017-08-13 18:45:00 +02:00
|
|
|
result.Vin, err = createVinListPrevOut(s, mtx, params, vinExtra,
|
|
|
|
filterAddrMap)
|
2016-02-19 05:51:18 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-08-13 18:45:00 +02:00
|
|
|
result.Vout = createVoutList(mtx, params, filterAddrMap)
|
2016-02-19 05:51:18 +01:00
|
|
|
result.Version = mtx.Version
|
|
|
|
result.LockTime = mtx.LockTime
|
|
|
|
|
|
|
|
// Transactions grabbed from the mempool aren't yet in a block,
|
|
|
|
// so conditionally fetch block details here. This will be
|
|
|
|
// reflected in the final JSON output (mempool won't have
|
|
|
|
// confirmations or block information).
|
|
|
|
var blkHeader *wire.BlockHeader
|
|
|
|
var blkHashStr string
|
|
|
|
var blkHeight int32
|
|
|
|
if blkHash := rtx.blkHash; blkHash != nil {
|
2017-01-31 07:36:15 +01:00
|
|
|
// Fetch the header from chain.
|
2018-08-02 01:50:56 +02:00
|
|
|
header, err := s.cfg.Chain.HeaderByHash(blkHash)
|
2016-02-19 05:51:18 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCBlockNotFound,
|
|
|
|
Message: "Block not found",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the block height from chain.
|
2016-04-02 23:58:01 +02:00
|
|
|
height, err := s.cfg.Chain.BlockHeightByHash(blkHash)
|
2016-02-19 05:51:18 +01:00
|
|
|
if err != nil {
|
|
|
|
context := "Failed to obtain block height"
|
|
|
|
return nil, internalRPCError(err.Error(), context)
|
|
|
|
}
|
|
|
|
|
|
|
|
blkHeader = &header
|
|
|
|
blkHashStr = blkHash.String()
|
|
|
|
blkHeight = height
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add the block information to the result if there is any.
|
|
|
|
if blkHeader != nil {
|
|
|
|
// This is not a typo, they are identical in Bitcoin
|
|
|
|
// Core as well.
|
|
|
|
result.Time = blkHeader.Timestamp.Unix()
|
|
|
|
result.Blocktime = blkHeader.Timestamp.Unix()
|
|
|
|
result.BlockHash = blkHashStr
|
|
|
|
result.Confirmations = uint64(1 + best.Height - blkHeight)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return srtList, nil
|
|
|
|
}
|
|
|
|
|
2013-10-29 16:42:34 +01:00
|
|
|
// handleSendRawTransaction implements the sendrawtransaction command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleSendRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2013-10-29 16:42:34 +01:00
|
|
|
c := cmd.(*btcjson.SendRawTransactionCmd)
|
|
|
|
// Deserialize and send off to tx relay
|
2014-07-02 03:09:22 +02:00
|
|
|
hexStr := c.HexTx
|
|
|
|
if len(hexStr)%2 != 0 {
|
|
|
|
hexStr = "0" + hexStr
|
|
|
|
}
|
|
|
|
serializedTx, err := hex.DecodeString(hexStr)
|
2013-10-29 16:42:34 +01:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, rpcDecodeHexError(hexStr)
|
2013-10-29 16:42:34 +01:00
|
|
|
}
|
2016-10-27 04:09:19 +02:00
|
|
|
var msgTx wire.MsgTx
|
|
|
|
err = msgTx.Deserialize(bytes.NewReader(serializedTx))
|
2013-10-29 16:42:34 +01:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCDeserialization,
|
|
|
|
Message: "TX decode failed: " + err.Error(),
|
2013-08-14 22:55:31 +02:00
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
|
2016-10-28 19:48:54 +02:00
|
|
|
// Use 0 for the tag to represent local node.
|
2016-10-27 04:09:19 +02:00
|
|
|
tx := btcutil.NewTx(&msgTx)
|
2016-04-02 23:58:01 +02:00
|
|
|
acceptedTxs, err := s.cfg.TxMemPool.ProcessTransaction(tx, false, false, 0)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
|
|
|
// When the error is a rule error, it means the transaction was
|
|
|
|
// simply rejected as opposed to something actually going wrong,
|
2019-05-12 02:48:24 +02:00
|
|
|
// so log it as such. Otherwise, something really did go wrong,
|
|
|
|
// so log it as an actual error and return.
|
|
|
|
ruleErr, ok := err.(mempool.RuleError)
|
|
|
|
if !ok {
|
2015-02-20 03:44:48 +01:00
|
|
|
rpcsLog.Errorf("Failed to process transaction %v: %v",
|
2016-08-08 21:04:33 +02:00
|
|
|
tx.Hash(), err)
|
2019-05-12 02:48:24 +02:00
|
|
|
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCTxError,
|
|
|
|
Message: "TX rejected: " + err.Error(),
|
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
2019-05-12 02:48:24 +02:00
|
|
|
|
|
|
|
rpcsLog.Debugf("Rejected transaction %v: %v", tx.Hash(), err)
|
|
|
|
|
|
|
|
// We'll then map the rule error to the appropriate RPC error,
|
|
|
|
// matching bitcoind's behavior.
|
|
|
|
code := btcjson.ErrRPCTxError
|
|
|
|
if txRuleErr, ok := ruleErr.Err.(mempool.TxRuleError); ok {
|
|
|
|
errDesc := txRuleErr.Description
|
|
|
|
switch {
|
|
|
|
case strings.Contains(
|
|
|
|
strings.ToLower(errDesc), "orphan transaction",
|
|
|
|
):
|
|
|
|
code = btcjson.ErrRPCTxError
|
|
|
|
|
|
|
|
case strings.Contains(
|
|
|
|
strings.ToLower(errDesc), "transaction already exists",
|
|
|
|
):
|
|
|
|
code = btcjson.ErrRPCTxAlreadyInChain
|
|
|
|
|
|
|
|
default:
|
|
|
|
code = btcjson.ErrRPCTxRejected
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
2019-05-12 02:48:24 +02:00
|
|
|
Code: code,
|
2015-02-21 05:34:57 +01:00
|
|
|
Message: "TX rejected: " + err.Error(),
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-16 17:08:37 +01:00
|
|
|
// When the transaction was accepted it should be the first item in the
|
|
|
|
// returned array of accepted transactions. The only way this will not
|
|
|
|
// be true is if the API for ProcessTransaction changes and this code is
|
|
|
|
// not properly updated, but ensure the condition holds as a safeguard.
|
|
|
|
//
|
|
|
|
// Also, since an error is being returned to the caller, ensure the
|
|
|
|
// transaction is removed from the memory pool.
|
|
|
|
if len(acceptedTxs) == 0 || !acceptedTxs[0].Tx.Hash().IsEqual(tx.Hash()) {
|
2016-04-02 23:58:01 +02:00
|
|
|
s.cfg.TxMemPool.RemoveTransaction(tx, true)
|
2017-01-16 17:08:37 +01:00
|
|
|
|
|
|
|
errStr := fmt.Sprintf("transaction %v is not in accepted list",
|
|
|
|
tx.Hash())
|
|
|
|
return nil, internalRPCError(errStr, "")
|
|
|
|
}
|
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
// Generate and relay inventory vectors for all newly accepted
|
|
|
|
// transactions into the memory pool due to the original being
|
|
|
|
// accepted.
|
|
|
|
s.cfg.ConnMgr.RelayTransactions(acceptedTxs)
|
|
|
|
|
|
|
|
// Notify both websocket and getblocktemplate long poll clients of all
|
|
|
|
// newly accepted transactions.
|
|
|
|
s.NotifyNewTransactions(acceptedTxs)
|
2016-04-14 19:58:09 +02:00
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// Keep track of all the sendrawtransaction request txns so that they
|
|
|
|
// can be rebroadcast if they don't make their way into a block.
|
2017-01-16 17:08:37 +01:00
|
|
|
txD := acceptedTxs[0]
|
|
|
|
iv := wire.NewInvVect(wire.InvTypeTx, txD.Tx.Hash())
|
2016-04-02 23:58:01 +02:00
|
|
|
s.cfg.ConnMgr.AddRebroadcastInventory(iv, txD)
|
2015-02-20 03:44:48 +01:00
|
|
|
|
2016-08-08 21:04:33 +02:00
|
|
|
return tx.Hash().String(), nil
|
2015-02-20 03:44:48 +01:00
|
|
|
}
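On the wire, sendrawtransaction expects the transaction serialized with the standard wire encoding and hex-encoded (the handler even tolerates an odd-length string by left-padding it with a zero). A minimal client-side sketch of producing that parameter, assuming only the bytes, hex, and wire packages already imported here; messageToHex elsewhere in this file performs a similar encoding for outgoing results.
// msgTxToHex serializes a transaction and hex-encodes it for submission as
// the first parameter of sendrawtransaction.
func msgTxToHex(msgTx *wire.MsgTx) (string, error) {
	var buf bytes.Buffer
	if err := msgTx.Serialize(&buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf.Bytes()), nil
}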
|
|
|
|
|
|
|
|
// handleSetGenerate implements the setgenerate command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleSetGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2015-02-20 03:44:48 +01:00
|
|
|
c := cmd.(*btcjson.SetGenerateCmd)
|
|
|
|
|
|
|
|
// Disable generation regardless of the provided generate flag if the
|
|
|
|
// maximum number of threads (goroutines for our purposes) is 0.
|
|
|
|
// Otherwise enable or disable it depending on the provided flag.
|
|
|
|
generate := c.Generate
|
2015-02-21 05:34:57 +01:00
|
|
|
genProcLimit := -1
|
|
|
|
if c.GenProcLimit != nil {
|
|
|
|
genProcLimit = *c.GenProcLimit
|
|
|
|
}
|
|
|
|
if genProcLimit == 0 {
|
2015-02-20 03:44:48 +01:00
|
|
|
generate = false
|
|
|
|
}
|
|
|
|
|
|
|
|
if !generate {
|
2016-04-02 23:58:01 +02:00
|
|
|
s.cfg.CPUMiner.Stop()
|
2015-02-20 03:44:48 +01:00
|
|
|
} else {
|
|
|
|
// Respond with an error if there are no addresses to pay the
|
|
|
|
// created blocks to.
|
|
|
|
if len(cfg.miningAddrs) == 0 {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInternal.Code,
|
2015-02-20 03:44:48 +01:00
|
|
|
Message: "No payment addresses specified " +
|
|
|
|
"via --miningaddr",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// It's safe to call start even if it's already started.
|
2016-04-02 23:58:01 +02:00
|
|
|
s.cfg.CPUMiner.SetNumWorkers(int32(genProcLimit))
|
|
|
|
s.cfg.CPUMiner.Start()
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
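// Illustrative sketch, not part of the original code: given the logic above,
// the setgenerate command behaves roughly as follows (placeholder values):
//
//	setgenerate true 2   // start the CPU miner with two worker goroutines
//	setgenerate true 0   // a zero limit disables generation, miner stops
//	setgenerate false    // stop the miner regardless of the limit
//
// Enabling generation without any --miningaddr configured returns the
// "No payment addresses specified via --miningaddr" error built above.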
|
|
|
|
|
2020-05-23 20:54:49 +02:00
|
|
|
// Text used to signify that a signed message follows and to prevent
|
|
|
|
// inadvertently signing a transaction.
|
|
|
|
const messageSignatureHeader = "Bitcoin Signed Message:\n"
|
|
|
|
|
|
|
|
// handleSignMessageWithPrivKey implements the signmessagewithprivkey command.
|
|
|
|
func handleSignMessageWithPrivKey(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
|
|
|
c := cmd.(*btcjson.SignMessageWithPrivKeyCmd)
|
|
|
|
|
|
|
|
wif, err := btcutil.DecodeWIF(c.PrivKey)
|
|
|
|
if err != nil {
|
|
|
|
message := "Invalid private key"
|
|
|
|
switch err {
|
|
|
|
case btcutil.ErrMalformedPrivateKey:
|
|
|
|
message = "Malformed private key"
|
|
|
|
case btcutil.ErrChecksumMismatch:
|
|
|
|
message = "Private key checksum mismatch"
|
|
|
|
}
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidAddressOrKey,
|
|
|
|
Message: message,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !wif.IsForNet(s.cfg.ChainParams) {
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidAddressOrKey,
|
|
|
|
Message: "Private key for wrong network",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var buf bytes.Buffer
|
|
|
|
wire.WriteVarString(&buf, 0, messageSignatureHeader)
|
|
|
|
wire.WriteVarString(&buf, 0, c.Message)
|
|
|
|
messageHash := chainhash.DoubleHashB(buf.Bytes())
|
|
|
|
|
|
|
|
sig, err := btcec.SignCompact(btcec.S256(), wif.PrivKey,
|
|
|
|
messageHash, wif.CompressPubKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidAddressOrKey,
|
|
|
|
Message: "Sign failed",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return base64.StdEncoding.EncodeToString(sig), nil
|
|
|
|
}
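// Illustrative sketch, not part of the original code: the digest signed above
// is the double-SHA256 of the varstring-encoded header and message, so an
// external verifier has to rebuild it the same way ("hello" is a placeholder
// message):
//
//	var buf bytes.Buffer
//	wire.WriteVarString(&buf, 0, messageSignatureHeader)
//	wire.WriteVarString(&buf, 0, "hello")
//	digest := chainhash.DoubleHashB(buf.Bytes())
//
// The base64 string returned by the handler is the compact ECDSA signature
// over this 32-byte digest.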
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// handleStop implements the stop command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleStop(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2016-08-11 20:39:23 +02:00
|
|
|
select {
|
|
|
|
case s.requestProcessShutdown <- struct{}{}:
|
|
|
|
default:
|
|
|
|
}
|
2021-10-15 07:45:32 +02:00
|
|
|
return "lbcd stopping.", nil
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// handleSubmitBlock implements the submitblock command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleSubmitBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2015-02-20 03:44:48 +01:00
|
|
|
c := cmd.(*btcjson.SubmitBlockCmd)
|
|
|
|
|
|
|
|
// Deserialize the submitted block.
|
|
|
|
hexStr := c.HexBlock
|
|
|
|
if len(hexStr)%2 != 0 {
|
|
|
|
hexStr = "0" + c.HexBlock
|
|
|
|
}
|
|
|
|
serializedBlock, err := hex.DecodeString(hexStr)
|
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, rpcDecodeHexError(hexStr)
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
block, err := btcutil.NewBlockFromBytes(serializedBlock)
|
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCDeserialization,
|
|
|
|
Message: "Block decode failed: " + err.Error(),
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
// Process this block using the same rules as blocks coming from other
|
|
|
|
// nodes. This will in turn relay it to the network like normal.
|
|
|
|
_, err = s.cfg.SyncMgr.SubmitBlock(block, blockchain.BFNone)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Sprintf("rejected: %s", err.Error()), nil
|
|
|
|
}
|
|
|
|
|
2016-08-08 21:04:33 +02:00
|
|
|
rpcsLog.Infof("Accepted block %s via submitblock", block.Hash())
|
2015-02-20 03:44:48 +01:00
|
|
|
return nil, nil
|
|
|
|
}
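// Illustrative sketch, not part of the original code: the hex string expected
// by handleSubmitBlock is simply the serialized wire encoding of the block, so
// a caller holding a *btcutil.Block could build the parameter like this:
//
//	serialized, err := block.Bytes()
//	if err != nil {
//		return err
//	}
//	hexBlock := hex.EncodeToString(serialized)
//
// hexBlock is then passed as the first parameter of "submitblock".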
|
|
|
|
|
2017-07-03 01:04:40 +02:00
|
|
|
// handleUptime implements the uptime command.
|
|
|
|
func handleUptime(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2016-04-02 23:58:01 +02:00
|
|
|
return time.Now().Unix() - s.cfg.StartupTime, nil
|
2017-07-03 01:04:40 +02:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// handleValidateAddress implements the validateaddress command.
|
|
|
|
func handleValidateAddress(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
|
|
|
c := cmd.(*btcjson.ValidateAddressCmd)
|
|
|
|
|
|
|
|
result := btcjson.ValidateAddressChainResult{}
|
2017-08-13 18:45:00 +02:00
|
|
|
addr, err := btcutil.DecodeAddress(c.Address, s.cfg.ChainParams)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
|
|
|
// Return the default value (false) for IsValid.
|
|
|
|
return result, nil
|
|
|
|
}
|
|
|
|
|
2020-08-24 20:47:32 +02:00
|
|
|
switch addr := addr.(type) {
|
|
|
|
case *btcutil.AddressPubKeyHash:
|
|
|
|
result.IsScript = btcjson.Bool(false)
|
|
|
|
result.IsWitness = btcjson.Bool(false)
|
|
|
|
|
|
|
|
case *btcutil.AddressScriptHash:
|
|
|
|
result.IsScript = btcjson.Bool(true)
|
|
|
|
result.IsWitness = btcjson.Bool(false)
|
|
|
|
|
|
|
|
case *btcutil.AddressPubKey:
|
|
|
|
result.IsScript = btcjson.Bool(false)
|
|
|
|
result.IsWitness = btcjson.Bool(false)
|
|
|
|
|
|
|
|
case *btcutil.AddressWitnessPubKeyHash:
|
|
|
|
result.IsScript = btcjson.Bool(false)
|
|
|
|
result.IsWitness = btcjson.Bool(true)
|
|
|
|
result.WitnessVersion = btcjson.Int32(int32(addr.WitnessVersion()))
|
|
|
|
result.WitnessProgram = btcjson.String(hex.EncodeToString(addr.WitnessProgram()))
|
|
|
|
|
|
|
|
case *btcutil.AddressWitnessScriptHash:
|
|
|
|
result.IsScript = btcjson.Bool(true)
|
|
|
|
result.IsWitness = btcjson.Bool(true)
|
|
|
|
result.WitnessVersion = btcjson.Int32(int32(addr.WitnessVersion()))
|
|
|
|
result.WitnessProgram = btcjson.String(hex.EncodeToString(addr.WitnessProgram()))
|
|
|
|
|
|
|
|
default:
|
|
|
|
// Handle the case when a new Address is supported by btcutil, but none
|
|
|
|
// of the cases were matched in the switch block. The current behavior
|
|
|
|
// is to do nothing, and only populate the Address and IsValid fields.
|
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
result.Address = addr.EncodeAddress()
|
|
|
|
result.IsValid = true
|
|
|
|
|
|
|
|
return result, nil
|
|
|
|
}
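// Illustrative sketch, not part of the original code: for a decodable P2PKH
// address the handler above returns roughly
//
//	{"isvalid":true,"address":"<the address>","isscript":false,"iswitness":false}
//
// while a string that fails btcutil.DecodeAddress yields only
// {"isvalid":false}. The exact field names follow the JSON tags of
// btcjson.ValidateAddressChainResult.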
|
|
|
|
|
2015-08-26 06:03:18 +02:00
|
|
|
func verifyChain(s *rpcServer, level, depth int32) error {
|
2016-04-02 23:58:01 +02:00
|
|
|
best := s.cfg.Chain.BestSnapshot()
|
2015-08-26 06:03:18 +02:00
|
|
|
finishHeight := best.Height - depth
|
2015-02-20 03:44:48 +01:00
|
|
|
if finishHeight < 0 {
|
|
|
|
finishHeight = 0
|
|
|
|
}
|
|
|
|
rpcsLog.Infof("Verifying chain for %d blocks at level %d",
|
2015-08-26 06:03:18 +02:00
|
|
|
best.Height-finishHeight, level)
|
2015-02-20 03:44:48 +01:00
|
|
|
|
2015-08-26 06:03:18 +02:00
|
|
|
for height := best.Height; height > finishHeight; height-- {
|
2015-02-20 03:44:48 +01:00
|
|
|
// Level 0 just looks up the block.
|
2016-04-02 23:58:01 +02:00
|
|
|
block, err := s.cfg.Chain.BlockByHeight(height)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Verify is unable to fetch block at "+
|
|
|
|
"height %d: %v", height, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Level 1 does basic chain sanity checks.
|
|
|
|
if level > 0 {
|
|
|
|
err := blockchain.CheckBlockSanity(block,
|
2016-04-02 23:58:01 +02:00
|
|
|
s.cfg.ChainParams.PowLimit, s.cfg.TimeSource)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
2015-08-26 06:03:18 +02:00
|
|
|
rpcsLog.Errorf("Verify is unable to validate "+
|
|
|
|
"block at hash %v height %d: %v",
|
2016-08-08 21:04:33 +02:00
|
|
|
block.Hash(), height, err)
|
2015-02-20 03:44:48 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
rpcsLog.Infof("Chain verify completed successfully")
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleVerifyChain implements the verifychain command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleVerifyChain(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2015-02-20 03:44:48 +01:00
|
|
|
c := cmd.(*btcjson.VerifyChainCmd)
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
var checkLevel, checkDepth int32
|
|
|
|
if c.CheckLevel != nil {
|
|
|
|
checkLevel = *c.CheckLevel
|
|
|
|
}
|
|
|
|
if c.CheckDepth != nil {
|
|
|
|
checkDepth = *c.CheckDepth
|
|
|
|
}
|
|
|
|
|
2015-08-26 06:03:18 +02:00
|
|
|
err := verifyChain(s, checkLevel, checkDepth)
|
2015-02-20 03:44:48 +01:00
|
|
|
return err == nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleVerifyMessage implements the verifymessage command.
|
2015-02-21 05:34:57 +01:00
|
|
|
func handleVerifyMessage(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
2015-02-20 03:44:48 +01:00
|
|
|
c := cmd.(*btcjson.VerifyMessageCmd)
|
|
|
|
|
|
|
|
// Decode the provided address.
|
2017-08-13 18:45:00 +02:00
|
|
|
params := s.cfg.ChainParams
|
|
|
|
addr, err := btcutil.DecodeAddress(c.Address, params)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidAddressOrKey,
|
|
|
|
Message: "Invalid address or key: " + err.Error(),
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Only P2PKH addresses are valid for signing.
|
|
|
|
if _, ok := addr.(*btcutil.AddressPubKeyHash); !ok {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCType,
|
2015-02-20 03:44:48 +01:00
|
|
|
Message: "Address is not a pay-to-pubkey-hash address",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Decode base64 signature.
|
|
|
|
sig, err := base64.StdEncoding.DecodeString(c.Signature)
|
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCParse.Code,
|
|
|
|
Message: "Malformed base64 encoding: " + err.Error(),
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate the signature - this just shows that it was valid at all.
|
|
|
|
// We will compare it with the key next.
|
2015-10-16 20:11:05 +02:00
|
|
|
var buf bytes.Buffer
|
2020-05-23 20:54:49 +02:00
|
|
|
wire.WriteVarString(&buf, 0, messageSignatureHeader)
|
2015-10-16 20:11:05 +02:00
|
|
|
wire.WriteVarString(&buf, 0, c.Message)
|
2016-08-08 21:04:33 +02:00
|
|
|
expectedMessageHash := chainhash.DoubleHashB(buf.Bytes())
|
2015-02-20 03:44:48 +01:00
|
|
|
pk, wasCompressed, err := btcec.RecoverCompact(btcec.S256(), sig,
|
2015-10-16 20:11:05 +02:00
|
|
|
expectedMessageHash)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
// Mirror Bitcoin Core behavior, which treats an error in
|
|
|
|
// RecoverCompact as invalid signature.
|
2015-02-20 03:44:48 +01:00
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reconstruct the pubkey hash.
|
|
|
|
var serializedPK []byte
|
|
|
|
if wasCompressed {
|
2016-11-03 00:18:48 +01:00
|
|
|
serializedPK = pk.SerializeCompressed()
|
2015-02-20 03:44:48 +01:00
|
|
|
} else {
|
2016-11-03 00:18:48 +01:00
|
|
|
serializedPK = pk.SerializeUncompressed()
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
2017-08-13 18:45:00 +02:00
|
|
|
address, err := btcutil.NewAddressPubKey(serializedPK, params)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
|
|
|
// Again mirror Bitcoin Core behavior, which treats an error in public key
|
|
|
|
// reconstruction as invalid signature.
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return boolean if addresses match.
|
|
|
|
return address.EncodeAddress() == c.Address, nil
|
|
|
|
}
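// Illustrative sketch, not part of the original code: verifymessage is the
// counterpart of signmessagewithprivkey above, so a signature produced with a
// WIF-encoded key verifies against that key's pay-to-pubkey-hash address.
// Assuming hypothetical wifStr and message values:
//
//	wif, _ := btcutil.DecodeWIF(wifStr)
//	var buf bytes.Buffer
//	wire.WriteVarString(&buf, 0, messageSignatureHeader)
//	wire.WriteVarString(&buf, 0, message)
//	digest := chainhash.DoubleHashB(buf.Bytes())
//	sig, _ := btcec.SignCompact(btcec.S256(), wif.PrivKey, digest,
//		wif.CompressPubKey)
//
// Passing the P2PKH address derived from wif's public key, the original
// message, and base64.StdEncoding.EncodeToString(sig) to verifymessage should
// then return true.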
|
|
|
|
|
2016-12-15 22:10:06 +01:00
|
|
|
// handleVersion implements the version command.
|
|
|
|
//
|
2016-04-02 23:58:01 +02:00
|
|
|
// NOTE: This is a btcsuite extension ported from github.com/decred/dcrd.
|
2016-12-15 22:10:06 +01:00
|
|
|
func handleVersion(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
|
|
|
|
result := map[string]btcjson.VersionResult{
|
2021-10-15 07:45:32 +02:00
|
|
|
"lbcdjsonrpcapi": {
|
2016-12-15 22:10:06 +01:00
|
|
|
VersionString: jsonrpcSemverString,
|
|
|
|
Major: jsonrpcSemverMajor,
|
|
|
|
Minor: jsonrpcSemverMinor,
|
|
|
|
Patch: jsonrpcSemverPatch,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
return result, nil
|
|
|
|
}
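// Illustrative sketch, not part of the original code: the version result maps
// the API name to its semantic version components, so a JSON-RPC caller sees
// something along the lines of
//
//	{"lbcdjsonrpcapi":{"versionstring":"<semver>","major":<n>,"minor":<n>,"patch":<n>}}
//
// with the values taken from the jsonrpcSemver* constants defined near the
// top of this file.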
|
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
// rpcServer provides a concurrent safe RPC server to a chain server.
|
2015-02-20 03:44:48 +01:00
|
|
|
type rpcServer struct {
|
2016-08-11 20:39:23 +02:00
|
|
|
started int32
|
|
|
|
shutdown int32
|
2016-04-02 23:58:01 +02:00
|
|
|
cfg rpcserverConfig
|
2017-01-11 16:00:16 +01:00
|
|
|
authsha [sha256.Size]byte
|
|
|
|
limitauthsha [sha256.Size]byte
|
2016-08-11 20:39:23 +02:00
|
|
|
ntfnMgr *wsNotificationManager
|
|
|
|
numClients int32
|
|
|
|
statusLines map[int]string
|
|
|
|
statusLock sync.RWMutex
|
|
|
|
wg sync.WaitGroup
|
|
|
|
gbtWorkState *gbtWorkState
|
|
|
|
helpCacher *helpCacher
|
|
|
|
requestProcessShutdown chan struct{}
|
|
|
|
quit chan int
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// httpStatusLine returns a response Status-Line (RFC 2616 Section 6.1)
|
|
|
|
// for the given request and response status code. This function was lifted and
|
|
|
|
// adapted from the standard library HTTP server code since it's not exported.
|
|
|
|
func (s *rpcServer) httpStatusLine(req *http.Request, code int) string {
|
|
|
|
// Fast path:
|
|
|
|
key := code
|
|
|
|
proto11 := req.ProtoAtLeast(1, 1)
|
|
|
|
if !proto11 {
|
|
|
|
key = -key
|
|
|
|
}
|
|
|
|
s.statusLock.RLock()
|
|
|
|
line, ok := s.statusLines[key]
|
|
|
|
s.statusLock.RUnlock()
|
|
|
|
if ok {
|
|
|
|
return line
|
|
|
|
}
|
|
|
|
|
|
|
|
// Slow path:
|
|
|
|
proto := "HTTP/1.0"
|
|
|
|
if proto11 {
|
|
|
|
proto = "HTTP/1.1"
|
|
|
|
}
|
|
|
|
codeStr := strconv.Itoa(code)
|
|
|
|
text := http.StatusText(code)
|
|
|
|
if text != "" {
|
|
|
|
line = proto + " " + codeStr + " " + text + "\r\n"
|
|
|
|
s.statusLock.Lock()
|
|
|
|
s.statusLines[key] = line
|
|
|
|
s.statusLock.Unlock()
|
|
|
|
} else {
|
|
|
|
text = "status code " + codeStr
|
|
|
|
line = proto + " " + codeStr + " " + text + "\r\n"
|
|
|
|
}
|
|
|
|
|
|
|
|
return line
|
|
|
|
}
|
|
|
|
|
|
|
|
// writeHTTPResponseHeaders writes the necessary response headers prior to
|
|
|
|
// writing an HTTP body given a request to use for protocol negotiation, headers
|
|
|
|
// to write, a status code, and a writer.
|
|
|
|
func (s *rpcServer) writeHTTPResponseHeaders(req *http.Request, headers http.Header, code int, w io.Writer) error {
|
|
|
|
_, err := io.WriteString(w, s.httpStatusLine(req, code))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = headers.Write(w)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = io.WriteString(w, "\r\n")
|
2016-11-03 05:02:04 +01:00
|
|
|
return err
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Stop is used by server.go to stop the rpc listener.
|
|
|
|
func (s *rpcServer) Stop() error {
|
|
|
|
if atomic.AddInt32(&s.shutdown, 1) != 1 {
|
|
|
|
rpcsLog.Infof("RPC server is already in the process of shutting down")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
rpcsLog.Warnf("RPC server shutting down")
|
2017-08-14 01:58:58 +02:00
|
|
|
for _, listener := range s.cfg.Listeners {
|
2015-02-20 03:44:48 +01:00
|
|
|
err := listener.Close()
|
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Problem shutting down rpc: %v", err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.ntfnMgr.Shutdown()
|
|
|
|
s.ntfnMgr.WaitForShutdown()
|
|
|
|
close(s.quit)
|
|
|
|
s.wg.Wait()
|
|
|
|
rpcsLog.Infof("RPC server shutdown complete")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-11 20:39:23 +02:00
|
|
|
// RequestedProcessShutdown returns a channel that is sent to when an authorized
|
|
|
|
// RPC client requests the process to shut down. If the request cannot be read
|
|
|
|
// immediately, it is dropped.
|
|
|
|
func (s *rpcServer) RequestedProcessShutdown() <-chan struct{} {
|
|
|
|
return s.requestProcessShutdown
|
|
|
|
}
|
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
// NotifyNewTransactions notifies both websocket and getblocktemplate long
|
|
|
|
// poll clients of the passed transactions. This function should be called
|
|
|
|
// whenever new transactions are added to the mempool.
|
|
|
|
func (s *rpcServer) NotifyNewTransactions(txns []*mempool.TxDesc) {
|
|
|
|
for _, txD := range txns {
|
|
|
|
// Notify websocket clients about mempool transactions.
|
|
|
|
s.ntfnMgr.NotifyMempoolTx(txD.Tx, true)
|
|
|
|
|
|
|
|
// Potentially notify any getblocktemplate long poll clients
|
|
|
|
// about stale block templates due to the new transaction.
|
|
|
|
s.gbtWorkState.NotifyMempoolTx(s.cfg.TxMemPool.LastUpdated())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// limitConnections responds with a 503 service unavailable and returns true if
|
|
|
|
// adding another client would exceed the maximum allowed number of RPC clients.
|
|
|
|
//
|
|
|
|
// This function is safe for concurrent access.
|
|
|
|
func (s *rpcServer) limitConnections(w http.ResponseWriter, remoteAddr string) bool {
|
|
|
|
if int(atomic.LoadInt32(&s.numClients)+1) > cfg.RPCMaxClients {
|
|
|
|
rpcsLog.Infof("Max RPC clients exceeded [%d] - "+
|
|
|
|
"disconnecting client %s", cfg.RPCMaxClients,
|
|
|
|
remoteAddr)
|
|
|
|
http.Error(w, "503 Too busy. Try again later.",
|
|
|
|
http.StatusServiceUnavailable)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// incrementClients adds one to the number of connected RPC clients. Note
|
|
|
|
// this only applies to standard clients. Websocket clients have their own
|
|
|
|
// limits and are tracked separately.
|
|
|
|
//
|
|
|
|
// This function is safe for concurrent access.
|
|
|
|
func (s *rpcServer) incrementClients() {
|
|
|
|
atomic.AddInt32(&s.numClients, 1)
|
|
|
|
}
|
|
|
|
|
|
|
|
// decrementClients subtracts one from the number of connected RPC clients.
|
|
|
|
// Note this only applies to standard clients. Websocket clients have their own
|
|
|
|
// limits and are tracked separately.
|
|
|
|
//
|
|
|
|
// This function is safe for concurrent access.
|
|
|
|
func (s *rpcServer) decrementClients() {
|
|
|
|
atomic.AddInt32(&s.numClients, -1)
|
|
|
|
}
|
|
|
|
|
|
|
|
// checkAuth checks the HTTP Basic authentication supplied by a wallet
|
|
|
|
// or RPC client in the HTTP request r. If the supplied authentication
|
|
|
|
// does not match the username and password expected, a non-nil error is
|
|
|
|
// returned.
|
|
|
|
//
|
|
|
|
// This check is performed in constant time.
|
2015-03-30 19:45:31 +02:00
|
|
|
//
|
|
|
|
// The first bool return value signifies auth success (true if successful) and
|
|
|
|
// the second bool return value specifies whether the user can change the state
|
|
|
|
// of the server (true) or whether the user is limited (false). The second is
|
|
|
|
// always false if the first is false.
|
2016-02-19 05:51:18 +01:00
|
|
|
func (s *rpcServer) checkAuth(r *http.Request, require bool) (bool, bool, error) {
|
2015-02-20 03:44:48 +01:00
|
|
|
authhdr := r.Header["Authorization"]
|
|
|
|
if len(authhdr) <= 0 {
|
|
|
|
if require {
|
|
|
|
rpcsLog.Warnf("RPC authentication failure from %s",
|
|
|
|
r.RemoteAddr)
|
2015-03-30 19:45:31 +02:00
|
|
|
return false, false, errors.New("auth failure")
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
2013-10-31 06:28:37 +01:00
|
|
|
|
2015-03-30 19:45:31 +02:00
|
|
|
return false, false, nil
|
2013-10-29 16:42:34 +01:00
|
|
|
}
|
2013-08-14 22:55:31 +02:00
|
|
|
|
2017-01-11 16:00:16 +01:00
|
|
|
authsha := sha256.Sum256([]byte(authhdr[0]))
|
2015-03-30 19:45:31 +02:00
|
|
|
|
|
|
|
// Check for limited auth first since, in environments with limited users, those
|
|
|
|
// users are expected to generate a higher volume of calls.
|
|
|
|
limitcmp := subtle.ConstantTimeCompare(authsha[:], s.limitauthsha[:])
|
|
|
|
if limitcmp == 1 {
|
|
|
|
return true, false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check for admin-level auth
|
2015-02-20 03:44:48 +01:00
|
|
|
cmp := subtle.ConstantTimeCompare(authsha[:], s.authsha[:])
|
2015-03-30 19:45:31 +02:00
|
|
|
if cmp == 1 {
|
|
|
|
return true, true, nil
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
2015-03-30 19:45:31 +02:00
|
|
|
|
|
|
|
// Request's auth doesn't match either user
|
|
|
|
rpcsLog.Warnf("RPC authentication failure from %s", r.RemoteAddr)
|
|
|
|
return false, false, errors.New("auth failure")
|
2013-10-29 16:42:34 +01:00
|
|
|
}
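// Illustrative sketch, not part of the original code: the Authorization header
// that checkAuth compares against is the standard HTTP Basic form, and only
// SHA-256 hashes of it are ever compared (see newRPCServer below). A client
// could construct it with just the standard library (rpcuser/rpcpass are
// placeholders):
//
//	login := "rpcuser:rpcpass"
//	auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login))
//	httpReq.Header.Set("Authorization", auth)
//
// where httpReq is the *http.Request carrying the JSON-RPC call.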
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// parsedRPCCmd represents a JSON-RPC request object that has been parsed into
|
|
|
|
// a known concrete command along with any error that might have happened while
|
|
|
|
// parsing it.
|
|
|
|
type parsedRPCCmd struct {
|
2020-12-06 04:39:40 +01:00
|
|
|
jsonrpc btcjson.RPCVersion
|
|
|
|
id interface{}
|
|
|
|
method string
|
|
|
|
cmd interface{}
|
|
|
|
err *btcjson.RPCError
|
2015-02-21 05:34:57 +01:00
|
|
|
}
|
2014-06-12 04:51:21 +02:00
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// standardCmdResult checks that a parsed command is a standard Bitcoin JSON-RPC
|
|
|
|
// command and runs the appropriate handler to reply to the command. Any
|
|
|
|
// commands which are not recognized or not implemented will return an error
|
|
|
|
// suitable for use in replies.
|
|
|
|
func (s *rpcServer) standardCmdResult(cmd *parsedRPCCmd, closeChan <-chan struct{}) (interface{}, error) {
|
|
|
|
handler, ok := rpcHandlers[cmd.method]
|
2015-02-20 03:44:48 +01:00
|
|
|
if ok {
|
|
|
|
goto handled
|
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
_, ok = rpcAskWallet[cmd.method]
|
2015-02-20 03:44:48 +01:00
|
|
|
if ok {
|
|
|
|
handler = handleAskWallet
|
|
|
|
goto handled
|
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
_, ok = rpcUnimplemented[cmd.method]
|
2015-02-20 03:44:48 +01:00
|
|
|
if ok {
|
|
|
|
handler = handleUnimplemented
|
|
|
|
goto handled
|
2014-06-12 04:51:21 +02:00
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
return nil, btcjson.ErrRPCMethodNotFound
|
2015-02-20 03:44:48 +01:00
|
|
|
handled:
|
2014-06-12 04:51:21 +02:00
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
return handler(s, cmd.cmd, closeChan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// parseCmd parses a JSON-RPC request object into a known concrete command. The
|
|
|
|
// err field of the returned parsedRPCCmd struct will contain an RPC error that
|
|
|
|
// is suitable for use in replies if the command is invalid in some way such as
|
|
|
|
// an unregistered command or invalid parameters.
|
|
|
|
func parseCmd(request *btcjson.Request) *parsedRPCCmd {
|
2020-12-06 04:39:40 +01:00
|
|
|
parsedCmd := parsedRPCCmd{
|
|
|
|
jsonrpc: request.Jsonrpc,
|
|
|
|
id: request.ID,
|
|
|
|
method: request.Method,
|
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
|
|
|
|
cmd, err := btcjson.UnmarshalCmd(request)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
// When the error is because the method is not registered,
|
|
|
|
// produce a method not found RPC error.
|
|
|
|
if jerr, ok := err.(btcjson.Error); ok &&
|
|
|
|
jerr.ErrorCode == btcjson.ErrUnregisteredMethod {
|
|
|
|
|
|
|
|
parsedCmd.err = btcjson.ErrRPCMethodNotFound
|
|
|
|
return &parsedCmd
|
2014-06-12 04:51:21 +02:00
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
|
|
|
|
// Otherwise, some type of invalid parameters is the
|
|
|
|
// cause, so produce the equivalent RPC error.
|
|
|
|
parsedCmd.err = btcjson.NewRPCError(
|
|
|
|
btcjson.ErrRPCInvalidParams.Code, err.Error())
|
|
|
|
return &parsedCmd
|
2014-06-12 04:51:21 +02:00
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
|
|
|
|
parsedCmd.cmd = cmd
|
|
|
|
return &parsedCmd
|
2013-10-29 16:42:34 +01:00
|
|
|
}
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// createMarshalledReply returns a new marshalled JSON-RPC response given the
|
|
|
|
// passed parameters. It will automatically convert errors that are not of
|
|
|
|
// the type *btcjson.RPCError to the appropriate type as needed.
|
2020-12-06 04:39:40 +01:00
|
|
|
func createMarshalledReply(rpcVersion btcjson.RPCVersion, id interface{}, result interface{}, replyErr error) ([]byte, error) {
|
2015-02-21 05:34:57 +01:00
|
|
|
var jsonErr *btcjson.RPCError
|
|
|
|
if replyErr != nil {
|
|
|
|
if jErr, ok := replyErr.(*btcjson.RPCError); ok {
|
|
|
|
jsonErr = jErr
|
|
|
|
} else {
|
|
|
|
jsonErr = internalRPCError(replyErr.Error(), "")
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
|
2020-12-06 04:39:40 +01:00
|
|
|
return btcjson.MarshalResponse(rpcVersion, id, result, jsonErr)
|
|
|
|
}
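// Illustrative sketch, not part of the original code: for a failed request the
// helper above yields a standard JSON-RPC error envelope. With placeholder
// inputs:
//
//	reply, _ := createMarshalledReply(btcjson.RpcVersion1, 1, nil,
//		btcjson.ErrRPCMethodNotFound)
//
// reply then marshals to roughly
//
//	{"result":null,"error":{"code":-32601,"message":"Method not found"},"id":1}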
|
|
|
|
|
|
|
|
// processRequest determines the incoming request type (single or batched),
|
|
|
|
// parses it and returns a marshalled response.
|
|
|
|
func (s *rpcServer) processRequest(request *btcjson.Request, isAdmin bool, closeChan <-chan struct{}) []byte {
|
|
|
|
var result interface{}
|
|
|
|
var err error
|
|
|
|
var jsonErr *btcjson.RPCError
|
|
|
|
|
|
|
|
if !isAdmin {
|
|
|
|
if _, ok := rpcLimited[request.Method]; !ok {
|
|
|
|
jsonErr = internalRPCError("limited user not "+
|
|
|
|
"authorized for this method", "")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if jsonErr == nil {
|
|
|
|
if request.Method == "" || request.Params == nil {
|
|
|
|
jsonErr = &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidRequest.Code,
|
|
|
|
Message: "Invalid request: malformed",
|
|
|
|
}
|
|
|
|
msg, err := createMarshalledReply(request.Jsonrpc, request.ID, result, jsonErr)
|
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Failed to marshal reply: %v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return msg
|
|
|
|
}
|
|
|
|
|
|
|
|
// Valid requests with no ID (notifications) must not have a response
|
|
|
|
// per the JSON-RPC spec.
|
|
|
|
if request.ID == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt to parse the JSON-RPC request into a known
|
|
|
|
// concrete command.
|
|
|
|
parsedCmd := parseCmd(request)
|
|
|
|
if parsedCmd.err != nil {
|
|
|
|
jsonErr = parsedCmd.err
|
|
|
|
} else {
|
|
|
|
result, err = s.standardCmdResult(parsedCmd,
|
|
|
|
closeChan)
|
|
|
|
if err != nil {
|
2021-03-09 10:37:31 +01:00
|
|
|
if rpcErr, ok := err.(*btcjson.RPCError); ok {
|
|
|
|
jsonErr = rpcErr
|
|
|
|
} else {
|
|
|
|
jsonErr = &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidRequest.Code,
|
|
|
|
Message: "Invalid request: malformed",
|
|
|
|
}
|
2020-12-06 04:39:40 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Marshal the response.
|
|
|
|
msg, err := createMarshalledReply(request.Jsonrpc, request.ID, result, jsonErr)
|
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Failed to marshal reply: %v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return msg
|
2013-10-29 16:42:34 +01:00
|
|
|
}
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// jsonRPCRead handles reading and responding to RPC messages.
|
2016-02-19 05:51:18 +01:00
|
|
|
func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin bool) {
|
2015-02-20 03:44:48 +01:00
|
|
|
if atomic.LoadInt32(&s.shutdown) != 0 {
|
|
|
|
return
|
2014-07-02 03:09:22 +02:00
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
|
|
|
|
// Read and close the JSON-RPC request body from the caller.
|
|
|
|
body, err := ioutil.ReadAll(r.Body)
|
|
|
|
r.Body.Close()
|
2014-01-15 16:01:12 +01:00
|
|
|
if err != nil {
|
2015-02-21 05:34:57 +01:00
|
|
|
errCode := http.StatusBadRequest
|
2016-11-03 05:02:04 +01:00
|
|
|
http.Error(w, fmt.Sprintf("%d error reading JSON message: %v",
|
|
|
|
errCode, err), errCode)
|
2015-02-20 03:44:48 +01:00
|
|
|
return
|
2014-01-15 16:01:12 +01:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// Unfortunately, the http server doesn't provide the ability to
|
|
|
|
// change the read deadline for the new connection and having one breaks
|
|
|
|
// long polling. However, not having a read deadline on the initial
|
|
|
|
// connection would mean clients can connect and idle forever. Thus,
|
|
|
|
// hijack the connection from the HTTP server, clear the read deadline,
|
|
|
|
// and handle writing the response manually.
|
|
|
|
hj, ok := w.(http.Hijacker)
|
|
|
|
if !ok {
|
|
|
|
errMsg := "webserver doesn't support hijacking"
|
|
|
|
rpcsLog.Warnf(errMsg)
|
|
|
|
errCode := http.StatusInternalServerError
|
2016-11-03 05:02:04 +01:00
|
|
|
http.Error(w, strconv.Itoa(errCode)+" "+errMsg, errCode)
|
2015-02-20 03:44:48 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
conn, buf, err := hj.Hijack()
|
2014-01-15 16:01:12 +01:00
|
|
|
if err != nil {
|
2015-02-20 03:44:48 +01:00
|
|
|
rpcsLog.Warnf("Failed to hijack HTTP connection: %v", err)
|
|
|
|
errCode := http.StatusInternalServerError
|
2016-11-03 05:02:04 +01:00
|
|
|
http.Error(w, strconv.Itoa(errCode)+" "+err.Error(), errCode)
|
2015-02-20 03:44:48 +01:00
|
|
|
return
|
2014-01-15 16:01:12 +01:00
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
defer conn.Close()
|
|
|
|
defer buf.Flush()
|
|
|
|
conn.SetReadDeadline(timeZeroVal)
|
2014-01-15 16:01:12 +01:00
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// Attempt to parse the raw body into a JSON-RPC request.
|
2020-12-06 04:39:40 +01:00
|
|
|
// Set up a close notifier. Since the connection is hijacked,
|
|
|
|
// the CloseNotifier on the ResponseWriter is not available.
|
|
|
|
closeChan := make(chan struct{}, 1)
|
|
|
|
go func() {
|
|
|
|
_, err = conn.Read(make([]byte, 1))
|
|
|
|
if err != nil {
|
|
|
|
close(closeChan)
|
2015-02-21 05:34:57 +01:00
|
|
|
}
|
2020-12-06 04:39:40 +01:00
|
|
|
}()
|
|
|
|
|
|
|
|
var results []json.RawMessage
|
|
|
|
var batchSize int
|
|
|
|
var batchedRequest bool
|
|
|
|
|
|
|
|
// Determine request type
|
|
|
|
if bytes.HasPrefix(body, batchedRequestPrefix) {
|
|
|
|
batchedRequest = true
|
2015-02-21 05:34:57 +01:00
|
|
|
}
|
2020-12-06 04:39:40 +01:00
|
|
|
|
|
|
|
// Process a single request
|
|
|
|
if !batchedRequest {
|
|
|
|
var req btcjson.Request
|
|
|
|
var resp json.RawMessage
|
|
|
|
err = json.Unmarshal(body, &req)
|
|
|
|
if err != nil {
|
|
|
|
jsonErr := &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCParse.Code,
|
|
|
|
Message: fmt.Sprintf("Failed to parse request: %v",
|
|
|
|
err),
|
|
|
|
}
|
|
|
|
resp, err = btcjson.MarshalResponse(btcjson.RpcVersion1, nil, nil, jsonErr)
|
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Failed to create reply: %v", err)
|
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
}
|
|
|
|
|
2020-12-06 04:39:40 +01:00
|
|
|
if err == nil {
|
|
|
|
// The JSON-RPC 1.0 spec defines that notifications must have their "id"
|
|
|
|
// set to null and states that notifications do not have a response.
|
|
|
|
//
|
|
|
|
// A JSON-RPC 2.0 notification is a request with "jsonrpc":"2.0", and
|
|
|
|
// without an "id" member. The specification states that notifications
|
|
|
|
// must not be responded to. JSON-RPC 2.0 permits the null value as a
|
|
|
|
// valid request id, therefore such requests are not notifications.
|
|
|
|
//
|
|
|
|
// Bitcoin Core serves requests with "id":null or even an absent "id",
|
|
|
|
// and responds to such requests with "id":null in the response.
|
|
|
|
//
|
|
|
|
// Btcd does not respond to any request without an "id" or with "id":null,
|
|
|
|
// regardless of the indicated JSON-RPC protocol version unless RPC quirks
|
|
|
|
// are enabled. With RPC quirks enabled, such requests will be responded
|
|
|
|
// to if the request does not indicate a JSON-RPC version.
|
|
|
|
//
|
|
|
|
// RPC quirks can be enabled by the user to avoid compatibility issues
|
|
|
|
// with software relying on Core's behavior.
|
|
|
|
if req.ID == nil && !(cfg.RPCQuirks && req.Jsonrpc == "") {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
resp = s.processRequest(&req, isAdmin, closeChan)
|
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
|
2020-12-06 04:39:40 +01:00
|
|
|
if resp != nil {
|
|
|
|
results = append(results, resp)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Process a batched request
|
|
|
|
if batchedRequest {
|
|
|
|
var batchedRequests []interface{}
|
|
|
|
var resp json.RawMessage
|
|
|
|
err = json.Unmarshal(body, &batchedRequests)
|
|
|
|
if err != nil {
|
|
|
|
jsonErr := &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCParse.Code,
|
|
|
|
Message: fmt.Sprintf("Failed to parse request: %v",
|
|
|
|
err),
|
|
|
|
}
|
|
|
|
resp, err = btcjson.MarshalResponse(btcjson.RpcVersion2, nil, nil, jsonErr)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
2020-12-06 04:39:40 +01:00
|
|
|
rpcsLog.Errorf("Failed to create reply: %v", err)
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
|
2020-12-06 04:39:40 +01:00
|
|
|
if resp != nil {
|
|
|
|
results = append(results, resp)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err == nil {
|
|
|
|
// Respond with an empty batch error if the batch size is zero.
|
|
|
|
if len(batchedRequests) == 0 {
|
|
|
|
jsonErr := &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidRequest.Code,
|
|
|
|
Message: "Invalid request: empty batch",
|
|
|
|
}
|
|
|
|
resp, err = btcjson.MarshalResponse(btcjson.RpcVersion2, nil, nil, jsonErr)
|
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Failed to marshal reply: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp != nil {
|
|
|
|
results = append(results, resp)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Process each batch entry individually
|
|
|
|
if len(batchedRequests) > 0 {
|
|
|
|
batchSize = len(batchedRequests)
|
|
|
|
|
|
|
|
for _, entry := range batchedRequests {
|
|
|
|
var reqBytes []byte
|
|
|
|
reqBytes, err = json.Marshal(entry)
|
|
|
|
if err != nil {
|
|
|
|
jsonErr := &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidRequest.Code,
|
|
|
|
Message: fmt.Sprintf("Invalid request: %v",
|
|
|
|
err),
|
|
|
|
}
|
|
|
|
resp, err = btcjson.MarshalResponse(btcjson.RpcVersion2, nil, nil, jsonErr)
|
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Failed to create reply: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp != nil {
|
|
|
|
results = append(results, resp)
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
var req btcjson.Request
|
|
|
|
err := json.Unmarshal(reqBytes, &req)
|
|
|
|
if err != nil {
|
|
|
|
jsonErr := &btcjson.RPCError{
|
|
|
|
Code: btcjson.ErrRPCInvalidRequest.Code,
|
|
|
|
Message: fmt.Sprintf("Invalid request: %v",
|
|
|
|
err),
|
|
|
|
}
|
|
|
|
resp, err = btcjson.MarshalResponse("", nil, nil, jsonErr)
|
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Failed to create reply: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp != nil {
|
|
|
|
results = append(results, resp)
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
resp = s.processRequest(&req, isAdmin, closeChan)
|
|
|
|
if resp != nil {
|
|
|
|
results = append(results, resp)
|
|
|
|
}
|
2015-03-30 19:45:31 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-12-06 04:39:40 +01:00
|
|
|
}
|
2015-03-30 19:45:31 +02:00
|
|
|
|
2020-12-06 04:39:40 +01:00
|
|
|
var msg = []byte{}
|
|
|
|
if batchedRequest && batchSize > 0 {
|
|
|
|
if len(results) > 0 {
|
|
|
|
// Form the batched response JSON.
|
|
|
|
var buffer bytes.Buffer
|
|
|
|
buffer.WriteByte('[')
|
|
|
|
for idx, reply := range results {
|
|
|
|
if idx == len(results)-1 {
|
|
|
|
buffer.Write(reply)
|
|
|
|
buffer.WriteByte(']')
|
|
|
|
break
|
|
|
|
}
|
|
|
|
buffer.Write(reply)
|
|
|
|
buffer.WriteByte(',')
|
2015-03-30 19:45:31 +02:00
|
|
|
}
|
2020-12-06 04:39:40 +01:00
|
|
|
msg = buffer.Bytes()
|
2015-02-21 05:34:57 +01:00
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
}
|
|
|
|
|
2020-12-06 04:39:40 +01:00
|
|
|
if !batchedRequest || batchSize == 0 {
|
|
|
|
// Respond with the first results entry for single requests
|
|
|
|
if len(results) > 0 {
|
|
|
|
msg = results[0]
|
|
|
|
}
|
2014-01-15 16:01:12 +01:00
|
|
|
}
|
|
|
|
|
2015-02-21 05:34:57 +01:00
|
|
|
// Write the response.
|
|
|
|
err = s.writeHTTPResponseHeaders(r, w.Header(), http.StatusOK, buf)
|
2015-02-20 03:44:48 +01:00
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Error(err)
|
|
|
|
return
|
2014-07-14 17:08:07 +02:00
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
if _, err := buf.Write(msg); err != nil {
|
|
|
|
rpcsLog.Errorf("Failed to write marshalled reply: %v", err)
|
|
|
|
}
|
2016-03-02 19:35:06 +01:00
|
|
|
|
|
|
|
// Terminate with newline to maintain compatibility with Bitcoin Core.
|
|
|
|
if err := buf.WriteByte('\n'); err != nil {
|
|
|
|
rpcsLog.Errorf("Failed to append terminating newline to reply: %v", err)
|
|
|
|
}
|
2014-01-15 16:01:12 +01:00
|
|
|
}
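// Illustrative sketch, not part of the original code: a batched request is a
// JSON array and, per the assembly above, is answered with a JSON array of the
// individual responses in processing order (placeholder ids and values):
//
//	--> [{"jsonrpc":"1.0","id":1,"method":"getblockcount","params":[]},
//	     {"jsonrpc":"1.0","id":2,"method":"uptime","params":[]}]
//	<-- [{"result":1054000,"error":null,"id":1},
//	     {"result":3600,"error":null,"id":2}]
//
// A non-batched request is answered with a single JSON object, and an empty
// batch ("[]") produces the "Invalid request: empty batch" error built above.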
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// jsonAuthFail sends a message back to the client if the http auth is rejected.
|
|
|
|
func jsonAuthFail(w http.ResponseWriter) {
|
2021-10-15 07:45:32 +02:00
|
|
|
w.Header().Add("WWW-Authenticate", `Basic realm="lbcd RPC"`)
|
2015-02-20 03:44:48 +01:00
|
|
|
http.Error(w, "401 Unauthorized.", http.StatusUnauthorized)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start is used by server.go to start the rpc listener.
|
|
|
|
func (s *rpcServer) Start() {
|
|
|
|
if atomic.AddInt32(&s.started, 1) != 1 {
|
|
|
|
return
|
2013-11-12 17:39:10 +01:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
rpcsLog.Trace("Starting RPC server")
|
|
|
|
rpcServeMux := http.NewServeMux()
|
|
|
|
httpServer := &http.Server{
|
|
|
|
Handler: rpcServeMux,
|
|
|
|
|
|
|
|
// Timeout connections which don't complete the initial
|
|
|
|
// handshake within the allowed timeframe.
|
|
|
|
ReadTimeout: time.Second * rpcAuthTimeoutSeconds,
|
2013-11-12 17:39:10 +01:00
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
rpcServeMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
w.Header().Set("Connection", "close")
|
|
|
|
w.Header().Set("Content-Type", "application/json")
|
|
|
|
r.Close = true
|
2013-11-12 17:39:10 +01:00
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// Limit the number of connections to max allowed.
|
|
|
|
if s.limitConnections(w, r.RemoteAddr) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Keep track of the number of connected clients.
|
|
|
|
s.incrementClients()
|
|
|
|
defer s.decrementClients()
|
2015-03-30 19:45:31 +02:00
|
|
|
_, isAdmin, err := s.checkAuth(r, true)
|
|
|
|
if err != nil {
|
2015-02-20 03:44:48 +01:00
|
|
|
jsonAuthFail(w)
|
|
|
|
return
|
2013-11-12 17:39:10 +01:00
|
|
|
}
|
2015-02-21 05:34:57 +01:00
|
|
|
|
|
|
|
// Read and respond to the request.
|
2015-03-30 19:45:31 +02:00
|
|
|
s.jsonRPCRead(w, r, isAdmin)
|
2015-02-20 03:44:48 +01:00
|
|
|
})
|
2013-11-12 17:39:10 +01:00
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// Websocket endpoint.
|
|
|
|
rpcServeMux.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
|
2015-03-30 19:45:31 +02:00
|
|
|
authenticated, isAdmin, err := s.checkAuth(r, false)
|
2013-11-12 17:39:10 +01:00
|
|
|
if err != nil {
|
2015-02-20 03:51:51 +01:00
|
|
|
jsonAuthFail(w)
|
2015-02-20 03:44:48 +01:00
|
|
|
return
|
2013-11-12 17:39:10 +01:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// Attempt to upgrade the connection to a websocket connection
|
|
|
|
// using the default size for read/write buffers.
|
|
|
|
ws, err := websocket.Upgrade(w, r, nil, 0, 0)
|
|
|
|
if err != nil {
|
|
|
|
if _, ok := err.(websocket.HandshakeError); !ok {
|
|
|
|
rpcsLog.Errorf("Unexpected websocket error: %v",
|
|
|
|
err)
|
2013-11-12 17:39:10 +01:00
|
|
|
}
|
2015-02-20 03:51:51 +01:00
|
|
|
http.Error(w, "400 Bad Request.", http.StatusBadRequest)
|
2015-02-20 03:44:48 +01:00
|
|
|
return
|
2013-11-12 17:39:10 +01:00
|
|
|
}
|
2015-03-30 19:45:31 +02:00
|
|
|
s.WebsocketHandler(ws, r.RemoteAddr, authenticated, isAdmin)
|
2015-02-20 03:44:48 +01:00
|
|
|
})
|
|
|
|
|
2017-08-14 01:58:58 +02:00
|
|
|
for _, listener := range s.cfg.Listeners {
|
2015-02-20 03:44:48 +01:00
|
|
|
s.wg.Add(1)
|
|
|
|
go func(listener net.Listener) {
|
|
|
|
rpcsLog.Infof("RPC server listening on %s", listener.Addr())
|
|
|
|
httpServer.Serve(listener)
|
|
|
|
rpcsLog.Tracef("RPC listener done for %s", listener.Addr())
|
|
|
|
s.wg.Done()
|
|
|
|
}(listener)
|
2013-11-12 17:39:10 +01:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
s.ntfnMgr.Start()
|
2013-11-12 17:39:10 +01:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// genCertPair generates a key/cert pair at the paths provided.
|
|
|
|
func genCertPair(certFile, keyFile string) error {
|
|
|
|
rpcsLog.Infof("Generating TLS certificates...")
|
2014-07-03 05:52:34 +02:00
|
|
|
|
2021-10-15 07:45:32 +02:00
|
|
|
org := "lbcd autogenerated cert"
|
2015-02-20 03:44:48 +01:00
|
|
|
validUntil := time.Now().Add(10 * 365 * 24 * time.Hour)
|
|
|
|
cert, key, err := btcutil.NewTLSCertPair(org, validUntil, nil)
|
2014-07-03 05:52:34 +02:00
|
|
|
if err != nil {
|
2015-02-20 03:44:48 +01:00
|
|
|
return err
|
2014-07-03 05:52:34 +02:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// Write cert and key files.
|
|
|
|
if err = ioutil.WriteFile(certFile, cert, 0666); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err = ioutil.WriteFile(keyFile, key, 0600); err != nil {
|
|
|
|
os.Remove(certFile)
|
|
|
|
return err
|
|
|
|
}
|
2013-11-12 17:39:10 +01:00
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
rpcsLog.Infof("Done generating TLS certificates")
|
|
|
|
return nil
|
2013-11-12 17:39:10 +01:00
|
|
|
}
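// Illustrative sketch, not part of the original code: the generated pair can
// be loaded with the standard crypto/tls package (paths are hypothetical):
//
//	if err := genCertPair("rpc.cert", "rpc.key"); err != nil {
//		return err
//	}
//	keypair, err := tls.LoadX509KeyPair("rpc.cert", "rpc.key")
//	if err != nil {
//		return err
//	}
//	tlsConfig := tls.Config{Certificates: []tls.Certificate{keypair}}
//
// tlsConfig can then wrap the RPC listeners before they are handed to the
// server via rpcserverConfig.Listeners.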
|
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
// rpcserverPeer represents a peer for use with the RPC server.
|
|
|
|
//
|
|
|
|
// The interface contract requires that all of these methods are safe for
|
|
|
|
// concurrent access.
|
|
|
|
type rpcserverPeer interface {
|
|
|
|
// ToPeer returns the underlying peer instance.
|
|
|
|
ToPeer() *peer.Peer
|
|
|
|
|
|
|
|
// IsTxRelayDisabled returns whether or not the peer has disabled
|
|
|
|
// transaction relay.
|
|
|
|
IsTxRelayDisabled() bool
|
|
|
|
|
|
|
|
// BanScore returns the current integer value that represents how close
|
|
|
|
// the peer is to being banned.
|
|
|
|
BanScore() uint32
|
|
|
|
|
|
|
|
// FeeFilter returns the requested current minimum fee rate for which
|
|
|
|
// transactions should be announced.
|
|
|
|
FeeFilter() int64
|
|
|
|
}
|
|
|
|
|
|
|
|
// rpcserverConnManager represents a connection manager for use with the RPC
|
|
|
|
// server.
|
|
|
|
//
|
|
|
|
// The interface contract requires that all of these methods are safe for
|
|
|
|
// concurrent access.
|
|
|
|
type rpcserverConnManager interface {
|
|
|
|
// Connect adds the provided address as a new outbound peer. The
|
|
|
|
// permanent flag indicates whether or not to make the peer persistent
|
|
|
|
// and reconnect if the connection is lost. Attempting to connect to an
|
|
|
|
// already existing peer will return an error.
|
|
|
|
Connect(addr string, permanent bool) error
|
|
|
|
|
|
|
|
// RemoveByID removes the peer associated with the provided id from the
|
|
|
|
// list of persistent peers. Attempting to remove an id that does not
|
|
|
|
// exist will return an error.
|
|
|
|
RemoveByID(id int32) error
|
|
|
|
|
|
|
|
// RemoveByAddr removes the peer associated with the provided address
|
|
|
|
// from the list of persistent peers. Attempting to remove an address
|
|
|
|
// that does not exist will return an error.
|
|
|
|
RemoveByAddr(addr string) error
|
|
|
|
|
|
|
|
// DisconnectByID disconnects the peer associated with the provided id.
|
|
|
|
// This applies to both inbound and outbound peers. Attempting to
|
|
|
|
// remove an id that does not exist will return an error.
|
|
|
|
DisconnectByID(id int32) error
|
|
|
|
|
|
|
|
// DisconnectByAddr disconnects the peer associated with the provided
|
|
|
|
// address. This applies to both inbound and outbound peers.
|
|
|
|
// Attempting to remove an address that does not exist will return an
|
|
|
|
// error.
|
|
|
|
DisconnectByAddr(addr string) error
|
|
|
|
|
|
|
|
// ConnectedCount returns the number of currently connected peers.
|
|
|
|
ConnectedCount() int32
|
|
|
|
|
|
|
|
// NetTotals returns the sum of all bytes received and sent across the
|
|
|
|
// network for all peers.
|
|
|
|
NetTotals() (uint64, uint64)
|
|
|
|
|
|
|
|
// ConnectedPeers returns an array consisting of all connected peers.
|
|
|
|
ConnectedPeers() []rpcserverPeer
|
|
|
|
|
|
|
|
// PersistentPeers returns an array consisting of all the persistent
|
|
|
|
// peers.
|
|
|
|
PersistentPeers() []rpcserverPeer
|
|
|
|
|
|
|
|
// BroadcastMessage sends the provided message to all currently
|
|
|
|
// connected peers.
|
|
|
|
BroadcastMessage(msg wire.Message)
|
|
|
|
|
|
|
|
// AddRebroadcastInventory adds the provided inventory to the list of
|
|
|
|
// inventories to be rebroadcast at random intervals until they show up
|
|
|
|
// in a block.
|
|
|
|
AddRebroadcastInventory(iv *wire.InvVect, data interface{})
|
|
|
|
|
|
|
|
// RelayTransactions generates and relays inventory vectors for all of
|
|
|
|
// the passed transactions to all connected peers.
|
|
|
|
RelayTransactions(txns []*mempool.TxDesc)
|
2020-05-31 14:30:50 +02:00
|
|
|
|
|
|
|
// NodeAddresses returns an array consisting of node addresses which can
|
|
|
|
// potentially be used to find new nodes in the network.
|
|
|
|
NodeAddresses() []*wire.NetAddress
|
2016-04-02 23:58:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// rpcserverSyncManager represents a sync manager for use with the RPC server.
|
|
|
|
//
|
|
|
|
// The interface contract requires that all of these methods are safe for
|
|
|
|
// concurrent access.
|
|
|
|
type rpcserverSyncManager interface {
|
|
|
|
// IsCurrent returns whether or not the sync manager believes the chain
|
|
|
|
// is current as compared to the rest of the network.
|
|
|
|
IsCurrent() bool
|
|
|
|
|
|
|
|
// SubmitBlock submits the provided block to the network after
|
|
|
|
// processing it locally.
|
|
|
|
SubmitBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error)
|
|
|
|
|
|
|
|
// Pause pauses the sync manager until the returned channel is closed.
|
|
|
|
Pause() chan<- struct{}
|
|
|
|
|
2017-08-15 07:03:06 +02:00
|
|
|
// SyncPeerID returns the ID of the peer that is currently the peer being
|
|
|
|
// used to sync from or 0 if there is none.
|
|
|
|
SyncPeerID() int32
|
2016-04-02 23:58:01 +02:00
|
|
|
|
2017-08-20 03:35:37 +02:00
|
|
|
// LocateHeaders returns the headers of the blocks after the first known
|
2016-04-02 23:58:01 +02:00
|
|
|
// block in the provided locators until the provided stop hash or the
|
|
|
|
// current tip is reached, up to a max of wire.MaxBlockHeadersPerMsg
|
|
|
|
// hashes.
|
2017-08-20 03:35:37 +02:00
|
|
|
LocateHeaders(locators []*chainhash.Hash, hashStop *chainhash.Hash) []wire.BlockHeader
|
2016-04-02 23:58:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// rpcserverConfig is a descriptor containing the RPC server configuration.
|
|
|
|
type rpcserverConfig struct {
|
2017-08-14 01:58:58 +02:00
|
|
|
// Listeners defines a slice of listeners that the RPC server will take
|
|
|
|
// ownership of and accept connections on. Since the RPC server takes
|
|
|
|
// ownership of these listeners, they will be closed when the RPC server
|
|
|
|
// is stopped.
|
|
|
|
Listeners []net.Listener
|
2016-04-02 23:58:01 +02:00
|
|
|
|
|
|
|
// StartupTime is the unix timestamp for when the server that is hosting
|
|
|
|
// the RPC server started.
|
|
|
|
StartupTime int64
|
|
|
|
|
|
|
|
// ConnMgr defines the connection manager for the RPC server to use. It
|
|
|
|
// provides the RPC server with a means to do things such as add,
|
|
|
|
// remove, connect, disconnect, and query peers as well as other
|
|
|
|
// connection-related data and tasks.
|
|
|
|
ConnMgr rpcserverConnManager
|
|
|
|
|
2021-10-28 20:02:44 +02:00
|
|
|
// AddrMgr is the server's instance of the AddressManager.
|
|
|
|
AddrMgr *addrmgr.AddrManager
|
|
|
|
|
2016-04-02 23:58:01 +02:00
|
|
|
// SyncMgr defines the sync manager for the RPC server to use.
|
|
|
|
SyncMgr rpcserverSyncManager
|
|
|
|
|
|
|
|
// These fields allow the RPC server to interface with the local block
|
|
|
|
// chain data and state.
|
|
|
|
TimeSource blockchain.MedianTimeSource
|
|
|
|
Chain *blockchain.BlockChain
|
|
|
|
ChainParams *chaincfg.Params
|
|
|
|
DB database.DB
|
|
|
|
|
|
|
|
// TxMemPool defines the transaction memory pool to interact with.
|
|
|
|
TxMemPool *mempool.TxPool
|
|
|
|
|
|
|
|
// These fields allow the RPC server to interface with mining.
|
|
|
|
//
|
|
|
|
// Generator produces block templates and the CPUMiner solves them using
|
|
|
|
// the CPU. CPU mining is typically only useful for test purposes when
|
|
|
|
// doing regression or simulation testing.
|
|
|
|
Generator *mining.BlkTmplGenerator
|
|
|
|
CPUMiner *cpuminer.CPUMiner
|
|
|
|
|
|
|
|
// These fields define any optional indexes the RPC server can make use
|
|
|
|
// of to provide additional data when queried.
|
|
|
|
TxIndex *indexers.TxIndex
|
|
|
|
AddrIndex *indexers.AddrIndex
|
2017-08-25 01:30:34 +02:00
|
|
|
CfIndex *indexers.CfIndex
|
2017-11-13 23:39:16 +01:00
|
|
|
|
|
|
|
// The fee estimator keeps track of how long transactions are left in
|
|
|
|
// the mempool before they are mined into blocks.
|
|
|
|
FeeEstimator *mempool.FeeEstimator
|
2021-10-28 20:02:44 +02:00
|
|
|
|
|
|
|
// Services represents the services supported by this node.
|
|
|
|
Services wire.ServiceFlag
|
2016-04-02 23:58:01 +02:00
|
|
|
}
|
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
// newRPCServer returns a new instance of the rpcServer struct.
|
2016-04-02 23:58:01 +02:00
|
|
|
func newRPCServer(config *rpcserverConfig) (*rpcServer, error) {
|
2015-02-20 03:44:48 +01:00
|
|
|
rpc := rpcServer{
|
2016-04-02 23:58:01 +02:00
|
|
|
cfg: *config,
|
2016-08-11 20:39:23 +02:00
|
|
|
statusLines: make(map[int]string),
|
2016-04-02 23:58:01 +02:00
|
|
|
gbtWorkState: newGbtWorkState(config.TimeSource),
|
2016-08-11 20:39:23 +02:00
|
|
|
helpCacher: newHelpCacher(),
|
|
|
|
requestProcessShutdown: make(chan struct{}),
|
2019-05-12 02:48:24 +02:00
|
|
|
quit: make(chan int),
|
2014-08-22 23:22:46 +02:00
|
|
|
}
|
2015-03-30 19:45:31 +02:00
|
|
|
if cfg.RPCUser != "" && cfg.RPCPass != "" {
|
|
|
|
login := cfg.RPCUser + ":" + cfg.RPCPass
|
|
|
|
auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login))
|
2017-01-11 16:00:16 +01:00
|
|
|
rpc.authsha = sha256.Sum256([]byte(auth))
|
2015-03-30 19:45:31 +02:00
|
|
|
}
|
|
|
|
if cfg.RPCLimitUser != "" && cfg.RPCLimitPass != "" {
|
|
|
|
login := cfg.RPCLimitUser + ":" + cfg.RPCLimitPass
|
|
|
|
auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login))
|
2017-01-11 16:00:16 +01:00
|
|
|
rpc.limitauthsha = sha256.Sum256([]byte(auth))
|
2015-03-30 19:45:31 +02:00
|
|
|
}
|
2015-02-20 03:44:48 +01:00
|
|
|
rpc.ntfnMgr = newWsNotificationManager(&rpc)
|
2016-04-02 23:58:01 +02:00
|
|
|
rpc.cfg.Chain.Subscribe(rpc.handleBlockchainNotification)
|
2017-08-14 21:39:07 +02:00
|
|
|
|
2015-02-20 03:44:48 +01:00
|
|
|
return &rpc, nil
|
2013-08-06 23:55:22 +02:00
|
|
|
}
|
2014-06-30 20:16:05 +02:00
|
|
|
|
2017-08-14 21:39:07 +02:00
|
|
|
// Callback for notifications from the blockchain. It notifies clients that are
|
|
|
|
// long polling for changes or subscribed to websocket notifications.
|
2017-08-11 02:07:06 +02:00
|
|
|
func (s *rpcServer) handleBlockchainNotification(notification *blockchain.Notification) {
|
|
|
|
switch notification.Type {
|
|
|
|
case blockchain.NTBlockAccepted:
|
|
|
|
block, ok := notification.Data.(*btcutil.Block)
|
|
|
|
if !ok {
|
|
|
|
rpcsLog.Warnf("Chain accepted notification is not a block.")
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Allow any clients performing long polling via the
|
|
|
|
// getblocktemplate RPC to be notified when the new block causes
|
|
|
|
// their old block template to become stale.
|
|
|
|
s.gbtWorkState.NotifyBlockConnected(block.Hash())
|
|
|
|
|
|
|
|
case blockchain.NTBlockConnected:
|
|
|
|
block, ok := notification.Data.(*btcutil.Block)
|
|
|
|
if !ok {
|
|
|
|
rpcsLog.Warnf("Chain connected notification is not a block.")
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Notify registered websocket clients of incoming block.
|
|
|
|
s.ntfnMgr.NotifyBlockConnected(block)
|
|
|
|
|
|
|
|
case blockchain.NTBlockDisconnected:
|
|
|
|
block, ok := notification.Data.(*btcutil.Block)
|
|
|
|
if !ok {
|
|
|
|
rpcsLog.Warnf("Chain disconnected notification is not a block.")
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Notify registered websocket clients.
|
|
|
|
s.ntfnMgr.NotifyBlockDisconnected(block)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-30 20:16:05 +02:00
|
|
|
func init() {
|
|
|
|
rpcHandlers = rpcHandlersBeforeInit
|
2021-08-04 04:48:59 +02:00
|
|
|
for key := range claimtrieHandlers {
|
|
|
|
rpcHandlers[key] = claimtrieHandlers[key]
|
|
|
|
}
|
2014-06-30 20:16:05 +02:00
|
|
|
rand.Seed(time.Now().UnixNano())
|
|
|
|
}
|