btcd/multi: fix a number of typos in comments.

Authored by Nicola 'tekNico' Larosa on 2017-10-29 20:56:03 +01:00, committed by Dave Collins
parent 16dbb2602a
commit 11fcd83963
38 changed files with 52 additions and 52 deletions


@@ -570,8 +570,8 @@ Changes in 0.8.0-beta (Sun May 25 2014)
- btcctl utility changes:
- Add createencryptedwallet command
- Add getblockchaininfo command
- - Add importwallet commmand
- - Add addmultisigaddress commmand
+ - Add importwallet command
+ - Add addmultisigaddress command
- Add setgenerate command
- Accept --testnet and --wallet flags which automatically select
the appropriate port and TLS certificates needed to communicate


@@ -92,7 +92,7 @@ $ go install . ./cmd/...
## Getting Started
- btcd has several configuration options avilable to tweak how it runs, but all
+ btcd has several configuration options available to tweak how it runs, but all
of the basic operations described in the intro section work with zero
configuration.


@@ -74,7 +74,7 @@ func zipLocators(locators ...BlockLocator) BlockLocator {
}
// TestChainView ensures all of the exported functionality of chain views works
- // as intended with the expection of some special cases which are handled in
+ // as intended with the exception of some special cases which are handled in
// other tests.
func TestChainView(t *testing.T) {
// Construct a synthetic block index consisting of the following


@@ -15,7 +15,7 @@ independent versions over the peer-to-peer network.
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
- of blocks that excerise the consensus validation rules.
+ of blocks that exercise the consensus validation rules.
## Installation and Updating


@@ -15,6 +15,6 @@ independent versions over the peer-to-peer network.
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
- of blocks that excerise the consensus validation rules.
+ of blocks that exercise the consensus validation rules.
*/
package fullblocktests


@@ -651,7 +651,7 @@ func (idx *AddrIndex) Create(dbTx database.Tx) error {
}
// writeIndexData represents the address index data to be written for one block.
- // It consistens of the address mapped to an ordered list of the transactions
+ // It consists of the address mapped to an ordered list of the transactions
// that involve the address in block. It is ordered so the transactions can be
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int
@@ -690,7 +690,7 @@ func (idx *AddrIndex) indexPkScript(data writeIndexData, pkScript []byte, txIdx
}
// indexBlock extract all of the standard addresses from all of the transactions
- // in the passed block and maps each of them to the assocaited transaction using
+ // in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *btcutil.Block, view *blockchain.UtxoViewpoint) {
for txIdx, tx := range block.Transactions() {


@@ -40,7 +40,7 @@ var (
// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the main
- // chain. In order to significanly optimize the space requirements a separate
+ // chain. In order to significantly optimize the space requirements a separate
// index which provides an internal mapping between each block that has been
// indexed and a unique ID for use within the hash to location mappings. The ID
// is simply a sequentially incremented uint32. This is useful because it is
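
The internal block ID mapping described above is easiest to see as a data-structure sketch. The following is illustrative only (hypothetical type and field names, not the index's actual serialized layout): each indexed transaction records a 4-byte sequential block ID rather than a 32-byte block hash, and two small buckets translate between hashes and IDs.

package txindexsketch

import "github.com/btcsuite/btcd/chaincfg/chainhash"

// blockIDIndex sketches the hash<->ID translation the comment describes.
type blockIDIndex struct {
    idByHash map[chainhash.Hash]uint32 // block hash -> sequential internal ID
    hashByID map[uint32]chainhash.Hash // sequential internal ID -> block hash
    nextID   uint32                    // incremented as new blocks are indexed
}

// txEntry sketches what each per-transaction record can then get away with
// storing: a 4-byte block ID instead of repeating the full block hash.
type txEntry struct {
    blockID  uint32
    txOffset uint32 // byte offset of the transaction within the block
    txLen    uint32 // serialized length of the transaction
}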


@@ -45,7 +45,7 @@ func TestMedianTime(t *testing.T) {
// be ignored.
{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},
- // Excerise the condition where the median offset is greater
+ // Exercise the condition where the median offset is greater
// than the max allowed adjustment, but there is at least one
// sample that is close enough to the current time to avoid
// triggering a warning about an invalid local clock.
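
For context, the median time logic exercised by this test is exposed through the package's exported median time source. A minimal sketch, assuming the blockchain.NewMedianTime API and made-up peer offsets:

package main

import (
    "fmt"
    "time"

    "github.com/btcsuite/btcd/blockchain"
)

func main() {
    // Each sample is the clock reported by a (hypothetical) remote peer.
    // The adjusted time is the local clock plus the median of the sample
    // offsets, subject to the package's maximum allowed adjustment.
    mt := blockchain.NewMedianTime()
    now := time.Now()
    mt.AddTimeSample("peer1", now.Add(5*time.Second))
    mt.AddTimeSample("peer2", now.Add(-3*time.Second))
    mt.AddTimeSample("peer3", now.Add(10*time.Second))
    mt.AddTimeSample("peer4", now.Add(2*time.Second))
    mt.AddTimeSample("peer5", now.Add(-1*time.Second))

    fmt.Println("median offset:", mt.Offset())
    fmt.Println("adjusted time:", mt.AdjustedTime())
}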


@@ -85,7 +85,7 @@ type thresholdConditionChecker interface {
// Condition returns whether or not the rule change activation condition
// has been met. This typically involves checking whether or not the
- // bit assocaited with the condition is set, but can be more complex as
+ // bit associated with the condition is set, but can be more complex as
// needed.
Condition(*blockNode) (bool, error)
}


@@ -791,7 +791,7 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode
}
// If segwit is active, then we'll need to fully validate the
- // new witness commitment for adherance to the rules.
+ // new witness commitment for adherence to the rules.
if segwitState == ThresholdActive {
// Validate the witness commitment (if any) within the
// block. This involves asserting that if the coinbase


@@ -27,7 +27,7 @@ const (
// MaxBlockSigOpsCost is the maximum number of signature operations
// allowed for a block. It is calculated via a weighted algorithm which
- // weights segragated witness sig ops lower than regular sig ops.
+ // weights segregated witness sig ops lower than regular sig ops.
MaxBlockSigOpsCost = 80000
// WitnessScaleFactor determines the level of "discount" witness data
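
The weighting mentioned in this comment can be made concrete with a small, hedged arithmetic sketch (the sig op counts are invented; the constants come from the blockchain package): non-witness sig ops are scaled up by WitnessScaleFactor while witness sig ops count once, and the weighted total must stay at or below MaxBlockSigOpsCost.

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/blockchain"
)

func main() {
    // Hypothetical per-block totals.
    legacySigOps := 1500  // counted WitnessScaleFactor times each
    witnessSigOps := 6000 // counted once each

    sigOpCost := legacySigOps*blockchain.WitnessScaleFactor + witnessSigOps
    fmt.Println(sigOpCost, sigOpCost <= blockchain.MaxBlockSigOpsCost) // 12000 true
}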


@@ -202,10 +202,10 @@ func blockDbPath(dbType string) string {
return dbPath
}
- // warnMultipeDBs shows a warning if multiple block database types are detected.
+ // warnMultipleDBs shows a warning if multiple block database types are detected.
// This is not a situation most users want. It is handy for development however
// to support multiple side-by-side databases.
- func warnMultipeDBs() {
+ func warnMultipleDBs() {
// This is intentionally not using the known db types which depend
// on the database types compiled into the binary since we want to
// detect legacy db types as well.
@@ -253,7 +253,7 @@ func loadBlockDB() (database.DB, error) {
return db, nil
}
- warnMultipeDBs()
+ warnMultipleDBs()
// The database name is based on the database type.
dbPath := blockDbPath(cfg.DbType)


@@ -269,7 +269,7 @@ func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
return ret
}
- // recoverKeyFromSignature recoves a public key from the signature "sig" on the
+ // recoverKeyFromSignature recovers a public key from the signature "sig" on the
// given message hash "msg". Based on the algorithm found in section 5.1.5 of
// SEC 1 Ver 2.0, page 47-48 (53 and 54 in the pdf). This performs the details
// in the inner loop in Step 1. The counter provided is actually the j parameter
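
recoverKeyFromSignature itself is unexported, but the recovery it performs is reachable through the package's compact signature helpers. A minimal sketch, assuming the exported btcec.SignCompact/RecoverCompact API, a throwaway key, and an arbitrary message:

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/btcec"
    "github.com/btcsuite/btcd/chaincfg/chainhash"
)

func main() {
    priv, err := btcec.NewPrivateKey(btcec.S256())
    if err != nil {
        panic(err)
    }
    hash := chainhash.DoubleHashB([]byte("example message"))

    // Compact signatures embed the recovery information (the j parameter
    // mentioned in the comment) in their header byte.
    sig, err := btcec.SignCompact(btcec.S256(), priv, hash, true)
    if err != nil {
        panic(err)
    }

    // Recover the public key from the signature and message hash alone.
    pub, wasCompressed, err := btcec.RecoverCompact(btcec.S256(), sig, hash)
    if err != nil {
        panic(err)
    }
    fmt.Println(pub.IsEqual(priv.PubKey()), wasCompressed) // true true
}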


@@ -183,7 +183,7 @@ func TestMethodUsageText(t *testing.T) {
continue
}
- // Get the usage again to excerise caching.
+ // Get the usage again to exercise caching.
usage, err = btcjson.MethodUsageText(test.method)
if err != nil {
t.Errorf("Test #%d (%s) unexpected error: %v", i,


@@ -183,7 +183,7 @@ func typesMaybeCompatible(dest reflect.Type, src reflect.Type) bool {
return true
}
- // When both types are numeric, they are potentially compatibile.
+ // When both types are numeric, they are potentially compatible.
srcKind := src.Kind()
destKind := dest.Kind()
if isNumeric(destKind) && isNumeric(srcKind) {


@@ -247,7 +247,7 @@ func TestMustRegisterCmdPanic(t *testing.T) {
func TestRegisteredCmdMethods(t *testing.T) {
t.Parallel()
- // Ensure the registerd methods are returned.
+ // Ensure the registered methods are returned.
methods := btcjson.RegisteredCmdMethods()
if len(methods) == 0 {
t.Fatal("RegisteredCmdMethods: no methods")


@@ -21,7 +21,7 @@ const (
WalletLockStateNtfnMethod = "walletlockstate"
// NewTxNtfnMethod is the method used to notify that a wallet server has
- // added a new transaction to the transaciton store.
+ // added a new transaction to the transaction store.
NewTxNtfnMethod = "newtx"
)


@@ -87,12 +87,12 @@ const (
DeploymentTestDummy = iota
// DeploymentCSV defines the rule change deployment ID for the CSV
- // soft-fork package. The CSV package includes the depolyment of BIPS
+ // soft-fork package. The CSV package includes the deployment of BIPS
// 68, 112, and 113.
DeploymentCSV
// DeploymentSegwit defines the rule change deployment ID for the
- // Segragated Witness (segwit) soft-fork package. The segwit package
+ // Segregated Witness (segwit) soft-fork package. The segwit package
// includes the deployment of BIPS 141, 142, 144, 145, 147 and 173.
DeploymentSegwit
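
These deployment IDs index into the Deployments array of a network's parameters. A minimal lookup sketch, assuming the ConsensusDeployment fields BitNumber, StartTime, and ExpireTime:

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/chaincfg"
)

func main() {
    // Look up the version bit and deployment window used to signal the
    // segwit soft fork on mainnet.
    d := chaincfg.MainNetParams.Deployments[chaincfg.DeploymentSegwit]
    fmt.Printf("bit=%d start=%d expire=%d\n", d.BitNumber, d.StartTime, d.ExpireTime)
}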


@@ -88,7 +88,7 @@ func makeTestOutput(r *rpctest.Harness, t *testing.T,
return key, utxo, selfAddrScript, nil
}
- // TestBIP0113Activation tests for proper adherance of the BIP 113 rule
+ // TestBIP0113Activation tests for proper adherence of the BIP 113 rule
// constraint which requires all transaction finality tests to use the MTP of
// the last 11 blocks, rather than the timestamp of the block which includes
// them.
@@ -188,7 +188,7 @@ func TestBIP0113Activation(t *testing.T) {
// At this point, the block height should be 103: we mined 101 blocks
// to create a single mature output, then an additional block to create
// a new output, and then mined a single block above to include our
- // transation.
+ // transaction.
assertChainHeight(r, t, 103)
// Next, mine enough blocks to ensure that the soft-fork becomes


@@ -18,7 +18,7 @@ further filtered based upon a configurable policy.
One of the policy configuration options controls whether or not "standard"
transactions are accepted. In essence, a "standard" transaction is one that
- satisfies a fairly strict set of requirements that are largley intended to help
+ satisfies a fairly strict set of requirements that are largely intended to help
provide fair use of the system to all users. It is important to note that what
is considered a "standard" transaction changes over time. For some insight, at
the time of this writing, an example of _some_ of the criteria that are required


@@ -16,7 +16,7 @@ further filtered based upon a configurable policy.
One of the policy configuration options controls whether or not "standard"
transactions are accepted. In essence, a "standard" transaction is one that
- satisfies a fairly strict set of requirements that are largley intended to help
+ satisfies a fairly strict set of requirements that are largely intended to help
provide fair use of the system to all users. It is important to note that what
is considered a "standard" transaction changes over time. For some insight, at
the time of this writing, an example of SOME of the criteria that are required


@@ -23,7 +23,7 @@ import (
// fakeChain is used by the pool harness to provide generated test utxos and
// a current faked chain height to the pool callbacks. This, in turn, allows
- // transations to be appear as though they are spending completely valid utxos.
+ // transactions to appear as though they are spending completely valid utxos.
type fakeChain struct {
sync.RWMutex
utxos *blockchain.UtxoViewpoint


@@ -219,7 +219,7 @@ type BlockTemplate struct {
WitnessCommitment []byte
}
- // mergeUtxoView adds all of the entries in view to viewA. The result is that
+ // mergeUtxoView adds all of the entries in viewB to viewA. The result is that
// viewA will contain all of its original entries plus all of the entries
// in viewB. It will replace any entries in viewB which also exist in viewA
// if the entry in viewA is fully spent.
@@ -828,7 +828,7 @@ mempoolLoop:
// witness preimage generated above. With the commitment
// generated, the witness script for the output is: OP_RETURN
// OP_DATA_36 {0xaa21a9ed || witnessCommitment}. The leading
- // prefix is refered to as the "witness magic bytes".
+ // prefix is referred to as the "witness magic bytes".
witnessCommitment = chainhash.DoubleHashB(witnessPreimage[:])
witnessScript := append(blockchain.WitnessMagicBytes, witnessCommitment...)
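
For readers following the commitment construction, here is a standalone, hedged sketch of the same steps: per BIP 141 the preimage is the witness merkle root concatenated with the coinbase witness nonce (both zeroed here purely for illustration), and the commitment output script is OP_RETURN followed by a 36-byte push of the magic prefix plus the double-SHA256 commitment.

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/blockchain"
    "github.com/btcsuite/btcd/chaincfg/chainhash"
    "github.com/btcsuite/btcd/txscript"
)

func main() {
    var witnessMerkleRoot, witnessNonce [32]byte // zeroed for illustration
    witnessPreimage := append(witnessMerkleRoot[:], witnessNonce[:]...)

    witnessCommitment := chainhash.DoubleHashB(witnessPreimage)
    witnessScript := append(blockchain.WitnessMagicBytes, witnessCommitment...)

    // OP_RETURN OP_DATA_36 {0xaa21a9ed || witnessCommitment}
    pkScript, err := txscript.NewScriptBuilder().
        AddOp(txscript.OP_RETURN).AddData(witnessScript).Script()
    if err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", pkScript)
}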


@@ -422,7 +422,7 @@ func (c *Client) Session() (*btcjson.SessionResult, error) {
return c.SessionAsync().Receive()
}
- // FutureVersionResult is a future promise to delivere the result of a version
+ // FutureVersionResult is a future promise to deliver the result of a version
// RPC invocation (or an applicable error).
//
// NOTE: This is a btcsuite extension ported from


@@ -1106,7 +1106,7 @@ type ConnConfig struct {
// flag can be set to true to use basic HTTP POST requests instead.
HTTPPostMode bool
- // EnableBCInfoHacks is an option provided to enable compatiblity hacks
+ // EnableBCInfoHacks is an option provided to enable compatibility hacks
// when connecting to blockchain.info RPC server
EnableBCInfoHacks bool
}
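
A hedged configuration sketch showing where these fields sit (host and credentials are placeholders): HTTPPostMode switches the client to plain, bitcoind-style HTTP POST requests instead of websockets, and EnableBCInfoHacks opts in to the blockchain.info compatibility behavior.

package main

import (
    "log"

    "github.com/btcsuite/btcd/rpcclient"
)

func main() {
    connCfg := &rpcclient.ConnConfig{
        Host:              "localhost:8332", // placeholder endpoint
        User:              "rpcuser",
        Pass:              "rpcpass",
        HTTPPostMode:      true,  // plain HTTP POST instead of websockets
        DisableTLS:        true,  // assumes a local, non-TLS server
        EnableBCInfoHacks: false, // only needed for blockchain.info
    }

    client, err := rpcclient.New(connCfg, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Shutdown()
}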


@@ -28,7 +28,7 @@ var (
"to use this feature")
)
- // notificationState is used to track the current state of successfuly
+ // notificationState is used to track the current state of successfully
// registered notification so the state can be automatically re-established on
// reconnect.
type notificationState struct {


@@ -148,7 +148,7 @@ type FutureListUnspentResult chan *response
// Receive waits for the response promised by the future and returns all
// unspent wallet transaction outputs returned by the RPC call. If the
- // future wac returnd by a call to ListUnspentMinAsync, ListUnspentMinMaxAsync,
+ // future wac returned by a call to ListUnspentMinAsync, ListUnspentMinMaxAsync,
// or ListUnspentMinMaxAddressesAsync, the range may be limited by the
// parameters of the RPC invocation.
func (r FutureListUnspentResult) Receive() ([]btcjson.ListUnspentResult, error) {
@@ -1397,7 +1397,7 @@ func (r FutureGetBalanceResult) Receive() (btcutil.Amount, error) {
// FutureGetBalanceParseResult is same as FutureGetBalanceResult except
// that the result is expected to be a string which is then parsed into
// a float64 value
- // This is required for compatiblity with servers like blockchain.info
+ // This is required for compatibility with servers like blockchain.info
type FutureGetBalanceParseResult chan *response
// Receive waits for the response promised by the future and returns the


@@ -196,7 +196,7 @@ var helpDescsEnUS = map[string]string{
"txrawresult-confirmations": "Number of confirmations of the block",
"txrawresult-time": "Transaction time in seconds since 1 Jan 1970 GMT",
"txrawresult-blocktime": "Block time in seconds since the 1 Jan 1970 GMT",
- "txrawresult-size": "The size of the transation in bytes",
+ "txrawresult-size": "The size of the transaction in bytes",
"txrawresult-vsize": "The virtual size of the transaction in bytes",
"txrawresult-hash": "The wtxid of the transaction",
@@ -221,7 +221,7 @@ var helpDescsEnUS = map[string]string{
"getblockverboseresult-size": "The size of the block",
"getblockverboseresult-height": "The height of the block in the block chain",
"getblockverboseresult-version": "The block version",
- "getblockverboseresult-versionHex": "The block version in hexidecimal",
+ "getblockverboseresult-versionHex": "The block version in hexadecimal",
"getblockverboseresult-merkleroot": "Root hash of the merkle tree",
"getblockverboseresult-tx": "The transaction hashes (only when verbosetx=false)",
"getblockverboseresult-rawtx": "The transactions as JSON objects (only when verbosetx=true)",
@@ -256,7 +256,7 @@ var helpDescsEnUS = map[string]string{
"getblockheaderverboseresult-confirmations": "The number of confirmations",
"getblockheaderverboseresult-height": "The height of the block in the block chain",
"getblockheaderverboseresult-version": "The block version",
- "getblockheaderverboseresult-versionHex": "The block version in hexidecimal",
+ "getblockheaderverboseresult-versionHex": "The block version in hexadecimal",
"getblockheaderverboseresult-merkleroot": "Root hash of the merkle tree",
"getblockheaderverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
"getblockheaderverboseresult-nonce": "The block nonce",


@@ -302,7 +302,7 @@
; Enable built-in CPU mining.
;
; NOTE: This is typically only useful for testing purposes such as testnet or
- ; simnet since the difficutly on mainnet is far too high for CPU mining to be
+ ; simnet since the difficulty on mainnet is far too high for CPU mining to be
; worth your while.
; generate=false


@@ -2101,7 +2101,7 @@ out:
s.wg.Done()
}
- // setupRPCListeners returns a slice of listners that are configured for use
+ // setupRPCListeners returns a slice of listeners that are configured for use
// with the RPC server depending on the configuration settings for listen
// addresses and TLS.
func setupRPCListeners() ([]net.Listener, error) {


@@ -129,9 +129,9 @@ func IsWitnessProgram(script []byte) bool {
// isWitnessProgram returns true if the passed script is a witness program, and
// false otherwise. A witness program MUST adhere to the following constraints:
- // there must be excatly two pops (program version and the program itself), the
+ // there must be exactly two pops (program version and the program itself), the
// first opcode MUST be a small integer (0-16), the push data MUST be
- // cannonical, and finally the size of the push data must be between 2 and 40
+ // canonical, and finally the size of the push data must be between 2 and 40
// bytes.
func isWitnessProgram(pops []parsedOpcode) bool {
return len(pops) == 2 &&
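
The constraints in this comment can be exercised through the exported IsWitnessProgram function; a minimal sketch that builds a version-0 witness program (a zeroed 20-byte value standing in for a public key hash):

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/txscript"
)

func main() {
    // Version opcode (a small integer) followed by one canonical push of
    // the program bytes, which must be between 2 and 40 bytes long.
    program := make([]byte, 20)
    script, err := txscript.NewScriptBuilder().
        AddOp(txscript.OP_0).AddData(program).Script()
    if err != nil {
        panic(err)
    }

    fmt.Println(txscript.IsWitnessProgram(script)) // true
}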


@@ -568,13 +568,13 @@ func TestCalcScriptInfo(t *testing.T) {
// unsupported address types are handled properly.
type bogusAddress struct{}
- // EncodeAddress simply returns an empty string. It exists to satsify the
+ // EncodeAddress simply returns an empty string. It exists to satisfy the
// btcutil.Address interface.
func (b *bogusAddress) EncodeAddress() string {
return ""
}
- // ScriptAddress simply returns an empty byte slice. It exists to satsify the
+ // ScriptAddress simply returns an empty byte slice. It exists to satisfy the
// btcutil.Address interface.
func (b *bogusAddress) ScriptAddress() []byte {
return nil
@@ -585,7 +585,7 @@ func (b *bogusAddress) IsForNet(chainParams *chaincfg.Params) bool {
return true // why not?
}
- // String simply returns an empty string. It exists to satsify the
+ // String simply returns an empty string. It exists to satisfy the
// btcutil.Address interface.
func (b *bogusAddress) String() string {
return ""


@@ -630,8 +630,8 @@ func WriteVarString(w io.Writer, pver uint32, str string) error {
// ReadVarBytes reads a variable length byte array. A byte array is encoded
// as a varInt containing the length of the array followed by the bytes
// themselves. An error is returned if the length is greater than the
- // passed maxAllowed parameter which helps protect against memory exhuastion
- // attacks and forced panics thorugh malformed messages. The fieldName
+ // passed maxAllowed parameter which helps protect against memory exhaustion
+ // attacks and forced panics through malformed messages. The fieldName
// parameter is only used for the error message so it provides more context in
// the error.
func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32,
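
A short round-trip sketch of the encoding this comment describes, using the exported WriteVarBytes and ReadVarBytes helpers and an invented 1024-byte cap:

package main

import (
    "bytes"
    "fmt"

    "github.com/btcsuite/btcd/wire"
)

func main() {
    // Encode: a varint holding the length, then the bytes themselves.
    var buf bytes.Buffer
    payload := []byte{0xde, 0xad, 0xbe, 0xef}
    if err := wire.WriteVarBytes(&buf, wire.ProtocolVersion, payload); err != nil {
        panic(err)
    }

    // Decode, rejecting anything longer than 1024 bytes so a malformed
    // length prefix cannot force a huge allocation.
    decoded, err := wire.ReadVarBytes(&buf, wire.ProtocolVersion, 1024, "example payload")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", decoded) // deadbeef
}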


@@ -78,7 +78,7 @@ func TestGetDataWire(t *testing.T) {
t.Errorf("NewHashFromStr: %v", err)
}
- // Transation 1 of Block 203707 hash.
+ // Transaction 1 of Block 203707 hash.
hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0"
txHash, err := chainhash.NewHashFromStr(hashStr)
if err != nil {


@@ -78,7 +78,7 @@ func TestInvWire(t *testing.T) {
t.Errorf("NewHashFromStr: %v", err)
}
- // Transation 1 of Block 203707 hash.
+ // Transaction 1 of Block 203707 hash.
hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0"
txHash, err := chainhash.NewHashFromStr(hashStr)
if err != nil {


@@ -405,7 +405,7 @@ var merkleBlockOne = MsgMerkleBlock{
}
// merkleBlockOneBytes is the serialized bytes for a merkle block created from
- // block one of the block chain where the first transation matches.
+ // block one of the block chain where the first transaction matches.
var merkleBlockOneBytes = []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,


@@ -69,7 +69,7 @@ func TestNotFoundWire(t *testing.T) {
t.Errorf("NewHashFromStr: %v", err)
}
- // Transation 1 of Block 203707 hash.
+ // Transaction 1 of Block 203707 hash.
hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0"
txHash, err := chainhash.NewHashFromStr(hashStr)
if err != nil {


@@ -923,7 +923,7 @@ func writeOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error
// script. It is encoded as a varInt containing the length of the array
// followed by the bytes themselves. An error is returned if the length is
// greater than the passed maxAllowed parameter which helps protect against
- // memory exhuastion attacks and forced panics thorugh malformed messages. The
+ // memory exhaustion attacks and forced panics through malformed messages. The
// fieldName parameter is only used for the error message so it provides more
// context in the error.
func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) ([]byte, error) {