multi: Fix several misspellings in the comments.

This commit corrects several typos in the comments found by misspell.
Dave Collins 2016-02-25 11:17:12 -06:00
parent ef9c50be57
commit eb882f39f8
39 changed files with 71 additions and 71 deletions
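For reference, misspellings like these are typically surfaced by running the misspell linter over the source tree; a minimal sketch, assuming the github.com/client9/misspell tool and a POSIX shell:

    # Install the linter (import path assumed; adjust to your environment).
    go get -u github.com/client9/misspell/cmd/misspell

    # Report misspellings across the Go sources.
    find . -name '*.go' | xargs misspell

    # Or rewrite the files in place with the suggested corrections.
    find . -name '*.go' | xargs misspell -w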


@ -882,7 +882,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress) {
addrKey := NetAddressKey(addr)
oldBucket := -1
for i := range a.addrNew {
// we check for existance so we can record the first one
// we check for existence so we can record the first one
if _, ok := a.addrNew[i][addrKey]; ok {
delete(a.addrNew[i], addrKey)
ka.refs--


@ -16,7 +16,7 @@ import (
"github.com/btcsuite/btcd/wire"
)
// naTest is used to describe a test to be perfomed against the NetAddressKey
// naTest is used to describe a test to be performed against the NetAddressKey
// method.
type naTest struct {
in wire.NetAddress


@ -89,7 +89,7 @@ func TestHaveBlock(t *testing.T) {
// Block 100000 should be present (as an orphan).
{hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true},
// Random hashes should not be availble.
// Random hashes should not be available.
{hash: "123", want: false},
}


@ -163,7 +163,7 @@ func BigToCompact(n *big.Int) uint32 {
// CalcWork calculates a work value from difficulty bits. Bitcoin increases
// the difficulty for generating a block by decreasing the value which the
// generated hash must be less than. This difficulty target is stored in each
// block header using a compact representation as described in the documenation
// block header using a compact representation as described in the documentation
// for CompactToBig. The main chain is selected by choosing the chain that has
// the most proof of work (highest difficulty). Since a lower target difficulty
// value equates to higher actual difficulty, the work value which will be


@ -34,7 +34,7 @@ func TstTimeSorter(times []time.Time) sort.Interface {
var TstCheckSerializedHeight = checkSerializedHeight
// TstSetMaxMedianTimeEntries makes the ability to set the maximum number of
// median tiem entries available to the test package.
// median time entries available to the test package.
func TstSetMaxMedianTimeEntries(val int) {
maxMedianTimeEntries = val
}


@ -151,7 +151,7 @@ func IsFinalizedTransaction(tx *btcutil.Tx, blockHeight int32, blockTime time.Ti
return true
}
// At this point, the transaction's lock time hasn't occured yet, but
// At this point, the transaction's lock time hasn't occurred yet, but
// the transaction might still be finalized if the sequence number
// for all transaction inputs is maxed out.
for _, txIn := range msgTx.TxIn {


@ -1469,7 +1469,7 @@ func newBlockManager(s *server) (*blockManager, error) {
}
bmgrLog.Infof("Block index generation complete")
// Initialize the chain state now that the intial block node index has
// Initialize the chain state now that the initial block node index has
// been generated.
bm.updateChainState(newestHash, height)
@ -1479,7 +1479,7 @@ func newBlockManager(s *server) (*blockManager, error) {
// removeRegressionDB removes the existing regression test database if running
// in regression test mode and it already exists.
func removeRegressionDB(dbPath string) error {
// Dont do anything if not in regression test mode.
// Don't do anything if not in regression test mode.
if !cfg.RegressionTest {
return nil
}


@ -65,8 +65,8 @@ func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
// HMAC [32]byte
// }
//
// The primary aim is to ensure byte compatibility with Pyelliptic. Additionaly,
// refer to section 5.8.1 of ANSI X9.63 for rationale on this format.
// The primary aim is to ensure byte compatibility with Pyelliptic. Also, refer
// to section 5.8.1 of ANSI X9.63 for rationale on this format.
func Encrypt(pubkey *PublicKey, in []byte) ([]byte, error) {
ephemeral, err := NewPrivateKey(S256())
if err != nil {


@ -41,7 +41,7 @@ package btcec
// 3) Since we're dealing with 32-bit values, 64-bits of overflow is a
// reasonable choice for #2
// 4) Given the need for 256-bits of precision and the properties stated in #1,
// #2, and #3, the representation which best accomodates this is 10 uint32s
// #2, and #3, the representation which best accommodates this is 10 uint32s
// with base 2^26 (26 bits * 10 = 260 bits, so the final word only needs 22
// bits) which leaves the desired 64 bits (32 * 10 = 320, 320 - 256 = 64) for
// overflow


@ -26,7 +26,7 @@ func isOdd(a *big.Int) bool {
// the solution to use.
func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) {
// TODO(oga) This will probably only work for secp256k1 due to
// optimisations.
// optimizations.
// Y = +-sqrt(x^3 + B)
x3 := new(big.Int).Mul(x, x)


@ -221,7 +221,7 @@ type GetNetTotalsResult struct {
TimeMillis int64 `json:"timemillis"`
}
// ScriptSig models a signature script. It is defined seperately since it only
// ScriptSig models a signature script. It is defined separately since it only
// applies to non-coinbase. Therefore the field in the Vin structure needs
// to be a pointer.
type ScriptSig struct {
@ -229,7 +229,7 @@ type ScriptSig struct {
Hex string `json:"hex"`
}
// Vin models parts of the tx data. It is defined seperately since
// Vin models parts of the tx data. It is defined separately since
// getrawtransaction, decoderawtransaction, and searchrawtransaction use the
// same structure.
type Vin struct {
@ -322,7 +322,7 @@ func (v *VinPrevOut) MarshalJSON() ([]byte, error) {
return json.Marshal(txStruct)
}
// Vout models parts of the tx data. It is defined seperately since both
// Vout models parts of the tx data. It is defined separately since both
// getrawtransaction and decoderawtransaction use the same structure.
type Vout struct {
Value float64 `json:"value"`


@ -65,7 +65,7 @@ func subStructUsage(structType reflect.Type) string {
}
// Create the name/value entry for the field while considering
// the type of the field. Not all possibile types are covered
// the type of the field. Not all possible types are covered
// here and when one of the types not specifically covered is
// encountered, the field name is simply reused for the value.
fieldName := strings.ToLower(rtf.Name)


@ -11,7 +11,7 @@ import (
"github.com/btcsuite/btcd/btcjson"
)
// TestCmdMethod tests the CmdMethod function to ensure it retuns the expected
// TestCmdMethod tests the CmdMethod function to ensure it returns the expected
// methods and errors.
func TestCmdMethod(t *testing.T) {
t.Parallel()


@ -253,7 +253,7 @@ func TestRegisteredCmdMethods(t *testing.T) {
t.Fatal("RegisteredCmdMethods: no methods")
}
// Ensure the returnd methods are sorted.
// Ensure the returned methods are sorted.
sortedMethods := make([]string, len(methods))
copy(sortedMethods, methods)
sort.Sort(sort.StringSlice(sortedMethods))


@ -31,9 +31,9 @@ const (
// When in "CatchUp" mode, incoming requests to index newly solved
// blocks are backed up for later processing. Once we've finished
// catching up, we process these queued jobs, and then enter into
// "maintainence" mode.
// "maintenance" mode.
indexCatchUp indexState = iota
// When in "maintainence" mode, we have a single worker serially
// When in "maintenance" mode, we have a single worker serially
// processing incoming jobs to index newly solved blocks.
indexMaintain
)


@ -188,7 +188,7 @@ func supportedSubsystems() []string {
subsystems = append(subsystems, subsysID)
}
// Sort the subsytems for stable display.
// Sort the subsystems for stable display.
sort.Strings(subsystems)
return subsystems
}


@ -40,7 +40,7 @@ type Db interface {
// DropAfterBlockBySha will remove any blocks from the database after
// the given block. It terminates any existing transaction and performs
// its operations in an atomic transaction which is commited before
// its operations in an atomic transaction which is committed before
// the function returns.
DropAfterBlockBySha(*wire.ShaHash) (err error)
@ -121,7 +121,7 @@ type Db interface {
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the curent tip of the
// addr index. These two operations are performed in an atomic
// transaction which is commited before the function returns.
// transaction which is committed before the function returns.
// Addresses are indexed by the raw bytes of their base58 decoded
// hash160.
UpdateAddrIndexForBlock(blkSha *wire.ShaHash, height int32,
@ -194,7 +194,7 @@ func AddDBDriver(instance DriverDB) {
driverList = append(driverList, instance)
}
// CreateDB intializes and opens a database.
// CreateDB initializes and opens a database.
func CreateDB(dbtype string, args ...interface{}) (pbdb Db, err error) {
for _, drv := range driverList {
if drv.DbType == dbtype {


@ -522,7 +522,7 @@ func (db *LevelDb) FetchTxsForAddr(addr btcutil.Address, skip int,
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the curent tip of the
// addr index. These two operations are performed in an atomic
// transaction which is commited before the function returns.
// transaction which is committed before the function returns.
// Transactions indexed by address are stored with the following format:
// * prefix || hash160 || blockHeight || txoffset || txlen
// Indexes are stored purely in the key, with blank data for the actual value


@ -48,7 +48,7 @@ below.
Transactions
The Tx interface provides facilities for rolling back or commiting changes that
The Tx interface provides facilities for rolling back or committing changes that
took place while the transaction was active. It also provides the root metadata
bucket under which all keys, values, and nested buckets are stored. A
transaction can either be read-only or read-write and managed or unmanaged.


@ -58,9 +58,9 @@ func SupportedDrivers() []string {
return supportedDBs
}
// Create intializes and opens a database for the specified type. The arguments
// are specific to the database type driver. See the documentation for the
// database driver for further details.
// Create initializes and opens a database for the specified type. The
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the the database type is not registered.
func Create(dbType string, args ...interface{}) (DB, error) {


@ -328,7 +328,7 @@ func testCursorInterface(tc *testContext, bucket database.Bucket) bool {
return false
}
// Ensure foward iteration works as expected after seeking.
// Ensure forward iteration works as expected after seeking.
middleIdx := (len(sortedValues) - 1) / 2
seekKey := sortedValues[middleIdx].key
curIdx = middleIdx
@ -648,7 +648,7 @@ func testMetadataManualTxInterface(tc *testContext) bool {
//
// Otherwise, a read-write transaction is created, the values are
// written, standard bucket tests for read-write transactions are
// performed, and then the transaction is either commited or rolled
// performed, and then the transaction is either committed or rolled
// back depending on the flag.
bucket1Name := []byte("bucket1")
populateValues := func(writable, rollback bool, putValues []keyPair) bool {


@ -450,7 +450,7 @@ type DB interface {
// Update invokes the passed function in the context of a managed
// read-write transaction. Any errors returned from the user-supplied
// function will cause the transaction to be rolled back and are
// returned from this function. Otherwise, the transaction is commited
// returned from this function. Otherwise, the transaction is committed
// when the user-supplied function returns a nil error.
//
// Calling Rollback or Commit on the transaction passed to the


@ -49,7 +49,7 @@ var (
)
// torLookupIP uses Tor to resolve DNS via the SOCKS extension they provide for
// resolution over the Tor network. Tor itself doesnt support ipv6 so this
// resolution over the Tor network. Tor itself doesn't support ipv6 so this
// doesn't either.
func torLookupIP(host, proxy string) ([]net.IP, error) {
conn, err := net.Dial("tcp", proxy)


@ -124,7 +124,7 @@ func (s *dynamicBanScore) int(t time.Time) uint32 {
// increase increases the persistent, the decaying or both scores by the values
// passed as parameters. The resulting score is calculated as if the action was
// carried out at the point time represented by the third paramter. The
// carried out at the point time represented by the third parameter. The
// resulting score is returned.
//
// This function is not safe for concurrent access.

log.go

@ -26,7 +26,7 @@ const (
maxRejectReasonLen = 250
)
// Loggers per subsytem. Note that backendLog is a seelog logger that all of
// Loggers per subsystem. Note that backendLog is a seelog logger that all of
// the subsystem loggers route their messages to. When adding new subsystems,
// add a reference here, to the subsystemLoggers map, and the useLogger
// function.
@ -136,7 +136,7 @@ func useLogger(subsystemID string, logger btclog.Logger) {
}
// initSeelogLogger initializes a new seelog logger that is used as the backend
// for all logging subsytems.
// for all logging subsystems.
func initSeelogLogger(logFile string) {
config := `
<seelog type="adaptive" mininterval="2000000" maxinterval="100000000"


@ -576,7 +576,7 @@ func (mp *txMemPool) FetchTransaction(txHash *wire.ShaHash) (*btcutil.Tx, error)
// FilterTransactionsByAddress returns all transactions currently in the
// mempool that either create an output to the passed address or spend a
// previously created ouput to the address.
// previously created output to the address.
func (mp *txMemPool) FilterTransactionsByAddress(addr btcutil.Address) ([]*btcutil.Tx, error) {
// Protect concurrent access.
mp.RLock()


@ -122,7 +122,7 @@ func errToRejectErr(err error) (wire.RejectCode, string) {
// Return a generic rejected string if there is no error. This really
// should not happen unless the code elsewhere is not setting an error
// as it should be, but it's best to be safe and simply return a generic
// string rather than allowing the following code that derferences the
// string rather than allowing the following code that dereferences the
// err to panic.
if err == nil {
return wire.RejectInvalid, "rejected"


@ -21,7 +21,7 @@ A quick overview of the major features peer provides are as follows:
- Full duplex reading and writing of bitcoin protocol messages
- Automatic handling of the initial handshake process including protocol
version negotiation
- Asynchronous message queueing of outbound messages with optional channel for
- Asynchronous message queuing of outbound messages with optional channel for
notification when the message is actually sent
- Flexible peer configuration
- Caller is responsible for creating outgoing connections and listening for
@ -144,7 +144,7 @@ raw message bytes using a format similar to hexdump -C.
Bitcoin Improvement Proposals
This package supports all BIPS supported by the wire packge.
This package supports all BIPS supported by the wire package.
(https://godoc.org/github.com/btcsuite/btcd/wire#hdr-Bitcoin_Improvement_Proposals)
*/
package peer


@ -375,7 +375,7 @@ type HostToNetAddrFunc func(host string, port uint16,
// communications via the peer-to-peer protocol. It provides full duplex
// reading and writing, automatic handling of the initial handshake process,
// querying of usage statistics and other information about the remote peer such
// as its address, user agent, and protocol version, output message queueing,
// as its address, user agent, and protocol version, output message queuing,
// inventory trickling, and the ability to dynamically register and unregister
// callbacks for handling bitcoin protocol messages.
//
@ -1315,9 +1315,9 @@ out:
case sccReceiveMessage:
// Remove received messages from the expected
// reponse map. Since certain commands expect
// one of a group of responses, remove everyting
// in the expected group accordingly.
// response map. Since certain commands expect
// one of a group of responses, remove
// everything in the expected group accordingly.
switch msgCmd := msg.message.Command(); msgCmd {
case wire.CmdBlock:
fallthrough
@ -1654,10 +1654,10 @@ out:
log.Tracef("Peer input handler done for %s", p)
}
// queueHandler handles the queueing of outgoing data for the peer. This runs
// as a muxer for various sources of input so we can ensure that server and
// peer handlers will not block on us sending a message.
// We then pass the data on to outHandler to be actually written.
// queueHandler handles the queuing of outgoing data for the peer. This runs as
// a muxer for various sources of input so we can ensure that server and peer
// handlers will not block on us sending a message. That data is then passed on
// to outHandler to be actually written.
func (p *Peer) queueHandler() {
pendingMsgs := list.New()
invSendQueue := list.New()


@ -202,7 +202,7 @@ func checkInputsStandard(tx *btcutil.Tx, txStore blockchain.TxStore) error {
return nil
}
// checkPkScriptStandard performs a series of checks on a transaction ouput
// checkPkScriptStandard performs a series of checks on a transaction output
// script (public key script) to ensure it is a "standard" public key script.
// A standard public key script is one that is a recognized form, and for
// multi-signature scripts, only contains from 1 to maxStandardMultiSigKeys


@ -173,7 +173,7 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
"verifymessage": handleVerifyMessage,
}
// list of commands that we recognise, but for which btcd has no support because
// list of commands that we recognize, but for which btcd has no support because
// it lacks support for wallet functionality. For these commands the user
// should ask a connected instance of btcwallet.
var rpcAskWallet = map[string]struct{}{
@ -678,7 +678,7 @@ func stringInSlice(a string, list []string) bool {
// createVinList returns a slice of JSON objects for the inputs of the passed
// transaction.
func createVinListPrevOut(s *rpcServer, mtx *wire.MsgTx, chainParams *chaincfg.Params, vinExtra int, filterAddrMap map[string]struct{}) []btcjson.VinPrevOut {
// Use a dynamically sized list to accomodate the address filter.
// Use a dynamically sized list to accommodate the address filter.
vinList := make([]btcjson.VinPrevOut, 0, len(mtx.TxIn))
// Coinbase transactions only have a single txin by definition.


@ -31,7 +31,7 @@ const (
// websocketSendBufferSize is the number of elements the send channel
// can queue before blocking. Note that this only applies to requests
// handled directly in the websocket client input handler or the async
// handler since notifications have their own queueing mechanism
// handler since notifications have their own queuing mechanism
// independent of the send channel buffer.
websocketSendBufferSize = 50
)
@ -197,7 +197,7 @@ func (m *wsNotificationManager) queueHandler() {
func (m *wsNotificationManager) NotifyBlockConnected(block *btcutil.Block) {
// As NotifyBlockConnected will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationBlockConnected)(block):
@ -210,7 +210,7 @@ func (m *wsNotificationManager) NotifyBlockConnected(block *btcutil.Block) {
func (m *wsNotificationManager) NotifyBlockDisconnected(block *btcutil.Block) {
// As NotifyBlockDisconnected will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationBlockDisconnected)(block):
@ -230,7 +230,7 @@ func (m *wsNotificationManager) NotifyMempoolTx(tx *btcutil.Tx, isNew bool) {
// As NotifyMempoolTx will be called by mempool and the RPC server
// may no longer be running, use a select statement to unblock
// enqueueing the notification once the RPC server has begun
// enqueuing the notification once the RPC server has begun
// shutting down.
select {
case m.queueNotification <- n:
@ -1100,11 +1100,11 @@ out:
rpcsLog.Tracef("Websocket client input handler done for %s", c.addr)
}
// notificationQueueHandler handles the queueing of outgoing notifications for
// notificationQueueHandler handles the queuing of outgoing notifications for
// the websocket client. This runs as a muxer for various sources of input to
// ensure that queueing up notifications to be sent will not block. Otherwise,
// ensure that queuing up notifications to be sent will not block. Otherwise,
// slow clients could bog down the other systems (such as the mempool or block
// manager) which are queueing the data. The data is passed on to outHandler to
// manager) which are queuing the data. The data is passed on to outHandler to
// actually be written. It must be run as a goroutine.
func (c *wsClient) notificationQueueHandler() {
ntfnSentChan := make(chan bool, 1) // nonblocking sync
@ -1391,7 +1391,7 @@ func (c *wsClient) WaitForShutdown() {
// manager, websocket connection, remote address, and whether or not the client
// has already been authenticated (via HTTP Basic access authentication). The
// returned client is ready to start. Once started, the client will process
// incoming and outgoing messages in separate goroutines complete with queueing
// incoming and outgoing messages in separate goroutines complete with queuing
// and asynchrous handling for long-running operations.
func newWebsocketClient(server *rpcServer, conn *websocket.Conn,
remoteAddr string, authenticated bool, isAdmin bool) (*wsClient, error) {
@ -1813,7 +1813,7 @@ func recoverFromReorg(db database.Db, minBlock, maxBlock int32,
return hashList, nil
}
// descendantBlock returns the appropiate JSON-RPC error if a current block
// descendantBlock returns the appropriate JSON-RPC error if a current block
// fetched during a reorganize is not a direct child of the parent block hash.
func descendantBlock(prevHash *wire.ShaHash, curBlock *btcutil.Block) error {
curHash := &curBlock.MsgBlock().Header.PrevBlock


@ -449,7 +449,7 @@ func (sp *serverPeer) OnTx(p *peer.Peer, msg *wire.MsgTx) {
// Queue the transaction up to be handled by the block manager and
// intentionally block further receives until the transaction is fully
// processed and known good or bad. This helps prevent a malicious peer
// from queueing up a bunch of bad transactions before disconnecting (or
// from queuing up a bunch of bad transactions before disconnecting (or
// being disconnected) and wasting memory.
sp.server.blockManager.QueueTx(tx, sp)
<-sp.txProcessed
@ -470,7 +470,7 @@ func (sp *serverPeer) OnBlock(p *peer.Peer, msg *wire.MsgBlock, buf []byte) {
// manager and intentionally block further receives
// until the bitcoin block is fully processed and known
// good or bad. This helps prevent a malicious peer
// from queueing up a bunch of bad blocks before
// from queuing up a bunch of bad blocks before
// disconnecting (or being disconnected) and wasting
// memory. Additionally, this behavior is depended on
// by at least the block acceptance test tool as the
@ -511,7 +511,7 @@ func (sp *serverPeer) OnGetData(p *peer.Peer, msg *wire.MsgGetData) {
// This incremental score decays each minute to half of its value.
sp.addBanScore(0, uint32(length)*99/wire.MaxInvPerMsg, "getdata")
// We wait on this wait channel periodically to prevent queueing
// We wait on this wait channel periodically to prevent queuing
// far more data than we can send in a reasonable time, wasting memory.
// The waiting occurs after the database fetch for the next one to
// provide a little pipelining.
@ -1440,7 +1440,7 @@ func newPeerConfig(sp *serverPeer) *peer.Config {
// Note: The reference client currently bans peers that send alerts
// not signed with its key. We could verify against their key, but
// since the reference client is currently unwilling to support
// other implementions' alert messages, we will not relay theirs.
// other implementations' alert messages, we will not relay theirs.
OnAlert: nil,
},
NewestBlock: sp.server.db.NewestSha,


@ -17,7 +17,7 @@ import (
type ScriptFlags uint32
const (
// ScriptBip16 defines whether the bip16 threshhold has passed and thus
// ScriptBip16 defines whether the bip16 threshold has passed and thus
// pay-to-script hash transactions will be fully validated.
ScriptBip16 ScriptFlags = 1 << iota


@ -71,7 +71,7 @@ var (
ErrStackElementTooBig = errors.New("element in script too large")
// ErrStackUnknownAddress is returned when ScriptToAddrHash does not
// recognise the pattern of the script and thus can not find the address
// recognize the pattern of the script and thus can not find the address
// for payment.
ErrStackUnknownAddress = errors.New("non-recognised address")


@ -386,7 +386,7 @@ func getSigOpCount(pops []parsedOpcode, precise bool) int {
fallthrough
case OP_CHECKMULTISIGVERIFY:
// If we are being precise then look for familiar
// patterns for multisig, for now all we recognise is
// patterns for multisig, for now all we recognize is
// OP_1 - OP_16 to signify the number of pubkeys.
// Otherwise, we use the max of 20.
if precise && i > 0 &&


@ -237,7 +237,7 @@ func (b *ScriptBuilder) Reset() *ScriptBuilder {
return b
}
// Script returns the currently built script. When any errors occured while
// Script returns the currently built script. When any errors occurred while
// building the script, the script will be returned up the point of the first
// error along with the error.
func (b *ScriptBuilder) Script() ([]byte, error) {


@ -211,7 +211,7 @@ func mergeScripts(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int,
return mergeMultiSig(tx, idx, addresses, nRequired, pkScript,
sigScript, prevScript)
// It doesn't actualy make sense to merge anything other than multiig
// It doesn't actually make sense to merge anything other than multiig
// and scripthash (because it could contain multisig). Everything else
// has either zero signature, can't be spent, or has a single signature
// which is either present or not. The other two cases are handled
@ -374,7 +374,7 @@ func (sc ScriptClosure) GetScript(address btcutil.Address) ([]byte, error) {
// looked up by calling getKey() with the string of the given address.
// Any pay-to-script-hash signatures will be similarly looked up by calling
// getScript. If previousScript is provided then the results in previousScript
// will be merged in a type-dependant manner with the newly generated.
// will be merged in a type-dependent manner with the newly generated.
// signature script.
func SignTxOutput(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int,
pkScript []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB,


@ -31,7 +31,7 @@ func (w *fixedWriter) Write(p []byte) (n int, err error) {
return
}
// Bytes returns the bytes alreayd written to the fixed writer.
// Bytes returns the bytes already written to the fixed writer.
func (w *fixedWriter) Bytes() []byte {
return w.b
}