/*
 * Copyright (c) 2013, 2014 Conformal Systems LLC <info@conformal.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package wallet

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/sha256"
	"crypto/sha512"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"math/big"
	"time"

	"code.google.com/p/go.crypto/ripemd160"

	"github.com/conformal/btcec"
	"github.com/conformal/btcnet"
	"github.com/conformal/btcscript"
	"github.com/conformal/btcutil"
	"github.com/conformal/btcwire"
)

const (
	// Length in bytes of KDF output.
	kdfOutputBytes = 32

	// Maximum length in bytes of a comment that can have a size represented
	// as a uint16.
	maxCommentLen = (1 << 16) - 1
)

const (
	defaultKdfComputeTime = 0.25
	defaultKdfMaxMem      = 32 * 1024 * 1024
)

// Possible errors when dealing with wallets.
var (
	ErrAddressNotFound      = errors.New("address not found")
	ErrAlreadyEncrypted     = errors.New("private key is already encrypted")
	ErrChecksumMismatch     = errors.New("checksum mismatch")
	ErrDuplicate            = errors.New("duplicate key or address")
	ErrMalformedEntry       = errors.New("malformed entry")
	ErrWalletIsWatchingOnly = errors.New("wallet is watching-only")
	ErrWalletLocked         = errors.New("wallet is locked")
	ErrWrongPassphrase      = errors.New("wrong passphrase")
)

// '\xbaWALLET\x00'
var fileID = [8]byte{0xba, 0x57, 0x41, 0x4c, 0x4c, 0x45, 0x54, 0x00}

type entryHeader byte

const (
	addrCommentHeader entryHeader = 1 << iota
	txCommentHeader
	deletedHeader
	scriptHeader
	addrHeader entryHeader = 0
)

// We want to use binaryRead and binaryWrite instead of binary.Read
// and binary.Write because those from the binary package do not return
// the number of bytes actually written or read. We need to return
// this value to correctly support the io.ReaderFrom and io.WriterTo
// interfaces.
func binaryRead(r io.Reader, order binary.ByteOrder, data interface{}) (n int64, err error) {
	var read int
	buf := make([]byte, binary.Size(data))
	if read, err = io.ReadFull(r, buf); err != nil {
		return int64(read), err
	}
	return int64(read), binary.Read(bytes.NewBuffer(buf), order, data)
}

// See comment for binaryRead().
func binaryWrite(w io.Writer, order binary.ByteOrder, data interface{}) (n int64, err error) {
	buf := bytes.Buffer{}
	if err = binary.Write(&buf, order, data); err != nil {
		return 0, err
	}

	written, err := w.Write(buf.Bytes())
	return int64(written), err
}

// pubkeyFromPrivkey creates an encoded pubkey based on a
// 32-byte privkey. The returned pubkey is 33 bytes if compressed,
// or 65 bytes if uncompressed.
func pubkeyFromPrivkey(privkey []byte, compress bool) (pubkey []byte) {
	_, pk := btcec.PrivKeyFromBytes(btcec.S256(), privkey)

	if compress {
		return pk.SerializeCompressed()
	}
	return pk.SerializeUncompressed()
}

func keyOneIter(passphrase, salt []byte, memReqts uint64) []byte {
	saltedpass := append(passphrase, salt...)
	lutbl := make([]byte, memReqts)

	// Seed for lookup table
	seed := sha512.Sum512(saltedpass)
	copy(lutbl[:sha512.Size], seed[:])

	for nByte := 0; nByte < (int(memReqts) - sha512.Size); nByte += sha512.Size {
		hash := sha512.Sum512(lutbl[nByte : nByte+sha512.Size])
		copy(lutbl[nByte+sha512.Size:nByte+2*sha512.Size], hash[:])
	}

	x := lutbl[cap(lutbl)-sha512.Size:]

	seqCt := uint32(memReqts / sha512.Size)
	nLookups := seqCt / 2
	for i := uint32(0); i < nLookups; i++ {
		// Armory ignores endianness here. We assume LE.
		newIdx := binary.LittleEndian.Uint32(x[cap(x)-4:]) % seqCt

		// Index of hash result at newIdx
		vIdx := newIdx * sha512.Size
		v := lutbl[vIdx : vIdx+sha512.Size]

		// XOR hash x with hash v
		for j := 0; j < sha512.Size; j++ {
			x[j] ^= v[j]
		}

		// Save new hash to x
		hash := sha512.Sum512(x)
		copy(x, hash[:])
	}

	return x[:kdfOutputBytes]
}

// Key implements the key derivation function used by Armory
// based on the ROMix algorithm described in Colin Percival's paper
// "Stronger Key Derivation via Sequential Memory-Hard Functions"
// (http://www.tarsnap.com/scrypt/scrypt.pdf).
func Key(passphrase []byte, params *kdfParameters) []byte {
	masterKey := passphrase
	for i := uint32(0); i < params.nIter; i++ {
		masterKey = keyOneIter(masterKey, params.salt[:], params.mem)
	}
	return masterKey
}
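
// A rough usage sketch (mirroring how NewWallet below derives its AES key;
// the passphrase value here is hypothetical): each call to Key rebuilds the
// memory-hard lookup table params.nIter times, so unlock cost scales with
// both params.mem and params.nIter.
//
//	kdfp, err := computeKdfParameters(defaultKdfComputeTime, defaultKdfMaxMem)
//	if err != nil {
//		// handle err
//	}
//	aeskey := Key([]byte("passphrase"), kdfp)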

func pad(size int, b []byte) []byte {
	// Prevent a possible panic if the input exceeds the expected size.
	if len(b) > size {
		size = len(b)
	}

	p := make([]byte, size)
	copy(p[size-len(b):], b)
	return p
}
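
// For example, pad(32, []byte{0x01, 0x02}) returns 30 zero bytes followed by
// 0x01 0x02. This restores the fixed 32-byte width of big-endian integers
// whose leading bytes are zero, as happens in ChainedPrivKey below when the
// derived key is shorter than 32 bytes.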

// ChainedPrivKey deterministically generates a new private key using a
// previous address's private key, public key, and chaincode. privkey and
// chaincode must be 32 bytes long, and pubkey may either be 33 or 65 bytes.
func ChainedPrivKey(privkey, pubkey, chaincode []byte) ([]byte, error) {
	if len(privkey) != 32 {
		return nil, fmt.Errorf("invalid privkey length %d (must be 32)",
			len(privkey))
	}
	if len(chaincode) != 32 {
		return nil, fmt.Errorf("invalid chaincode length %d (must be 32)",
			len(chaincode))
	}
	switch n := len(pubkey); n {
	case btcec.PubKeyBytesLenUncompressed, btcec.PubKeyBytesLenCompressed:
		// Correct length
	default:
		return nil, fmt.Errorf("invalid pubkey length %d", n)
	}

	xorbytes := make([]byte, 32)
	chainMod := btcwire.DoubleSha256(pubkey)
	for i := range xorbytes {
		xorbytes[i] = chainMod[i] ^ chaincode[i]
	}
	chainXor := new(big.Int).SetBytes(xorbytes)
	privint := new(big.Int).SetBytes(privkey)

	t := new(big.Int).Mul(chainXor, privint)
	b := t.Mod(t, btcec.S256().N).Bytes()
	return pad(32, b), nil
}
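
// Written out, the chaining rule implemented above is
//
//	xor      = DoubleSha256(pubkey) XOR chaincode
//	privkey' = (privkey * xor) mod N
//
// where N is the order of the secp256k1 group. ChainedPubKey below applies
// the same scalar to the public point, so both halves of a chain agree.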

// ChainedPubKey deterministically generates a new public key using a
// previous public key and chaincode. pubkey must be 33 or 65 bytes, and
// chaincode must be 32 bytes long.
func ChainedPubKey(pubkey, chaincode []byte) ([]byte, error) {
	var compressed bool
	switch n := len(pubkey); n {
	case btcec.PubKeyBytesLenUncompressed:
		compressed = false
	case btcec.PubKeyBytesLenCompressed:
		compressed = true
	default:
		// Incorrect serialized pubkey length
		return nil, fmt.Errorf("invalid pubkey length %d", n)
	}
	if len(chaincode) != 32 {
		return nil, fmt.Errorf("invalid chaincode length %d (must be 32)",
			len(chaincode))
	}

	xorbytes := make([]byte, 32)
	chainMod := btcwire.DoubleSha256(pubkey)
	for i := range xorbytes {
		xorbytes[i] = chainMod[i] ^ chaincode[i]
	}

	oldPk, err := btcec.ParsePubKey(pubkey, btcec.S256())
	if err != nil {
		return nil, err
	}
	newX, newY := btcec.S256().ScalarMult(oldPk.X, oldPk.Y, xorbytes)
	newPk := &btcec.PublicKey{
		Curve: btcec.S256(),
		X:     newX,
		Y:     newY,
	}

	if compressed {
		return newPk.SerializeCompressed(), nil
	}
	return newPk.SerializeUncompressed(), nil
}
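
// The ScalarMult above is the public-key counterpart of ChainedPrivKey:
// multiplying the old public point by xorbytes yields the point of the
// chained private key, since (privkey*xor)*G == xor*(privkey*G).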

type version struct {
	major         byte
	minor         byte
	bugfix        byte
	autoincrement byte
}

// Enforce that version satisfies the io.ReaderFrom and
// io.WriterTo interfaces.
var _ io.ReaderFrom = &version{}
var _ io.WriterTo = &version{}

// ReaderFromVersion is an io.ReaderFrom and io.WriterTo that
// can specify any particular wallet file format for reading
// depending on the wallet file version.
type ReaderFromVersion interface {
	ReadFromVersion(version, io.Reader) (int64, error)
	io.WriterTo
}

func (v version) String() string {
	str := fmt.Sprintf("%d.%d", v.major, v.minor)
	if v.bugfix != 0x00 || v.autoincrement != 0x00 {
		str += fmt.Sprintf(".%d", v.bugfix)
	}
	if v.autoincrement != 0x00 {
		str += fmt.Sprintf(".%d", v.autoincrement)
	}
	return str
}

func (v version) Uint32() uint32 {
	return uint32(v.major)<<6 | uint32(v.minor)<<4 | uint32(v.bugfix)<<2 | uint32(v.autoincrement)
}

func (v *version) ReadFrom(r io.Reader) (int64, error) {
	// Read 4 bytes for the version.
	var versBytes [4]byte
	n, err := io.ReadFull(r, versBytes[:])
	if err != nil {
		return int64(n), err
	}
	v.major = versBytes[0]
	v.minor = versBytes[1]
	v.bugfix = versBytes[2]
	v.autoincrement = versBytes[3]
	return int64(n), nil
}

func (v *version) WriteTo(w io.Writer) (int64, error) {
	// Write 4 bytes for the version.
	versBytes := []byte{
		v.major,
		v.minor,
		v.bugfix,
		v.autoincrement,
	}
	n, err := w.Write(versBytes)
	return int64(n), err
}

// LT returns whether v is an earlier version than v2.
func (v version) LT(v2 version) bool {
	switch {
	case v.major < v2.major:
		return true

	case v.minor < v2.minor:
		return true

	case v.bugfix < v2.bugfix:
		return true

	case v.autoincrement < v2.autoincrement:
		return true

	default:
		return false
	}
}

// EQ returns whether v2 is an equal version to v.
func (v version) EQ(v2 version) bool {
	switch {
	case v.major != v2.major:
		return false

	case v.minor != v2.minor:
		return false

	case v.bugfix != v2.bugfix:
		return false

	case v.autoincrement != v2.autoincrement:
		return false

	default:
		return true
	}
}

// GT returns whether v is a later version than v2.
func (v version) GT(v2 version) bool {
	switch {
	case v.major > v2.major:
		return true

	case v.minor > v2.minor:
		return true

	case v.bugfix > v2.bugfix:
		return true

	case v.autoincrement > v2.autoincrement:
		return true

	default:
		return false
	}
}

// Various versions.
var (
	// VersArmory is the latest version used by Armory.
	VersArmory = version{1, 35, 0, 0}

	// Vers20LastBlocks is the version where wallet files now hold
	// the 20 most recently seen block hashes.
	Vers20LastBlocks = version{1, 36, 0, 0}

	// VersUnsetNeedsPrivkeyFlag is the bugfix version where the
	// createPrivKeyNextUnlock address flag is correctly unset
	// after creating and encrypting its private key after unlock.
	// Otherwise, re-creating private keys will occur too early
	// in the address chain and fail due to encrypting an already
	// encrypted address. Wallet versions at or before this
	// version include a special case to allow the duplicate
	// encrypt.
	VersUnsetNeedsPrivkeyFlag = version{1, 36, 1, 0}

	// VersCurrent is the current wallet file version.
	VersCurrent = VersUnsetNeedsPrivkeyFlag
)

type varEntries struct {
	wallet  *Wallet
	entries []io.WriterTo
}

func (v *varEntries) WriteTo(w io.Writer) (n int64, err error) {
	ss := v.entries

	var written int64
	for _, s := range ss {
		var err error
		if written, err = s.WriteTo(w); err != nil {
			return n + written, err
		}
		n += written
	}
	return n, nil
}

func (v *varEntries) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	// Remove any previous entries.
	v.entries = nil
	wts := v.entries

	// Keep reading entries until an EOF is reached.
	for {
		var header entryHeader
		if read, err = binaryRead(r, binary.LittleEndian, &header); err != nil {
			// EOF here is not an error.
			if err == io.EOF {
				return n + read, nil
			}
			return n + read, err
		}
		n += read

		var wt io.WriterTo
		switch header {
		case addrHeader:
			var entry addrEntry
			entry.addr.wallet = v.wallet
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			wt = &entry
		case scriptHeader:
			var entry scriptEntry
			entry.script.wallet = v.wallet
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			wt = &entry
		case addrCommentHeader:
			var entry addrCommentEntry
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			wt = &entry
		case txCommentHeader:
			var entry txCommentEntry
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			wt = &entry
		case deletedHeader:
			var entry deletedEntry
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
		default:
			return n, fmt.Errorf("unknown entry header: %d", uint8(header))
		}
		if wt != nil {
			wts = append(wts, wt)
			v.entries = wts
		}
	}
}
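
// Each appended wallet entry read above therefore consists of a single
// entryHeader byte (addrHeader, scriptHeader, addrCommentHeader,
// txCommentHeader, or deletedHeader) followed by that entry type's own
// serialization; an unrecognized header aborts the read.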

// Wallet uses a custom network parameters type so it can be an io.ReaderFrom.
// Due to the way and order that wallets are currently serialized and how
// address reading requires the wallet's network parameters, setting and
// erroring on unknown wallet networks must happen on the read itself and not
// after the fact. This is admittedly a hack, but with a bip32 keystore on the
// horizon I'm not too motivated to clean this up.
type netParams btcnet.Params

func (net *netParams) ReadFrom(r io.Reader) (int64, error) {
	var buf [4]byte
	uint32Bytes := buf[:4]

	n, err := io.ReadFull(r, uint32Bytes)
	n64 := int64(n)
	if err != nil {
		return n64, err
	}

	switch btcwire.BitcoinNet(binary.LittleEndian.Uint32(uint32Bytes)) {
	case btcwire.MainNet:
		*net = *(*netParams)(&btcnet.MainNetParams)
	case btcwire.TestNet3:
		*net = *(*netParams)(&btcnet.TestNet3Params)
	case btcwire.SimNet:
		*net = *(*netParams)(&btcnet.SimNetParams)
	default:
		return n64, errors.New("unknown network")
	}
	return n64, nil
}

func (net *netParams) WriteTo(w io.Writer) (int64, error) {
	var buf [4]byte
	uint32Bytes := buf[:4]

	binary.LittleEndian.PutUint32(uint32Bytes, uint32(net.Net))
	n, err := w.Write(uint32Bytes)
	n64 := int64(n)
	return n64, err
}
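
// The network is stored as the 4-byte little-endian btcwire network magic,
// which is why ReadFrom above can only restore networks it knows how to map
// back to btcnet parameters (main net, testnet3, and simnet).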

// Stringified byte slices for use as map lookup keys.
type addressKey string
type transactionHashKey string

type comment []byte

func getAddressKey(addr btcutil.Address) addressKey {
	return addressKey(addr.ScriptAddress())
}

// Wallet represents a btcwallet wallet in memory. It implements
// the io.ReaderFrom and io.WriterTo interfaces to read from and
// write to any type of byte streams, including files.
type Wallet struct {
	vers         version
	net          *netParams
	flags        walletFlags
	createDate   int64
	name         [32]byte
	desc         [256]byte
	highestUsed  int64
	kdfParams    kdfParameters
	keyGenerator btcAddress

	// These are non-standard and fit in the extra 1024 bytes between the
	// root address and the appended entries.
	recent recentBlocks

	addrMap        map[addressKey]walletAddress
	addrCommentMap map[addressKey]comment
	txCommentMap   map[transactionHashKey]comment

	// The rest of the fields in this struct are not serialized.
	passphrase       []byte
	secret           []byte
	chainIdxMap      map[int64]btcutil.Address
	importedAddrs    []walletAddress
	lastChainIdx     int64
	missingKeysStart int64
}

// NewWallet creates and initializes a new Wallet. name's and
// desc's binary representation must not exceed 32 and 256 bytes,
// respectively. All address private keys are encrypted with passphrase.
// The wallet is returned locked.
func NewWallet(name, desc string, passphrase []byte, net *btcnet.Params,
	createdAt *BlockStamp, keypoolSize uint) (*Wallet, error) {

	// Check sizes of inputs.
	if len([]byte(name)) > 32 {
		return nil, errors.New("name exceeds 32 byte maximum size")
	}
	if len([]byte(desc)) > 256 {
		return nil, errors.New("desc exceeds 256 byte maximum size")
	}

	// Randomly generate rootkey and chaincode.
	rootkey, chaincode := make([]byte, 32), make([]byte, 32)
	if _, err := rand.Read(rootkey); err != nil {
		return nil, err
	}
	if _, err := rand.Read(chaincode); err != nil {
		return nil, err
	}

	// Compute AES key and encrypt root address.
	kdfp, err := computeKdfParameters(defaultKdfComputeTime, defaultKdfMaxMem)
	if err != nil {
		return nil, err
	}
	aeskey := Key([]byte(passphrase), kdfp)

	// Create and fill wallet.
	w := &Wallet{
		vers: VersCurrent,
		net:  (*netParams)(net),
		flags: walletFlags{
			useEncryption: true,
			watchingOnly:  false,
		},
		createDate:  time.Now().Unix(),
		highestUsed: rootKeyChainIdx,
		kdfParams:   *kdfp,
		recent: recentBlocks{
			lastHeight: createdAt.Height,
			hashes: []*btcwire.ShaHash{
				&createdAt.Hash,
			},
		},
		addrMap:        make(map[addressKey]walletAddress),
		addrCommentMap: make(map[addressKey]comment),
		txCommentMap:   make(map[transactionHashKey]comment),
		chainIdxMap:    make(map[int64]btcutil.Address),
		lastChainIdx:   rootKeyChainIdx,
		secret:         aeskey,
	}
	copy(w.name[:], []byte(name))
	copy(w.desc[:], []byte(desc))

	// Create new root address from key and chaincode.
	root, err := newRootBtcAddress(w, rootkey, nil, chaincode,
		createdAt)
	if err != nil {
		return nil, err
	}

	// Verify root address keypairs.
	if err := root.verifyKeypairs(); err != nil {
		return nil, err
	}

	if err := root.encrypt(aeskey); err != nil {
		return nil, err
	}

	w.keyGenerator = *root

	// Add root address to maps.
	rootAddr := w.keyGenerator.Address()
	w.addrMap[getAddressKey(rootAddr)] = &w.keyGenerator
	w.chainIdxMap[rootKeyChainIdx] = rootAddr

	// Fill keypool.
	if err := w.extendKeypool(keypoolSize, createdAt); err != nil {
		return nil, err
	}

	// Wallet must be returned locked.
	if err := w.Lock(); err != nil {
		return nil, err
	}

	return w, nil
}
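
// A minimal creation sketch (the name, passphrase, and keypool size are
// hypothetical values; createdAt is the caller-supplied creation block):
//
//	bs := &BlockStamp{Height: height, Hash: hash}
//	w, err := NewWallet("acct", "first account", []byte("passphrase"),
//		&btcnet.MainNetParams, bs, 100)
//	if err != nil {
//		// handle err
//	}
//	// The wallet is returned locked; call w.Unlock before using
//	// private keys.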

// Name returns the name of a wallet. This name is used as the
// account name for btcwallet JSON methods.
func (w *Wallet) Name() string {
	last := len(w.name[:])
	for i, b := range w.name[:] {
		if b == 0x00 {
			last = i
			break
		}
	}
	return string(w.name[:last])
}

// ReadFrom reads data from an io.Reader and saves it to a Wallet,
// returning the number of bytes read and any errors encountered.
func (w *Wallet) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	w.net = &netParams{}
	w.addrMap = make(map[addressKey]walletAddress)
	w.addrCommentMap = make(map[addressKey]comment)
	w.chainIdxMap = make(map[int64]btcutil.Address)
	w.txCommentMap = make(map[transactionHashKey]comment)

	var id [8]byte
	appendedEntries := varEntries{wallet: w}
	w.keyGenerator.wallet = w

	// Iterate through each entry needing to be read. If data
	// implements io.ReaderFrom, use its ReadFrom func. Otherwise,
	// data is a pointer to a fixed sized value.
	datas := []interface{}{
		&id,
		&w.vers,
		w.net,
		&w.flags,
		make([]byte, 6), // Bytes for Armory unique ID
		&w.createDate,
		&w.name,
		&w.desc,
		&w.highestUsed,
		&w.kdfParams,
		make([]byte, 256),
		&w.keyGenerator,
		newUnusedSpace(1024, &w.recent),
		&appendedEntries,
	}
	for _, data := range datas {
		var err error
		switch d := data.(type) {
		case ReaderFromVersion:
			read, err = d.ReadFromVersion(w.vers, r)

		case io.ReaderFrom:
			read, err = d.ReadFrom(r)

		default:
			read, err = binaryRead(r, binary.LittleEndian, d)
		}
		n += read
		if err != nil {
			return n, err
		}
	}

	if id != fileID {
		return n, errors.New("unknown file ID")
	}

	// Add root address to address map.
	rootAddr := w.keyGenerator.Address()
	w.addrMap[getAddressKey(rootAddr)] = &w.keyGenerator
	w.chainIdxMap[rootKeyChainIdx] = rootAddr

	// Fill unserialized fields.
	wts := appendedEntries.entries
	for _, wt := range wts {
		switch e := wt.(type) {
		case *addrEntry:
			addr := e.addr.Address()
			w.addrMap[getAddressKey(addr)] = &e.addr
			if e.addr.Imported() {
				w.importedAddrs = append(w.importedAddrs, &e.addr)
			} else {
				w.chainIdxMap[e.addr.chainIndex] = addr
				if w.lastChainIdx < e.addr.chainIndex {
					w.lastChainIdx = e.addr.chainIndex
				}
			}

			// If the private keys have not been created yet, mark the
			// earliest so all can be created on next wallet unlock.
			if e.addr.flags.createPrivKeyNextUnlock {
				switch {
				case w.missingKeysStart == 0:
					fallthrough
				case e.addr.chainIndex < w.missingKeysStart:
					w.missingKeysStart = e.addr.chainIndex
				}
			}

		case *scriptEntry:
			addr := e.script.Address()
			w.addrMap[getAddressKey(addr)] = &e.script
			// Scripts are always imported.
			w.importedAddrs = append(w.importedAddrs, &e.script)

		case *addrCommentEntry:
			addr, err := e.address(w.Net())
			if err != nil {
				return 0, err
			}
			w.addrCommentMap[getAddressKey(addr)] =
				comment(e.comment)

		case *txCommentEntry:
			txKey := transactionHashKey(e.txHash[:])
			w.txCommentMap[txKey] = comment(e.comment)

		default:
			return n, errors.New("unknown appended entry")
		}
	}

	return n, nil
}
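
// Together with WriteTo below, the datas slice documents the on-disk layout:
// file ID, version, network magic, wallet flags, 6 bytes reserved for
// Armory's unique ID, creation date, name, description, highest used chain
// index, KDF parameters, 256 unused bytes, the root key generator address,
// 1024 bytes of "unused" space carrying the recent-blocks data, and finally
// the variable-length appended entries.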

// WriteTo serializes a Wallet and writes it to an io.Writer,
// returning the number of bytes written and any errors encountered.
func (w *Wallet) WriteTo(wtr io.Writer) (n int64, err error) {
	var wts []io.WriterTo
	var chainedAddrs = make([]io.WriterTo, len(w.chainIdxMap)-1)
	var importedAddrs []io.WriterTo
	for _, wAddr := range w.addrMap {
		switch btcAddr := wAddr.(type) {
		case *btcAddress:
			e := &addrEntry{
				addr: *btcAddr,
			}
			copy(e.pubKeyHash160[:], btcAddr.AddrHash())
			if btcAddr.Imported() {
				// No order for imported addresses.
				importedAddrs = append(importedAddrs, e)
			} else if btcAddr.chainIndex >= 0 {
				// Chained addresses are sorted. This is
				// kind of nice but probably isn't necessary.
				chainedAddrs[btcAddr.chainIndex] = e
			}

		case *scriptAddress:
			e := &scriptEntry{
				script: *btcAddr,
			}
			copy(e.scriptHash160[:], btcAddr.AddrHash())
			// Scripts are always imported.
			importedAddrs = append(importedAddrs, e)
		}
	}
	wts = append(chainedAddrs, importedAddrs...)
	for addr, comment := range w.addrCommentMap {
		e := &addrCommentEntry{
			comment: []byte(comment),
		}
		// The address key is the pubkey hash as a string, so casting
		// it back is safe (though a little distasteful).
		copy(e.pubKeyHash160[:], []byte(addr))
		wts = append(wts, e)
	}
	for hash, comment := range w.txCommentMap {
		e := &txCommentEntry{
			comment: []byte(comment),
		}
		copy(e.txHash[:], []byte(hash))
		wts = append(wts, e)
	}
	appendedEntries := varEntries{wallet: w, entries: wts}

	// Iterate through each entry needing to be written. If data
	// implements io.WriterTo, use its WriteTo func. Otherwise,
	// data is a pointer to a fixed size value.
	datas := []interface{}{
		&fileID,
		&VersCurrent,
		w.net,
		&w.flags,
		make([]byte, 6), // Bytes for Armory unique ID
		&w.createDate,
		&w.name,
		&w.desc,
		&w.highestUsed,
		&w.kdfParams,
		make([]byte, 256),
		&w.keyGenerator,
		newUnusedSpace(1024, &w.recent),
		&appendedEntries,
	}
	var written int64
	for _, data := range datas {
		if s, ok := data.(io.WriterTo); ok {
			written, err = s.WriteTo(wtr)
		} else {
			written, err = binaryWrite(wtr, binary.LittleEndian, data)
		}
		n += written
		if err != nil {
			return n, err
		}
	}

	return n, nil
}

// Unlock derives an AES key from passphrase and wallet's KDF
// parameters and unlocks the root key of the wallet. If
// the unlock was successful, the wallet's secret key is saved,
// allowing the decryption of any encrypted private key. Any
// addresses created while the wallet was locked without private
// keys are created at this time.
func (w *Wallet) Unlock(passphrase []byte) error {
	if w.flags.watchingOnly {
		return ErrWalletIsWatchingOnly
	}

	// Derive key from KDF parameters and passphrase.
	key := Key(passphrase, &w.kdfParams)

	// Unlock root address with derived key.
	if _, err := w.keyGenerator.unlock(key); err != nil {
		return err
	}

	// If unlock was successful, save the passphrase and aes key.
	w.passphrase = passphrase
	w.secret = key

	return w.createMissingPrivateKeys()
}

// Lock performs a best-effort attempt to remove and zero all secret keys
// associated with the wallet.
func (w *Wallet) Lock() (err error) {
	if w.flags.watchingOnly {
		return ErrWalletIsWatchingOnly
	}

	// Remove clear text passphrase from wallet.
	if w.IsLocked() {
		err = ErrWalletLocked
	} else {
		zero(w.passphrase)
		w.passphrase = nil
		zero(w.secret)
		w.secret = nil
	}

	// Remove clear text private keys from all address entries.
	for _, addr := range w.addrMap {
		if baddr, ok := addr.(*btcAddress); ok {
			_ = baddr.lock()
		}
	}

	return err
}

// Passphrase returns the passphrase for an unlocked wallet, or
// ErrWalletLocked if the wallet is locked. This should only
// be used for creating wallets for new accounts with the same
// passphrase as other btcwallet account wallets.
//
// The returned byte slice points to internal wallet memory and
// will be zeroed when the wallet is locked.
func (w *Wallet) Passphrase() ([]byte, error) {
	if len(w.passphrase) != 0 {
		return w.passphrase, nil
	}
	return nil, ErrWalletLocked
}

// ChangePassphrase creates a new AES key from a new passphrase and
// re-encrypts all encrypted private keys with the new key.
func (w *Wallet) ChangePassphrase(new []byte) error {
	if w.flags.watchingOnly {
		return ErrWalletIsWatchingOnly
	}

	if w.IsLocked() {
		return ErrWalletLocked
	}

	oldkey := w.secret
	newkey := Key(new, &w.kdfParams)

	for _, wa := range w.addrMap {
		// Only btcAddresses currently have private keys.
		a, ok := wa.(*btcAddress)
		if !ok {
			continue
		}

		if err := a.changeEncryptionKey(oldkey, newkey); err != nil {
			return err
		}
	}

	// Zero old secrets.
	zero(w.passphrase)
	zero(w.secret)

	// Save new secrets.
	w.passphrase = new
	w.secret = newkey

	return nil
}

func zero(b []byte) {
	for i := range b {
		b[i] = 0
	}
}

// IsLocked returns whether a wallet is unlocked (in which case the
// key is saved in memory), or locked.
func (w *Wallet) IsLocked() bool {
	return len(w.secret) != 32
}
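
// IsLocked relies on w.secret being either nil (locked) or exactly the
// 32-byte AES key produced by Key (unlocked); Lock and Unlock above maintain
// that invariant.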

// NextChainedAddress attempts to get the next chained address.
// If there are addresses available in the keypool, the next address
// is used. If not and the wallet is unlocked, the keypool is extended.
// If locked, a new address's pubkey is chained off the last pubkey
// and added to the wallet.
func (w *Wallet) NextChainedAddress(bs *BlockStamp, keypoolSize uint) (btcutil.Address, error) {
	addr, err := w.nextChainedAddress(bs, keypoolSize)
	if err != nil {
		return nil, err
	}

	// Create and return payment address for address hash.
	return addr.Address(), nil
}

// ChangeAddress returns the next chained address, marking it as a
// change address.
func (w *Wallet) ChangeAddress(bs *BlockStamp, keypoolSize uint) (btcutil.Address, error) {
	addr, err := w.nextChainedAddress(bs, keypoolSize)
	if err != nil {
		return nil, err
	}

	addr.flags.change = true

	// Create and return payment address for address hash.
	return addr.Address(), nil
}

func (w *Wallet) nextChainedAddress(bs *BlockStamp, keypoolSize uint) (*btcAddress, error) {
	// Attempt to get address hash of next chained address.
	nextAPKH, ok := w.chainIdxMap[w.highestUsed+1]
	if !ok {
		// Extending the keypool requires an unlocked wallet.
		if w.IsLocked() {
			if err := w.extendLockedWallet(bs); err != nil {
				return nil, err
			}
		} else {
			// Key is available, extend keypool.
			if err := w.extendKeypool(keypoolSize, bs); err != nil {
				return nil, err
			}
		}

		// Should be added to the internal maps, try lookup again.
		nextAPKH, ok = w.chainIdxMap[w.highestUsed+1]
		if !ok {
			return nil, errors.New("chain index map improperly updated")
		}
	}

	// Look up address.
	addr, ok := w.addrMap[getAddressKey(nextAPKH)]
	if !ok {
		return nil, errors.New("cannot find generated address")
	}

	btcAddr, ok := addr.(*btcAddress)
	if !ok {
		return nil, errors.New("found non-pubkey chained address")
	}

	w.highestUsed++

	return btcAddr, nil
}

// LastChainedAddress returns the most recently requested chained
// address from calling NextChainedAddress, or the root address if
// no chained addresses have been requested.
func (w *Wallet) LastChainedAddress() btcutil.Address {
	return w.chainIdxMap[w.highestUsed]
}

// extendKeypool grows the keypool by n addresses.
func (w *Wallet) extendKeypool(n uint, bs *BlockStamp) error {
	// Get last chained address. New chained addresses will be
	// chained off of this address's chaincode and private key.
	a := w.chainIdxMap[w.lastChainIdx]
	waddr, ok := w.addrMap[getAddressKey(a)]
	if !ok {
		return errors.New("expected last chained address not found")
	}

	if w.IsLocked() {
		return ErrWalletLocked
	}

	addr, ok := waddr.(*btcAddress)
	if !ok {
		return errors.New("found non-pubkey chained address")
	}

	privkey, err := addr.unlock(w.secret)
	if err != nil {
		return err
	}
	cc := addr.chaincode[:]

	// Create n encrypted addresses and add each to the wallet's
	// bookkeeping maps.
	for i := uint(0); i < n; i++ {
		privkey, err = ChainedPrivKey(privkey, addr.pubKeyBytes(), cc)
		if err != nil {
			return err
		}
		newaddr, err := newBtcAddress(w, privkey, nil, bs, true)
		if err != nil {
			return err
		}
		if err := newaddr.verifyKeypairs(); err != nil {
			return err
		}
		if err = newaddr.encrypt(w.secret); err != nil {
			return err
		}
		a := newaddr.Address()
		w.addrMap[getAddressKey(a)] = newaddr
		newaddr.chainIndex = addr.chainIndex + 1
		w.chainIdxMap[newaddr.chainIndex] = a
		w.lastChainIdx++

		// Armory does this, but all the chaincodes are equal, so why
		// not use the root's?
		copy(newaddr.chaincode[:], cc)
		addr = newaddr
	}

	return nil
}

// extendLockedWallet creates one new address without a private key
// (allowing for extending the address chain from a locked wallet)
// chained from the last used chained address and adds the address to
// the wallet's internal bookkeeping structures. This function should
// not be called unless the keypool has been depleted.
func (w *Wallet) extendLockedWallet(bs *BlockStamp) error {
	a := w.chainIdxMap[w.lastChainIdx]
	waddr, ok := w.addrMap[getAddressKey(a)]
	if !ok {
		return errors.New("expected last chained address not found")
	}

	addr, ok := waddr.(*btcAddress)
	if !ok {
		return errors.New("found non-pubkey chained address")
	}

	cc := addr.chaincode[:]

	nextPubkey, err := ChainedPubKey(addr.pubKeyBytes(), cc)
	if err != nil {
		return err
	}
	newaddr, err := newBtcAddressWithoutPrivkey(w, nextPubkey, nil, bs)
	if err != nil {
		return err
	}
	a = newaddr.Address()
	w.addrMap[getAddressKey(a)] = newaddr
	newaddr.chainIndex = addr.chainIndex + 1
	w.chainIdxMap[newaddr.chainIndex] = a
	w.lastChainIdx++
	copy(newaddr.chaincode[:], cc)

	if w.missingKeysStart == 0 {
		w.missingKeysStart = newaddr.chainIndex
	}

	return nil
}

func (w *Wallet) createMissingPrivateKeys() error {
	idx := w.missingKeysStart
	if idx == 0 {
		return nil
	}

	// Lookup previous address.
	apkh, ok := w.chainIdxMap[idx-1]
	if !ok {
		return errors.New("missing previous chained address")
	}
	prevWAddr := w.addrMap[getAddressKey(apkh)]
	if w.IsLocked() {
		return ErrWalletLocked
	}

	prevAddr, ok := prevWAddr.(*btcAddress)
	if !ok {
		return errors.New("found non-pubkey chained address")
	}

	prevPrivKey, err := prevAddr.unlock(w.secret)
	if err != nil {
		return err
	}

	for i := idx; ; i++ {
		// Get the next private key for the ith address in the address chain.
		ithPrivKey, err := ChainedPrivKey(prevPrivKey,
			prevAddr.pubKeyBytes(), prevAddr.chaincode[:])
		if err != nil {
			return err
		}

		// Get the address with the missing private key, set, and
		// encrypt.
		apkh, ok := w.chainIdxMap[i]
		if !ok {
			// Finished.
			break
		}
		waddr := w.addrMap[getAddressKey(apkh)]
		addr, ok := waddr.(*btcAddress)
		if !ok {
			return errors.New("found non-pubkey chained address")
		}
		addr.privKeyCT = ithPrivKey
		if err := addr.encrypt(w.secret); err != nil {
			// Avoid bug: see comment for VersUnsetNeedsPrivkeyFlag.
			if err != ErrAlreadyEncrypted || !w.vers.LT(VersUnsetNeedsPrivkeyFlag) {
				return err
			}
		}
		addr.flags.createPrivKeyNextUnlock = false

		// Set previous address and private key for next iteration.
		prevAddr = addr
		prevPrivKey = ithPrivKey
	}

	w.missingKeysStart = 0
	return nil
}

// Address returns a WalletAddress structure for an address in a wallet.
// This address may be typecast into other interfaces (like PubKeyAddress
// and ScriptAddress) if specific information (e.g. keys) is required.
func (w *Wallet) Address(a btcutil.Address) (WalletAddress, error) {
	// Look up address by address hash.
	btcaddr, ok := w.addrMap[getAddressKey(a)]
	if !ok {
		return nil, ErrAddressNotFound
	}

	return btcaddr, nil
}

// Net returns the bitcoin network parameters for this wallet.
func (w *Wallet) Net() *btcnet.Params {
	return (*btcnet.Params)(w.net)
}

// SetSyncStatus sets the sync status for a single wallet address. This
// may error if the address is not found in the wallet.
//
// When marking an address as unsynced, only the type Unsynced matters.
// The value is ignored.
func (w *Wallet) SetSyncStatus(a btcutil.Address, s SyncStatus) error {
	wa, ok := w.addrMap[getAddressKey(a)]
	if !ok {
		return ErrAddressNotFound
	}
	wa.setSyncStatus(s)
	return nil
}
|
|
|
|
|
|
|
|
// SetSyncedWith marks already synced addresses in the wallet to be in
|
|
|
|
// sync with the recently-seen block described by the blockstamp.
|
|
|
|
// Unsynced addresses are unaffected by this method and must be marked
|
|
|
|
// as in sync with MarkAddressSynced or MarkAllSynced to be considered
|
|
|
|
// in sync with bs.
|
2014-05-30 22:17:51 +02:00
|
|
|
//
|
|
|
|
// If bs is nil, the entire wallet is marked unsynced.
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
func (w *Wallet) SetSyncedWith(bs *BlockStamp) {
|
2014-05-30 22:17:51 +02:00
|
|
|
if bs == nil {
|
|
|
|
w.recent.hashes = w.recent.hashes[:0]
|
|
|
|
w.recent.lastHeight = w.keyGenerator.firstBlock
|
|
|
|
w.keyGenerator.setSyncStatus(Unsynced(w.keyGenerator.firstBlock))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// Check if we're trying to rollback the last seen history.
|
|
|
|
// If so, and this bs is already saved, remove anything
|
|
|
|
// after and return. Otherwire, remove previous hashes.
|
|
|
|
if bs.Height < w.recent.lastHeight {
|
|
|
|
maybeIdx := len(w.recent.hashes) - 1 - int(w.recent.lastHeight-bs.Height)
|
|
|
|
if maybeIdx >= 0 && maybeIdx < len(w.recent.hashes) &&
|
|
|
|
*w.recent.hashes[maybeIdx] == bs.Hash {
|
|
|
|
|
|
|
|
w.recent.lastHeight = bs.Height
|
|
|
|
// subslice out the removed hashes.
|
|
|
|
w.recent.hashes = w.recent.hashes[:maybeIdx]
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.recent.hashes = nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if bs.Height != w.recent.lastHeight+1 {
|
|
|
|
w.recent.hashes = nil
|
|
|
|
}
|
|
|
|
|
|
|
|
w.recent.lastHeight = bs.Height
|
2014-04-16 23:22:39 +02:00
|
|
|
|
|
|
|
blockSha := bs.Hash
|
2013-12-06 21:37:07 +01:00
|
|
|
if len(w.recent.hashes) == 20 {
|
|
|
|
// Make room for the most recent hash.
|
|
|
|
copy(w.recent.hashes, w.recent.hashes[1:])
|
|
|
|
|
|
|
|
// Set new block in the last position.
|
2014-04-16 23:22:39 +02:00
|
|
|
w.recent.hashes[19] = &blockSha
|
2013-12-06 21:37:07 +01:00
|
|
|
} else {
|
2014-04-16 23:22:39 +02:00
|
|
|
w.recent.hashes = append(w.recent.hashes, &blockSha)
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
2013-10-30 02:22:14 +01:00
|
|
|
}
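
// exampleRecordBestBlock is an illustrative sketch rather than part of the
// original API: it shows how SetSyncedWith might be driven from a block
// notification.  The height and hash string are assumed to describe the
// newly-attached best block.
func exampleRecordBestBlock(w *Wallet, height int32, hashStr string) error {
	hash, err := btcwire.NewShaHashFromStr(hashStr)
	if err != nil {
		return err
	}
	// The blockstamp either extends the recently-seen block history or,
	// for a lower height, rolls it back.
	w.SetSyncedWith(&BlockStamp{Height: height, Hash: *hash})
	return nil
}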
|
|
|
|
|
2014-03-17 15:24:14 +01:00
|
|
|
// SyncHeight returns the sync height of a wallet, or the earliest
|
|
|
|
// block height of any unsynced imported address if there are any
|
|
|
|
// addresses marked as unsynced, whichever is smaller. This is the
|
|
|
|
// height that rescans on an entire wallet should begin at to fully
|
|
|
|
// sync all wallet addresses.
|
2014-05-30 22:17:51 +02:00
|
|
|
func (w *Wallet) SyncHeight() int32 {
|
2014-06-03 19:10:42 +02:00
|
|
|
var height int32
|
|
|
|
switch h, ok := w.keyGenerator.SyncStatus().(PartialSync); {
|
|
|
|
case ok && int32(h) > w.recent.lastHeight:
|
|
|
|
height = int32(h)
|
|
|
|
default:
|
|
|
|
height = w.recent.lastHeight
|
|
|
|
}
|
2014-03-17 15:24:14 +01:00
|
|
|
for _, a := range w.addrMap {
|
2014-03-27 16:32:56 +01:00
|
|
|
var syncHeight int32
|
2014-04-09 02:18:52 +02:00
|
|
|
switch e := a.SyncStatus().(type) {
|
2014-03-27 16:32:56 +01:00
|
|
|
case Unsynced:
|
|
|
|
syncHeight = int32(e)
|
|
|
|
case PartialSync:
|
|
|
|
syncHeight = int32(e)
|
|
|
|
case FullSync:
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if syncHeight < height {
|
|
|
|
height = syncHeight
|
2014-03-17 15:24:14 +01:00
|
|
|
|
|
|
|
// Can't go lower than 0.
|
|
|
|
if height == 0 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2013-10-30 02:22:14 +01:00
|
|
|
}
|
2014-03-17 15:24:14 +01:00
|
|
|
return height
|
2013-10-30 02:22:14 +01:00
|
|
|
}
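
// exampleNeedsRescan is an illustrative sketch rather than part of the
// original API: it shows how SyncHeight can be compared against the current
// best block height (assumed to come from btcd) to decide whether a rescan
// beginning at SyncHeight() is required.
func exampleNeedsRescan(w *Wallet, bestHeight int32) bool {
	return w.SyncHeight() < bestHeight
}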
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// NewIterateRecentBlocks returns an iterator for recently-seen blocks.
|
|
|
|
// The iterator starts at the most recently-added block, and Prev should
|
|
|
|
// be used to access earlier blocks.
|
|
|
|
func (w *Wallet) NewIterateRecentBlocks() RecentBlockIterator {
|
|
|
|
return w.recent.NewIterator()
|
|
|
|
}
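
// exampleRecentBlockHeights is an illustrative sketch rather than part of
// the original API: it walks the recently-seen block history from newest to
// oldest with Prev and collects the heights of each BlockStamp.
func exampleRecentBlockHeights(w *Wallet) []int32 {
	var heights []int32
	it := w.NewIterateRecentBlocks()
	if it == nil {
		// No recently-seen blocks have been recorded yet.
		return heights
	}
	for {
		heights = append(heights, it.BlockStamp().Height)
		if !it.Prev() {
			break
		}
	}
	return heights
}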
|
|
|
|
|
2014-05-22 00:50:47 +02:00
|
|
|
// ImportPrivateKey imports a WIF private key into the keystore. The imported
|
|
|
|
// address is created using either a compressed or uncompressed serialized
|
|
|
|
// public key, depending on the CompressPubKey bool of the WIF.
|
|
|
|
func (w *Wallet) ImportPrivateKey(wif *btcutil.WIF, bs *BlockStamp) (btcutil.Address, error) {
|
2014-01-20 18:56:27 +01:00
|
|
|
if w.flags.watchingOnly {
|
|
|
|
return nil, ErrWalletIsWatchingOnly
|
|
|
|
}
|
|
|
|
|
2014-01-15 20:07:08 +01:00
|
|
|
// First, must check that the key being imported will not result
|
|
|
|
// in a duplicate address.
|
2014-05-22 00:50:47 +02:00
|
|
|
pkh := btcutil.Hash160(wif.SerializePubKey())
|
2014-03-11 02:28:40 +01:00
|
|
|
if _, ok := w.addrMap[addressKey(pkh)]; ok {
|
2014-01-20 18:56:27 +01:00
|
|
|
return nil, ErrDuplicate
|
2014-01-15 20:07:08 +01:00
|
|
|
}
|
|
|
|
|
2014-01-17 15:35:52 +01:00
|
|
|
// The wallet must be unlocked to encrypt the imported private key.
|
2014-03-20 17:35:44 +01:00
|
|
|
if w.IsLocked() {
|
2014-01-20 18:56:27 +01:00
|
|
|
return nil, ErrWalletLocked
|
2013-11-20 02:18:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create new address with this private key.
|
2014-05-22 00:50:47 +02:00
|
|
|
privKey := wif.PrivKey.Serialize()
|
|
|
|
btcaddr, err := newBtcAddress(w, privKey, nil, bs, wif.CompressPubKey)
|
2013-11-20 02:18:11 +01:00
|
|
|
if err != nil {
|
2014-01-20 18:56:27 +01:00
|
|
|
return nil, err
|
2013-11-20 02:18:11 +01:00
|
|
|
}
|
2014-01-06 18:24:29 +01:00
|
|
|
btcaddr.chainIndex = importedKeyChainIdx
|
2013-11-20 02:18:11 +01:00
|
|
|
|
2014-03-17 15:24:14 +01:00
|
|
|
// Mark as unsynced if import height is below currently-synced
|
|
|
|
// height.
|
|
|
|
if len(w.recent.hashes) != 0 && bs.Height < w.recent.lastHeight {
|
|
|
|
btcaddr.flags.unsynced = true
|
|
|
|
}
|
|
|
|
|
2013-11-20 02:18:11 +01:00
|
|
|
// Encrypt imported address with the derived AES key.
|
2014-01-17 15:35:52 +01:00
|
|
|
if err = btcaddr.encrypt(w.secret); err != nil {
|
2014-01-20 18:56:27 +01:00
|
|
|
return nil, err
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
2013-11-20 02:18:11 +01:00
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
addr := btcaddr.Address()
|
2013-11-20 02:18:11 +01:00
|
|
|
// Add address to wallet's bookkeeping structures. Adding to
|
|
|
|
// the map will result in the imported address being serialized
|
|
|
|
// on the next WriteTo call.
|
2014-03-11 02:28:40 +01:00
|
|
|
w.addrMap[getAddressKey(addr)] = btcaddr
|
2014-01-06 18:24:29 +01:00
|
|
|
w.importedAddrs = append(w.importedAddrs, btcaddr)
|
2013-11-20 02:18:11 +01:00
|
|
|
|
2014-01-20 18:56:27 +01:00
|
|
|
// Create and return address.
|
2014-03-11 02:28:40 +01:00
|
|
|
return addr, nil
|
2013-08-21 16:37:30 +02:00
|
|
|
}
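
// exampleImportWIF is an illustrative sketch rather than part of the
// original API: it decodes a WIF string (btcutil.DecodeWIF is assumed to be
// available) and hands the result to ImportPrivateKey.  bs is assumed to
// describe the block the key was first used in, or the best block if that
// is unknown.  The wallet must be unlocked, since the imported key is
// immediately encrypted with the wallet's AES key.
func exampleImportWIF(w *Wallet, wifStr string, bs *BlockStamp) (btcutil.Address, error) {
	wif, err := btcutil.DecodeWIF(wifStr)
	if err != nil {
		return nil, err
	}
	return w.ImportPrivateKey(wif, bs)
}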
|
|
|
|
|
2014-03-13 20:13:39 +01:00
|
|
|
// ImportScript creates a new scriptAddress with a user-provided script
|
|
|
|
// and adds it to the wallet.
|
|
|
|
func (w *Wallet) ImportScript(script []byte, bs *BlockStamp) (btcutil.Address, error) {
|
|
|
|
if w.flags.watchingOnly {
|
|
|
|
return nil, ErrWalletIsWatchingOnly
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := w.addrMap[addressKey(btcutil.Hash160(script))]; ok {
|
|
|
|
return nil, ErrDuplicate
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create new script address with the provided script.
|
2014-04-09 02:18:52 +02:00
|
|
|
scriptaddr, err := newScriptAddress(w, script, bs)
|
2014-03-13 20:13:39 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2014-03-17 15:24:14 +01:00
|
|
|
// Mark as unsynced if import height is below currently-synced
|
|
|
|
// height.
|
|
|
|
if len(w.recent.hashes) != 0 && bs.Height < w.recent.lastHeight {
|
|
|
|
scriptaddr.flags.unsynced = true
|
|
|
|
}
|
|
|
|
|
2014-03-13 20:13:39 +01:00
|
|
|
// Add address to wallet's bookkeeping structures. Adding to
|
|
|
|
// the map will result in the imported address being serialized
|
|
|
|
// on the next WriteTo call.
|
2014-04-09 02:18:52 +02:00
|
|
|
addr := scriptaddr.Address()
|
2014-03-13 20:13:39 +01:00
|
|
|
w.addrMap[getAddressKey(addr)] = scriptaddr
|
|
|
|
w.importedAddrs = append(w.importedAddrs, scriptaddr)
|
|
|
|
|
|
|
|
// Create and return address.
|
|
|
|
return addr, nil
|
|
|
|
}
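
// exampleImportRedeemScript is an illustrative sketch rather than part of
// the original API: it adds a raw pay-to-script-hash redeem script to the
// wallet.  The script bytes are assumed to be built elsewhere (for example
// with btcscript helpers).
func exampleImportRedeemScript(w *Wallet, script []byte, bs *BlockStamp) (btcutil.Address, error) {
	if len(script) == 0 {
		return nil, errors.New("empty redeem script")
	}
	// The returned address is the P2SH address for the script's hash.
	return w.ImportScript(script, bs)
}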
|
|
|
|
|
2013-12-09 23:03:51 +01:00
|
|
|
// CreateDate returns the Unix time of the wallet creation time. This
|
|
|
|
// is used to compare the wallet creation time against block headers and
|
|
|
|
// set a better minimum block height of where to begin rescans.
|
|
|
|
func (w *Wallet) CreateDate() int64 {
|
|
|
|
return w.createDate
|
|
|
|
}
|
|
|
|
|
2014-01-20 18:56:27 +01:00
|
|
|
// ExportWatchingWallet creates and returns a new wallet with the same
|
|
|
|
// addresses in w, but as a watching-only wallet without any private keys.
|
|
|
|
// New addresses created by the watching wallet will match the new addresses
|
|
|
|
// created by the original wallet (thanks to public key address chaining), but
|
|
|
|
// will be missing the associated private keys.
|
|
|
|
func (w *Wallet) ExportWatchingWallet() (*Wallet, error) {
|
|
|
|
// Don't continue if wallet is already a watching-only wallet.
|
|
|
|
if w.flags.watchingOnly {
|
|
|
|
return nil, ErrWalletIsWatchingOnly
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy members of w into a new wallet, but mark as watching-only and
|
|
|
|
// do not include any private keys.
|
|
|
|
ww := &Wallet{
|
2014-02-04 16:37:28 +01:00
|
|
|
vers: w.vers,
|
|
|
|
net: w.net,
|
2014-01-20 18:56:27 +01:00
|
|
|
flags: walletFlags{
|
|
|
|
useEncryption: false,
|
|
|
|
watchingOnly: true,
|
|
|
|
},
|
2014-04-09 02:18:52 +02:00
|
|
|
name: w.name,
|
|
|
|
desc: w.desc,
|
|
|
|
createDate: w.createDate,
|
|
|
|
highestUsed: w.highestUsed,
|
2014-01-20 18:56:27 +01:00
|
|
|
recent: recentBlocks{
|
|
|
|
lastHeight: w.recent.lastHeight,
|
|
|
|
},
|
|
|
|
|
2014-03-11 02:28:40 +01:00
|
|
|
addrMap: make(map[addressKey]walletAddress),
|
|
|
|
addrCommentMap: make(map[addressKey]comment),
|
2014-01-20 18:56:27 +01:00
|
|
|
txCommentMap: make(map[transactionHashKey]comment),
|
|
|
|
|
2014-03-14 01:26:03 +01:00
|
|
|
// todo oga make me a list
|
2014-03-11 02:28:40 +01:00
|
|
|
chainIdxMap: make(map[int64]btcutil.Address),
|
2014-01-20 18:56:27 +01:00
|
|
|
lastChainIdx: w.lastChainIdx,
|
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
kgwc := w.keyGenerator.watchingCopy(ww)
|
|
|
|
ww.keyGenerator = *(kgwc.(*btcAddress))
|
2014-01-20 18:56:27 +01:00
|
|
|
if len(w.recent.hashes) != 0 {
|
|
|
|
ww.recent.hashes = make([]*btcwire.ShaHash, 0, len(w.recent.hashes))
|
|
|
|
for _, hash := range w.recent.hashes {
|
2014-04-16 23:22:39 +02:00
|
|
|
hashCpy := *hash
|
2014-01-20 18:56:27 +01:00
|
|
|
ww.recent.hashes = append(ww.recent.hashes, &hashCpy)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for apkh, addr := range w.addrMap {
|
2014-04-09 02:18:52 +02:00
|
|
|
if !addr.Imported() {
|
2014-03-11 02:28:40 +01:00
|
|
|
// Must be a btcAddress if !imported.
|
|
|
|
btcAddr := addr.(*btcAddress)
|
|
|
|
|
|
|
|
ww.chainIdxMap[btcAddr.chainIndex] =
|
2014-04-09 02:18:52 +02:00
|
|
|
addr.Address()
|
2014-01-21 23:05:54 +01:00
|
|
|
}
|
2014-03-11 02:28:40 +01:00
|
|
|
apkhCopy := apkh
|
2014-04-09 02:18:52 +02:00
|
|
|
ww.addrMap[apkhCopy] = addr.watchingCopy(ww)
|
2014-01-20 18:56:27 +01:00
|
|
|
}
|
|
|
|
for apkh, cmt := range w.addrCommentMap {
|
|
|
|
cmtCopy := make(comment, len(cmt))
|
|
|
|
copy(cmtCopy, cmt)
|
|
|
|
ww.addrCommentMap[apkh] = cmtCopy
|
|
|
|
}
|
|
|
|
if len(w.importedAddrs) != 0 {
|
2014-03-11 02:28:40 +01:00
|
|
|
ww.importedAddrs = make([]walletAddress, 0,
|
|
|
|
len(w.importedAddrs))
|
2014-01-20 18:56:27 +01:00
|
|
|
for _, addr := range w.importedAddrs {
|
2014-04-09 02:18:52 +02:00
|
|
|
ww.importedAddrs = append(ww.importedAddrs, addr.watchingCopy(ww))
|
2014-01-20 18:56:27 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ww, nil
|
|
|
|
}
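
// exampleExportWatching is an illustrative sketch rather than part of the
// original API: it exports a watching-only copy and checks that the copy
// reports the same set of active addresses as the original.
func exampleExportWatching(w *Wallet) (*Wallet, error) {
	ww, err := w.ExportWatchingWallet()
	if err != nil {
		return nil, err
	}
	if len(ww.ActiveAddresses()) != len(w.ActiveAddresses()) {
		return nil, errors.New("watching copy is missing addresses")
	}
	return ww, nil
}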
|
|
|
|
|
2014-03-27 16:32:56 +01:00
|
|
|
// SyncStatus is the interface type for all sync variants.
|
|
|
|
type SyncStatus interface {
|
|
|
|
ImplementsSyncStatus()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unsynced is a type representing an unsynced address. When this is
|
|
|
|
// returned by a wallet method, the value is the recorded first seen
|
|
|
|
// block height.
|
|
|
|
type Unsynced int32
|
|
|
|
|
|
|
|
// ImplementsSyncStatus is implemented to make Unsynced a SyncStatus.
|
|
|
|
func (u Unsynced) ImplementsSyncStatus() {}
|
|
|
|
|
|
|
|
// PartialSync is a type representing a partially synced address (for
|
|
|
|
// example, due to the result of a partially-completed rescan).
|
|
|
|
type PartialSync int32
|
|
|
|
|
|
|
|
// ImplementsSyncStatus is implemented to make PartialSync a SyncStatus.
|
|
|
|
func (p PartialSync) ImplementsSyncStatus() {}
|
|
|
|
|
|
|
|
// FullSync is a type representing an address that is in sync with the
|
|
|
|
// recently seen blocks.
|
|
|
|
type FullSync struct{}
|
|
|
|
|
|
|
|
// ImplementsSyncStatus is implemented to make FullSync a SyncStatus.
|
|
|
|
func (f FullSync) ImplementsSyncStatus() {}
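
// exampleDescribeSyncStatus is an illustrative sketch rather than part of
// the original API: it shows the intended way to consume a SyncStatus
// value, a type switch over the three concrete variants.
func exampleDescribeSyncStatus(s SyncStatus) string {
	switch e := s.(type) {
	case Unsynced:
		return fmt.Sprintf("unsynced; first seen at height %d", int32(e))
	case PartialSync:
		return fmt.Sprintf("partially synced through height %d", int32(e))
	case FullSync:
		return "in sync with recently-seen blocks"
	default:
		return "unknown sync status"
	}
}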
|
|
|
|
|
2014-03-06 01:34:44 +01:00
|
|
|
// WalletAddress is an interface that provides access to information regarding an
|
|
|
|
// address managed by a wallet. Concrete implementations of this type may
|
|
|
|
// provide further fields to provide information specific to that type of
|
|
|
|
// address.
|
2014-04-09 02:18:52 +02:00
|
|
|
type WalletAddress interface {
|
2014-03-06 01:34:44 +01:00
|
|
|
// Address returns a btcutil.Address for the backing address.
|
|
|
|
Address() btcutil.Address
|
2014-04-09 02:18:52 +02:00
|
|
|
// AddrHash returns the key or script hash related to the address.
|
|
|
|
AddrHash() string
|
2014-03-06 01:34:44 +01:00
|
|
|
// FirstBlock returns the first block an address could be in.
|
|
|
|
FirstBlock() int32
|
|
|
|
// Imported returns true if the backing address was imported instead
|
|
|
|
// of being part of an address chain.
|
|
|
|
Imported() bool
|
|
|
|
// Change returns true if the backing address was created for a
|
|
|
|
// change output of a transaction.
|
|
|
|
Change() bool
|
|
|
|
// Compressed returns true if the backing address is compressed.
|
|
|
|
Compressed() bool
|
2014-03-27 16:32:56 +01:00
|
|
|
// SyncStatus returns the current synced state of an address.
|
|
|
|
SyncStatus() SyncStatus
|
2014-03-06 01:34:44 +01:00
|
|
|
}
|
|
|
|
|
2013-12-02 20:56:06 +01:00
|
|
|
// SortedActiveAddresses returns all wallet addresses that have been
|
2013-11-11 21:30:50 +01:00
|
|
|
// requested to be generated. These do not include unused addresses in
|
|
|
|
// the key pool. Use this when ordered addresses are needed. Otherwise,
|
2013-12-02 20:56:06 +01:00
|
|
|
// ActiveAddresses is preferred.
|
2014-04-09 02:18:52 +02:00
|
|
|
func (w *Wallet) SortedActiveAddresses() []WalletAddress {
|
|
|
|
addrs := make([]WalletAddress, 0,
|
2013-11-20 02:18:11 +01:00
|
|
|
w.highestUsed+int64(len(w.importedAddrs))+1)
|
|
|
|
for i := int64(rootKeyChainIdx); i <= w.highestUsed; i++ {
|
2014-01-06 18:24:29 +01:00
|
|
|
a := w.chainIdxMap[i]
|
2014-04-09 02:18:52 +02:00
|
|
|
info, ok := w.addrMap[getAddressKey(a)]
|
|
|
|
if ok {
|
2013-10-30 02:22:14 +01:00
|
|
|
addrs = append(addrs, info)
|
2013-09-09 20:14:57 +02:00
|
|
|
}
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
2013-11-20 02:18:11 +01:00
|
|
|
for _, addr := range w.importedAddrs {
|
2014-04-09 02:18:52 +02:00
|
|
|
addrs = append(addrs, addr)
|
2013-11-20 02:18:11 +01:00
|
|
|
}
|
2013-08-21 16:37:30 +02:00
|
|
|
return addrs
|
|
|
|
}
|
|
|
|
|
2013-12-02 20:56:06 +01:00
|
|
|
// ActiveAddresses returns a map between active payment addresses
|
2013-11-11 21:30:50 +01:00
|
|
|
// and their full info. These do not include unused addresses in the
|
2013-12-02 20:56:06 +01:00
|
|
|
// key pool. If addresses must be sorted, use SortedActiveAddresses.
|
2014-04-09 02:18:52 +02:00
|
|
|
func (w *Wallet) ActiveAddresses() map[btcutil.Address]WalletAddress {
|
|
|
|
addrs := make(map[btcutil.Address]WalletAddress)
|
2013-11-20 02:18:11 +01:00
|
|
|
for i := int64(rootKeyChainIdx); i <= w.highestUsed; i++ {
|
2014-01-06 18:24:29 +01:00
|
|
|
a := w.chainIdxMap[i]
|
2014-04-09 02:18:52 +02:00
|
|
|
addr := w.addrMap[getAddressKey(a)]
|
|
|
|
addrs[addr.Address()] = addr
|
2013-11-06 17:23:30 +01:00
|
|
|
}
|
2013-11-20 02:18:11 +01:00
|
|
|
for _, addr := range w.importedAddrs {
|
2014-04-09 02:18:52 +02:00
|
|
|
addrs[addr.Address()] = addr
|
2013-11-20 02:18:11 +01:00
|
|
|
}
|
2013-11-06 17:23:30 +01:00
|
|
|
return addrs
|
|
|
|
}
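
// exampleFindAddressInfo is an illustrative sketch rather than part of the
// original API: it scans the ActiveAddresses map for an address matching a
// payment address string and returns its WalletAddress info.  Iteration is
// used instead of a direct map lookup because the map keys are the concrete
// address values held by the wallet.
func exampleFindAddressInfo(w *Wallet, encoded string) (WalletAddress, error) {
	for a, info := range w.ActiveAddresses() {
		if a.EncodeAddress() == encoded {
			return info, nil
		}
	}
	return nil, ErrAddressNotFound
}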
|
|
|
|
|
2014-01-21 17:19:08 +01:00
|
|
|
// ExtendActiveAddresses gets or creates the next n addresses from the
|
|
|
|
// address chain and marks each as active. This is used to recover
|
|
|
|
// deterministic (not imported) addresses from a wallet backup, or to
|
|
|
|
// keep the active addresses in sync between an encrypted wallet with
|
|
|
|
// private keys and an exported watching wallet without.
|
|
|
|
//
|
|
|
|
// A slice is returned with the btcutil.Address of each new address.
|
|
|
|
// The blockchain must be rescanned for these addresses.
|
|
|
|
func (w *Wallet) ExtendActiveAddresses(n int, keypoolSize uint) ([]btcutil.Address, error) {
|
|
|
|
if n <= 0 {
|
|
|
|
return nil, errors.New("n is not positive")
|
|
|
|
}
|
|
|
|
|
2014-03-11 02:28:40 +01:00
|
|
|
last := w.addrMap[getAddressKey(w.chainIdxMap[w.highestUsed])]
|
2014-04-09 02:18:52 +02:00
|
|
|
bs := &BlockStamp{Height: last.FirstBlock()}
|
2014-01-21 17:19:08 +01:00
|
|
|
|
|
|
|
addrs := make([]btcutil.Address, 0, n)
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
addr, err := w.NextChainedAddress(bs, keypoolSize)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
addrs = append(addrs, addr)
|
|
|
|
}
|
|
|
|
return addrs, nil
|
|
|
|
}
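
// exampleExtendWatching is an illustrative sketch rather than part of the
// original API: it extends an exported watching-only wallet by n addresses
// so it stays in step with its encrypted counterpart.  keypoolSize is
// assumed to match the configured keypool size.
func exampleExtendWatching(watching *Wallet, n int, keypoolSize uint) ([]btcutil.Address, error) {
	addrs, err := watching.ExtendActiveAddresses(n, keypoolSize)
	if err != nil {
		return nil, err
	}
	// The new addresses have never been seen by the wallet before, so the
	// blockchain must be rescanned for them.
	return addrs, nil
}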
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
type walletFlags struct {
|
|
|
|
useEncryption bool
|
|
|
|
watchingOnly bool
|
|
|
|
}
|
|
|
|
|
2014-01-20 18:56:27 +01:00
|
|
|
func (wf *walletFlags) ReadFrom(r io.Reader) (int64, error) {
|
|
|
|
var b [8]byte
|
2014-04-16 23:22:39 +02:00
|
|
|
n, err := io.ReadFull(r, b[:])
|
2014-01-20 18:56:27 +01:00
|
|
|
if err != nil {
|
|
|
|
return int64(n), err
|
|
|
|
}
|
|
|
|
|
|
|
|
wf.useEncryption = b[0]&(1<<0) != 0
|
|
|
|
wf.watchingOnly = b[0]&(1<<1) != 0
|
|
|
|
|
|
|
|
return int64(n), nil
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2014-01-20 18:56:27 +01:00
|
|
|
func (wf *walletFlags) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
var b [8]byte
|
2013-09-03 06:10:32 +02:00
|
|
|
if wf.useEncryption {
|
2014-01-20 18:56:27 +01:00
|
|
|
b[0] |= 1 << 0
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
if wf.watchingOnly {
|
2014-01-20 18:56:27 +01:00
|
|
|
b[0] |= 1 << 1
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
2014-01-20 18:56:27 +01:00
|
|
|
n, err := w.Write(b[:])
|
|
|
|
return int64(n), err
|
2013-09-03 06:10:32 +02:00
|
|
|
}
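
// exampleWalletFlagsRoundTrip is an illustrative sketch rather than part of
// the original API: it demonstrates that walletFlags serializes to its
// fixed 8-byte field and reads back to an equal value.
func exampleWalletFlagsRoundTrip(wf walletFlags) (walletFlags, error) {
	buf := &bytes.Buffer{}
	if _, err := wf.WriteTo(buf); err != nil {
		return walletFlags{}, err
	}
	var got walletFlags
	if _, err := got.ReadFrom(buf); err != nil {
		return walletFlags{}, err
	}
	return got, nil
}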
|
2013-08-21 16:37:30 +02:00
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
type addrFlags struct {
|
2013-11-04 17:50:32 +01:00
|
|
|
hasPrivKey bool
|
|
|
|
hasPubKey bool
|
|
|
|
encrypted bool
|
2014-01-20 18:56:27 +01:00
|
|
|
createPrivKeyNextUnlock bool
|
2013-11-04 17:50:32 +01:00
|
|
|
compressed bool
|
2014-02-03 16:21:47 +01:00
|
|
|
change bool
|
2014-03-17 15:24:14 +01:00
|
|
|
unsynced bool
|
2014-03-27 16:32:56 +01:00
|
|
|
partialSync bool
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
func (af *addrFlags) ReadFrom(r io.Reader) (int64, error) {
|
2013-09-03 06:10:32 +02:00
|
|
|
var b [8]byte
|
2014-04-16 23:22:39 +02:00
|
|
|
n, err := io.ReadFull(r, b[:])
|
2013-09-03 06:10:32 +02:00
|
|
|
if err != nil {
|
2014-01-17 01:28:34 +01:00
|
|
|
return int64(n), err
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
af.hasPrivKey = b[0]&(1<<0) != 0
|
|
|
|
af.hasPubKey = b[0]&(1<<1) != 0
|
|
|
|
af.encrypted = b[0]&(1<<2) != 0
|
|
|
|
af.createPrivKeyNextUnlock = b[0]&(1<<3) != 0
|
|
|
|
af.compressed = b[0]&(1<<4) != 0
|
2014-02-03 16:21:47 +01:00
|
|
|
af.change = b[0]&(1<<5) != 0
|
2014-03-17 15:24:14 +01:00
|
|
|
af.unsynced = b[0]&(1<<6) != 0
|
2014-03-27 16:32:56 +01:00
|
|
|
af.partialSync = b[0]&(1<<7) != 0
|
2014-01-17 01:28:34 +01:00
|
|
|
|
|
|
|
// Currently (at least until watching-only wallets are implemented)
|
|
|
|
// btcwallet shall refuse to open any unencrypted addresses. This
|
|
|
|
// check only makes sense if there is a private key to encrypt, which
|
|
|
|
// there may not be if the keypool was extended from just the last
|
|
|
|
// public key and no private keys were written.
|
|
|
|
if af.hasPrivKey && !af.encrypted {
|
|
|
|
return int64(n), errors.New("private key is unencrypted")
|
2013-11-04 17:50:32 +01:00
|
|
|
}
|
2013-09-03 06:10:32 +02:00
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
return int64(n), nil
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
func (af *addrFlags) WriteTo(w io.Writer) (int64, error) {
|
2013-09-03 06:10:32 +02:00
|
|
|
var b [8]byte
|
|
|
|
if af.hasPrivKey {
|
|
|
|
b[0] |= 1 << 0
|
|
|
|
}
|
|
|
|
if af.hasPubKey {
|
|
|
|
b[0] |= 1 << 1
|
|
|
|
}
|
2014-01-17 01:28:34 +01:00
|
|
|
if af.hasPrivKey && !af.encrypted {
|
2013-09-03 06:10:32 +02:00
|
|
|
// We only support encrypted privkeys.
|
2014-01-17 01:28:34 +01:00
|
|
|
return 0, errors.New("address must be encrypted")
|
|
|
|
}
|
|
|
|
if af.encrypted {
|
|
|
|
b[0] |= 1 << 2
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
2013-11-04 17:50:32 +01:00
|
|
|
if af.createPrivKeyNextUnlock {
|
|
|
|
b[0] |= 1 << 3
|
|
|
|
}
|
|
|
|
if af.compressed {
|
|
|
|
b[0] |= 1 << 4
|
|
|
|
}
|
2014-02-03 16:21:47 +01:00
|
|
|
if af.change {
|
|
|
|
b[0] |= 1 << 5
|
|
|
|
}
|
2014-03-17 15:24:14 +01:00
|
|
|
if af.unsynced {
|
|
|
|
b[0] |= 1 << 6
|
|
|
|
}
|
2014-03-27 16:32:56 +01:00
|
|
|
if af.partialSync {
|
|
|
|
b[0] |= 1 << 7
|
|
|
|
}
|
2013-09-03 06:10:32 +02:00
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
n, err := w.Write(b[:])
|
|
|
|
return int64(n), err
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// recentBlocks holds at most the last 20 seen block hashes as well as
|
|
|
|
// the block height of the most recently seen block.
|
|
|
|
type recentBlocks struct {
|
|
|
|
hashes []*btcwire.ShaHash
|
|
|
|
lastHeight int32
|
|
|
|
}
|
|
|
|
|
|
|
|
type blockIterator struct {
|
|
|
|
height int32
|
|
|
|
index int
|
|
|
|
rb *recentBlocks
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rb *recentBlocks) ReadFromVersion(v version, r io.Reader) (int64, error) {
|
|
|
|
if !v.LT(Vers20LastBlocks) {
|
|
|
|
// Use current version.
|
|
|
|
return rb.ReadFrom(r)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Old file versions only saved the most recently seen
|
|
|
|
// block height and hash, not the last 20.
|
|
|
|
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
// Read height.
|
2014-04-16 23:22:39 +02:00
|
|
|
var heightBytes [4]byte // 4 bytes for an int32
|
|
|
|
n, err := io.ReadFull(r, heightBytes[:])
|
|
|
|
read += int64(n)
|
2013-12-06 21:37:07 +01:00
|
|
|
if err != nil {
|
2014-04-16 23:22:39 +02:00
|
|
|
return read, err
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
2014-04-16 23:22:39 +02:00
|
|
|
rb.lastHeight = int32(binary.LittleEndian.Uint32(heightBytes[:]))
|
2013-12-06 21:37:07 +01:00
|
|
|
|
|
|
|
// If height is -1, the last synced block is unknown, so don't try
|
|
|
|
// to read a block hash.
|
|
|
|
if rb.lastHeight == -1 {
|
|
|
|
rb.hashes = nil
|
|
|
|
return read, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read block hash.
|
2014-04-16 23:22:39 +02:00
|
|
|
var syncedBlockHash btcwire.ShaHash
|
|
|
|
n, err = io.ReadFull(r, syncedBlockHash[:])
|
|
|
|
read += int64(n)
|
2013-12-06 21:37:07 +01:00
|
|
|
if err != nil {
|
2014-04-16 23:22:39 +02:00
|
|
|
return read, err
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
rb.hashes = []*btcwire.ShaHash{
|
|
|
|
&syncedBlockHash,
|
|
|
|
}
|
|
|
|
|
|
|
|
return read, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rb *recentBlocks) ReadFrom(r io.Reader) (int64, error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
// Read number of saved blocks. This should not exceed 20.
|
2014-04-16 23:22:39 +02:00
|
|
|
var nBlockBytes [4]byte // 4 bytes for a uint32
|
|
|
|
n, err := io.ReadFull(r, nBlockBytes[:])
|
|
|
|
read += int64(n)
|
2013-12-06 21:37:07 +01:00
|
|
|
if err != nil {
|
2014-04-16 23:22:39 +02:00
|
|
|
return read, err
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
2014-04-16 23:22:39 +02:00
|
|
|
nBlocks := binary.LittleEndian.Uint32(nBlockBytes[:])
|
2013-12-06 21:37:07 +01:00
|
|
|
if nBlocks > 20 {
|
|
|
|
return read, errors.New("number of last seen blocks exceeds maximum of 20")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read most recently seen block height.
|
2014-04-16 23:22:39 +02:00
|
|
|
var heightBytes [4]byte // 4 bytes for an int32
|
|
|
|
n, err = io.ReadFull(r, heightBytes[:])
|
|
|
|
read += int64(n)
|
2013-12-06 21:37:07 +01:00
|
|
|
if err != nil {
|
2014-04-16 23:22:39 +02:00
|
|
|
return read, err
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
2014-04-16 23:22:39 +02:00
|
|
|
height := int32(binary.LittleEndian.Uint32(heightBytes[:]))
|
2013-12-06 21:37:07 +01:00
|
|
|
|
|
|
|
// height should not be -1 (or any other negative number)
|
|
|
|
// since at this point we should be reading in at least one
|
|
|
|
// known block.
|
|
|
|
if height < 0 {
|
|
|
|
return read, errors.New("expected a block but specified height is negative")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set last seen height.
|
|
|
|
rb.lastHeight = height
|
|
|
|
|
|
|
|
// Read nBlocks block hashes. Hashes are expected to be in
|
|
|
|
// order of oldest to newest, but there's no way to check
|
|
|
|
// that here.
|
|
|
|
rb.hashes = make([]*btcwire.ShaHash, 0, nBlocks)
|
|
|
|
for i := uint32(0); i < nBlocks; i++ {
|
2014-04-16 23:22:39 +02:00
|
|
|
var blockSha btcwire.ShaHash
|
|
|
|
n, err := io.ReadFull(r, blockSha[:])
|
|
|
|
read += int64(n)
|
2013-12-06 21:37:07 +01:00
|
|
|
if err != nil {
|
2014-04-16 23:22:39 +02:00
|
|
|
return read, err
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
2014-04-16 23:22:39 +02:00
|
|
|
rb.hashes = append(rb.hashes, &blockSha)
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return read, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rb *recentBlocks) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
// Write number of saved blocks. This should not exceed 20.
|
|
|
|
nBlocks := uint32(len(rb.hashes))
|
|
|
|
if nBlocks > 20 {
|
|
|
|
return written, errors.New("number of last seen blocks exceeds maximum of 20")
|
|
|
|
}
|
|
|
|
if nBlocks != 0 && rb.lastHeight < 0 {
|
|
|
|
return written, errors.New("number of block hashes is positive, but height is negative")
|
|
|
|
}
|
2014-04-16 23:22:39 +02:00
|
|
|
var nBlockBytes [4]byte // 4 bytes for a uint32
|
|
|
|
binary.LittleEndian.PutUint32(nBlockBytes[:], nBlocks)
|
|
|
|
n, err := w.Write(nBlockBytes[:])
|
|
|
|
written += int64(n)
|
2013-12-06 21:37:07 +01:00
|
|
|
if err != nil {
|
2014-04-16 23:22:39 +02:00
|
|
|
return written, err
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Write most recently seen block height.
|
2014-04-16 23:22:39 +02:00
|
|
|
var heightBytes [4]byte // 4 bytes for an int32
|
|
|
|
binary.LittleEndian.PutUint32(heightBytes[:], uint32(rb.lastHeight))
|
|
|
|
n, err = w.Write(heightBytes[:])
|
|
|
|
written += int64(n)
|
2013-12-06 21:37:07 +01:00
|
|
|
if err != nil {
|
2014-04-16 23:22:39 +02:00
|
|
|
return written, err
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Write block hashes.
|
|
|
|
for _, hash := range rb.hashes {
|
|
|
|
n, err := w.Write(hash[:])
|
2014-04-16 23:22:39 +02:00
|
|
|
written += int64(n)
|
2013-12-06 21:37:07 +01:00
|
|
|
if err != nil {
|
2014-04-16 23:22:39 +02:00
|
|
|
return written, err
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return written, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// RecentBlockIterator is a type to iterate through recently-seen
|
|
|
|
// blocks.
|
|
|
|
type RecentBlockIterator interface {
|
|
|
|
Next() bool
|
|
|
|
Prev() bool
|
|
|
|
BlockStamp() *BlockStamp
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rb *recentBlocks) NewIterator() RecentBlockIterator {
|
2014-05-30 22:17:51 +02:00
|
|
|
if rb.lastHeight == -1 || len(rb.hashes) == 0 {
|
2013-12-06 21:37:07 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &blockIterator{
|
|
|
|
height: rb.lastHeight,
|
|
|
|
index: len(rb.hashes) - 1,
|
|
|
|
rb: rb,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *blockIterator) Next() bool {
|
|
|
|
if it.index+1 >= len(it.rb.hashes) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
it.index++
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *blockIterator) Prev() bool {
|
|
|
|
if it.index-1 < 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
it.index--
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *blockIterator) BlockStamp() *BlockStamp {
|
|
|
|
return &BlockStamp{
|
|
|
|
Height: it.rb.lastHeight - int32(len(it.rb.hashes)-1-it.index),
|
|
|
|
Hash: *it.rb.hashes[it.index],
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// unusedSpace is a wrapper type to read or write one or more types
|
|
|
|
// that btcwallet fits into an unused space left by Armory's wallet file
|
|
|
|
// format.
|
|
|
|
type unusedSpace struct {
|
|
|
|
nBytes int // number of unused bytes that armory left.
|
|
|
|
rfvs []ReaderFromVersion
|
|
|
|
}
|
|
|
|
|
|
|
|
func newUnusedSpace(nBytes int, rfvs ...ReaderFromVersion) *unusedSpace {
|
|
|
|
return &unusedSpace{
|
|
|
|
nBytes: nBytes,
|
|
|
|
rfvs: rfvs,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (u *unusedSpace) ReadFromVersion(v version, r io.Reader) (int64, error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
for _, rfv := range u.rfvs {
|
|
|
|
n, err := rfv.ReadFromVersion(v, r)
|
|
|
|
if err != nil {
|
|
|
|
return read + n, err
|
|
|
|
}
|
|
|
|
read += n
|
|
|
|
if read > int64(u.nBytes) {
|
|
|
|
return read, errors.New("read too much from armory's unused space")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read rest of actually unused bytes.
|
|
|
|
unused := make([]byte, u.nBytes-int(read))
|
2014-04-16 23:22:39 +02:00
|
|
|
n, err := io.ReadFull(r, unused)
|
2013-12-06 21:37:07 +01:00
|
|
|
return read + int64(n), err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (u *unusedSpace) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
for _, wt := range u.rfvs {
|
|
|
|
n, err := wt.WriteTo(w)
|
|
|
|
if err != nil {
|
|
|
|
return written + n, err
|
|
|
|
}
|
|
|
|
written += n
|
|
|
|
if written > int64(u.nBytes) {
|
|
|
|
return written, errors.New("wrote too much to armory's unused space")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write rest of actually unused bytes.
|
|
|
|
unused := make([]byte, u.nBytes-int(written))
|
|
|
|
n, err := w.Write(unused)
|
|
|
|
return written + int64(n), err
|
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// walletAddress is the internal interface used to abstract over the
|
|
|
|
// different address types.
|
2014-03-11 02:28:40 +01:00
|
|
|
type walletAddress interface {
|
|
|
|
io.ReaderFrom
|
|
|
|
io.WriterTo
|
2014-04-09 02:18:52 +02:00
|
|
|
WalletAddress
|
|
|
|
watchingCopy(*Wallet) walletAddress
|
2014-03-27 16:32:56 +01:00
|
|
|
setSyncStatus(SyncStatus)
|
2014-03-11 02:28:40 +01:00
|
|
|
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
type btcAddress struct {
|
2014-04-09 02:18:52 +02:00
|
|
|
wallet *Wallet
|
|
|
|
address btcutil.Address
|
2014-03-27 16:32:56 +01:00
|
|
|
flags addrFlags
|
|
|
|
chaincode [32]byte
|
|
|
|
chainIndex int64
|
|
|
|
chainDepth int64 // unused
|
|
|
|
initVector [16]byte
|
|
|
|
privKey [32]byte
|
2014-04-09 02:18:52 +02:00
|
|
|
pubKey *btcec.PublicKey
|
2014-03-27 16:32:56 +01:00
|
|
|
firstSeen int64
|
|
|
|
lastSeen int64
|
|
|
|
firstBlock int32
|
|
|
|
partialSyncHeight int32 // This is reappropriated from armory's `lastBlock` field.
|
|
|
|
privKeyCT []byte // non-nil if unlocked.
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2013-11-20 02:18:11 +01:00
|
|
|
const (
|
|
|
|
// Root address has a chain index of -1. Each subsequent
|
|
|
|
// chained address increments the index.
|
|
|
|
rootKeyChainIdx = -1
|
|
|
|
|
|
|
|
// Imported private keys are not part of the chain, and have a
|
|
|
|
// special index of -2.
|
|
|
|
importedKeyChainIdx = -2
|
|
|
|
)
|
|
|
|
|
2013-11-04 17:50:32 +01:00
|
|
|
const (
|
|
|
|
pubkeyCompressed byte = 0x2
|
|
|
|
pubkeyUncompressed byte = 0x4
|
|
|
|
)
|
|
|
|
|
|
|
|
type publicKey []byte
|
|
|
|
|
|
|
|
func (k *publicKey) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
var format byte
|
|
|
|
read, err = binaryRead(r, binary.LittleEndian, &format)
|
|
|
|
if err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
// Remove the oddness from the format
|
|
|
|
noodd := format
|
|
|
|
noodd &= ^byte(0x1)
|
|
|
|
|
|
|
|
var s []byte
|
|
|
|
switch noodd {
|
|
|
|
case pubkeyUncompressed:
|
|
|
|
// Read the remaining 64 bytes.
|
|
|
|
s = make([]byte, 64)
|
|
|
|
|
|
|
|
case pubkeyCompressed:
|
|
|
|
// Read the remaining 32 bytes.
|
|
|
|
s = make([]byte, 32)
|
|
|
|
|
|
|
|
default:
|
|
|
|
return n, errors.New("unrecognized pubkey format")
|
|
|
|
}
|
|
|
|
|
|
|
|
read, err = binaryRead(r, binary.LittleEndian, &s)
|
|
|
|
if err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
*k = append([]byte{format}, s...)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (k *publicKey) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
return binaryWrite(w, binary.LittleEndian, []byte(*k))
|
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// PubKeyAddress is a WalletAddress that additionally provides the
|
|
|
|
// pubkey for a pubkey-based address.
|
|
|
|
type PubKeyAddress interface {
|
|
|
|
WalletAddress
|
|
|
|
// PubKey returns the public key associated with the address.
|
|
|
|
PubKey() *btcec.PublicKey
|
|
|
|
// ExportPubKey returns the public key associated with the address
|
|
|
|
// serialised as a hex encoded string.
|
|
|
|
ExportPubKey() string
|
|
|
|
// PrivKey returns the private key for the address.
|
|
|
|
// It can fail if the wallet is watching only, the wallet is locked,
|
|
|
|
// or the address doesn't have any keys.
|
|
|
|
PrivKey() (*ecdsa.PrivateKey, error)
|
2014-05-22 00:50:47 +02:00
|
|
|
// ExportPrivKey exports the WIF private key.
|
|
|
|
ExportPrivKey() (*btcutil.WIF, error)
|
2014-04-09 02:18:52 +02:00
|
|
|
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// newBtcAddress initializes and returns a new address. privkey must
|
|
|
|
// be 32 bytes. iv must be 16 bytes, or nil (in which case it is
|
|
|
|
// randomly generated).
|
2014-04-09 02:18:52 +02:00
|
|
|
func newBtcAddress(wallet *Wallet, privkey, iv []byte, bs *BlockStamp, compressed bool) (addr *btcAddress, err error) {
|
2013-09-03 06:10:32 +02:00
|
|
|
if len(privkey) != 32 {
|
2013-09-09 20:14:57 +02:00
|
|
|
return nil, errors.New("private key is not 32 bytes")
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
addr, err = newBtcAddressWithoutPrivkey(wallet,
|
|
|
|
pubkeyFromPrivkey(privkey, compressed), iv, bs)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
2014-04-09 02:18:52 +02:00
|
|
|
|
|
|
|
addr.flags.createPrivKeyNextUnlock = false
|
|
|
|
addr.flags.hasPrivKey = true
|
2014-01-17 01:28:34 +01:00
|
|
|
addr.privKeyCT = privkey
|
2013-09-03 06:10:32 +02:00
|
|
|
|
|
|
|
return addr, nil
|
|
|
|
}
|
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
// newBtcAddressWithoutPrivkey initializes and returns a new address with an
|
|
|
|
// unknown (at the time) private key that must be found later. pubkey must be
|
|
|
|
// 33 or 65 bytes, and iv must be 16 bytes or empty (in which case it is
|
|
|
|
// randomly generated).
|
2014-04-09 02:18:52 +02:00
|
|
|
func newBtcAddressWithoutPrivkey(wallet *Wallet, pubkey, iv []byte, bs *BlockStamp) (addr *btcAddress, err error) {
|
2014-01-17 01:28:34 +01:00
|
|
|
var compressed bool
|
2014-05-20 15:12:43 +02:00
|
|
|
switch n := len(pubkey); n {
|
|
|
|
case btcec.PubKeyBytesLenCompressed:
|
2014-01-17 01:28:34 +01:00
|
|
|
compressed = true
|
2014-05-20 15:12:43 +02:00
|
|
|
case btcec.PubKeyBytesLenUncompressed:
|
2014-01-17 01:28:34 +01:00
|
|
|
compressed = false
|
|
|
|
default:
|
2014-05-20 15:12:43 +02:00
|
|
|
return nil, fmt.Errorf("invalid pubkey length %d", n)
|
2014-01-17 01:28:34 +01:00
|
|
|
}
|
|
|
|
if len(iv) == 0 {
|
|
|
|
iv = make([]byte, 16)
|
|
|
|
if _, err := rand.Read(iv); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else if len(iv) != 16 {
|
|
|
|
return nil, errors.New("init vector must be nil or 16 bytes large")
|
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
pk, err := btcec.ParsePubKey(pubkey, btcec.S256())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
address, err := btcutil.NewAddressPubKeyHash(btcutil.Hash160(pubkey),
|
|
|
|
wallet.Net())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
addr = &btcAddress{
|
|
|
|
flags: addrFlags{
|
|
|
|
hasPrivKey: false,
|
|
|
|
hasPubKey: true,
|
2014-02-03 16:21:47 +01:00
|
|
|
encrypted: false,
|
2014-01-17 01:28:34 +01:00
|
|
|
createPrivKeyNextUnlock: true,
|
|
|
|
compressed: compressed,
|
2014-02-03 16:21:47 +01:00
|
|
|
change: false,
|
2014-03-17 15:24:14 +01:00
|
|
|
unsynced: false,
|
2014-01-17 01:28:34 +01:00
|
|
|
},
|
2014-04-09 02:18:52 +02:00
|
|
|
wallet: wallet,
|
|
|
|
address: address,
|
2014-01-17 01:28:34 +01:00
|
|
|
firstSeen: time.Now().Unix(),
|
|
|
|
firstBlock: bs.Height,
|
2014-04-09 02:18:52 +02:00
|
|
|
pubKey: pk,
|
2014-01-17 01:28:34 +01:00
|
|
|
}
|
|
|
|
copy(addr.initVector[:], iv)
|
|
|
|
|
|
|
|
return addr, nil
|
|
|
|
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// newRootBtcAddress generates a new address, also setting the
|
|
|
|
// chaincode and chain index to represent this address as a root
|
|
|
|
// address.
|
2014-04-09 02:18:52 +02:00
|
|
|
func newRootBtcAddress(wallet *Wallet, privKey, iv, chaincode []byte,
|
2013-11-20 02:18:11 +01:00
|
|
|
bs *BlockStamp) (addr *btcAddress, err error) {
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
if len(chaincode) != 32 {
|
2013-09-09 20:14:57 +02:00
|
|
|
return nil, errors.New("chaincode is not 32 bytes")
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2013-11-20 02:18:11 +01:00
|
|
|
// Create new btcAddress with provided inputs. This will
|
|
|
|
// always use a compressed pubkey.
|
2014-04-09 02:18:52 +02:00
|
|
|
addr, err = newBtcAddress(wallet, privKey, iv, bs, true)
|
2013-09-03 06:10:32 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
copy(addr.chaincode[:], chaincode)
|
2013-11-20 02:18:11 +01:00
|
|
|
addr.chainIndex = rootKeyChainIdx
|
2013-09-03 06:10:32 +02:00
|
|
|
|
|
|
|
return addr, err
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
2013-12-03 18:33:37 +01:00
|
|
|
// verifyKeypairs creates a signature using the parsed private key and
|
|
|
|
// verifies the signature with the parsed public key. If either of these
|
|
|
|
// steps fail, the keypair generation failed and any funds sent to this
|
|
|
|
// address will be unspendable. This step requires an unencrypted or
|
|
|
|
// unlocked btcAddress.
|
|
|
|
func (a *btcAddress) verifyKeypairs() error {
|
2014-01-17 01:28:34 +01:00
|
|
|
if len(a.privKeyCT) != 32 {
|
2013-12-03 18:33:37 +01:00
|
|
|
return errors.New("private key unavailable")
|
|
|
|
}
|
|
|
|
|
2013-12-03 18:45:27 +01:00
|
|
|
privkey := &ecdsa.PrivateKey{
|
2014-04-09 02:18:52 +02:00
|
|
|
PublicKey: *a.pubKey.ToECDSA(),
|
2014-01-17 01:28:34 +01:00
|
|
|
D: new(big.Int).SetBytes(a.privKeyCT),
|
2013-12-03 18:45:27 +01:00
|
|
|
}
|
2013-12-03 18:33:37 +01:00
|
|
|
|
|
|
|
data := "String to sign."
|
|
|
|
r, s, err := ecdsa.Sign(rand.Reader, privkey, []byte(data))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
ok := ecdsa.Verify(&privkey.PublicKey, []byte(data), r, s)
|
|
|
|
if !ok {
|
|
|
|
return errors.New("ecdsa verification failed")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// ReadFrom reads an encrypted address from an io.Reader.
|
2013-09-09 20:14:57 +02:00
|
|
|
func (a *btcAddress) ReadFrom(r io.Reader) (n int64, err error) {
|
2013-08-21 16:37:30 +02:00
|
|
|
var read int64
|
|
|
|
|
|
|
|
// Checksums
|
|
|
|
var chkPubKeyHash uint32
|
|
|
|
var chkChaincode uint32
|
|
|
|
var chkInitVector uint32
|
|
|
|
var chkPrivKey uint32
|
|
|
|
var chkPubKey uint32
|
2014-04-09 02:18:52 +02:00
|
|
|
var pubKeyHash [ripemd160.Size]byte
|
|
|
|
var pubKey publicKey
|
2013-08-21 16:37:30 +02:00
|
|
|
|
|
|
|
// Read serialized wallet into addr fields and checksums.
|
|
|
|
datas := []interface{}{
|
2014-04-09 02:18:52 +02:00
|
|
|
&pubKeyHash,
|
2013-08-21 16:37:30 +02:00
|
|
|
&chkPubKeyHash,
|
2013-09-03 06:10:32 +02:00
|
|
|
make([]byte, 4), // version
|
2013-09-09 20:14:57 +02:00
|
|
|
&a.flags,
|
|
|
|
&a.chaincode,
|
2013-08-21 16:37:30 +02:00
|
|
|
&chkChaincode,
|
2013-09-09 20:14:57 +02:00
|
|
|
&a.chainIndex,
|
|
|
|
&a.chainDepth,
|
|
|
|
&a.initVector,
|
2013-08-21 16:37:30 +02:00
|
|
|
&chkInitVector,
|
2013-09-09 20:14:57 +02:00
|
|
|
&a.privKey,
|
2013-08-21 16:37:30 +02:00
|
|
|
&chkPrivKey,
|
2014-04-09 02:18:52 +02:00
|
|
|
&pubKey,
|
2013-08-21 16:37:30 +02:00
|
|
|
&chkPubKey,
|
2013-09-09 20:14:57 +02:00
|
|
|
&a.firstSeen,
|
|
|
|
&a.lastSeen,
|
|
|
|
&a.firstBlock,
|
2014-03-27 16:32:56 +01:00
|
|
|
&a.partialSyncHeight,
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
for _, data := range datas {
|
2013-09-03 06:10:32 +02:00
|
|
|
if rf, ok := data.(io.ReaderFrom); ok {
|
|
|
|
read, err = rf.ReadFrom(r)
|
|
|
|
} else {
|
|
|
|
read, err = binaryRead(r, binary.LittleEndian, data)
|
|
|
|
}
|
|
|
|
if err != nil {
|
2013-08-21 16:37:30 +02:00
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify checksums, correct errors where possible.
|
|
|
|
checks := []struct {
|
|
|
|
data []byte
|
|
|
|
chk uint32
|
|
|
|
}{
|
2014-04-09 02:18:52 +02:00
|
|
|
{pubKeyHash[:], chkPubKeyHash},
|
2013-09-09 20:14:57 +02:00
|
|
|
{a.chaincode[:], chkChaincode},
|
|
|
|
{a.initVector[:], chkInitVector},
|
|
|
|
{a.privKey[:], chkPrivKey},
|
2014-04-09 02:18:52 +02:00
|
|
|
{pubKey, chkPubKey},
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
2013-09-09 20:14:57 +02:00
|
|
|
for i := range checks {
|
2013-08-21 16:37:30 +02:00
|
|
|
if err = verifyAndFix(checks[i].data, checks[i].chk); err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
if !a.flags.hasPubKey {
|
|
|
|
return n, errors.New("read in an address without a public key")
|
|
|
|
}
|
|
|
|
pk, err := btcec.ParsePubKey(pubKey, btcec.S256())
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
a.pubKey = pk
|
|
|
|
|
|
|
|
addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash[:], a.wallet.Net())
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
a.address = addr
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
func (a *btcAddress) WriteTo(w io.Writer) (n int64, err error) {
|
2013-08-21 16:37:30 +02:00
|
|
|
var written int64
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
var pubKey publicKey = a.pubKeyBytes()
|
|
|
|
|
|
|
|
hash := a.address.ScriptAddress()
|
2013-08-21 16:37:30 +02:00
|
|
|
datas := []interface{}{
|
2014-04-09 02:18:52 +02:00
|
|
|
&hash,
|
|
|
|
walletHash(hash),
|
2013-09-03 06:10:32 +02:00
|
|
|
make([]byte, 4), // version
|
2013-09-09 20:14:57 +02:00
|
|
|
&a.flags,
|
|
|
|
&a.chaincode,
|
|
|
|
walletHash(a.chaincode[:]),
|
|
|
|
&a.chainIndex,
|
|
|
|
&a.chainDepth,
|
|
|
|
&a.initVector,
|
|
|
|
walletHash(a.initVector[:]),
|
|
|
|
&a.privKey,
|
|
|
|
walletHash(a.privKey[:]),
|
2014-04-09 02:18:52 +02:00
|
|
|
&pubKey,
|
|
|
|
walletHash(pubKey),
|
2013-09-09 20:14:57 +02:00
|
|
|
&a.firstSeen,
|
|
|
|
&a.lastSeen,
|
|
|
|
&a.firstBlock,
|
2014-03-27 16:32:56 +01:00
|
|
|
&a.partialSyncHeight,
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
for _, data := range datas {
|
2013-09-03 06:10:32 +02:00
|
|
|
if wt, ok := data.(io.WriterTo); ok {
|
|
|
|
written, err = wt.WriteTo(w)
|
|
|
|
} else {
|
|
|
|
written, err = binaryWrite(w, binary.LittleEndian, data)
|
|
|
|
}
|
2013-08-21 16:37:30 +02:00
|
|
|
if err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
}
|
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// encrypt attempts to encrypt an address's clear text private key,
|
|
|
|
// failing if the address is already encrypted or if the private key is
|
|
|
|
// not 32 bytes. If successful, the encryption flag is set.
|
|
|
|
func (a *btcAddress) encrypt(key []byte) error {
|
|
|
|
if a.flags.encrypted {
|
2014-02-04 16:37:28 +01:00
|
|
|
return ErrAlreadyEncrypted
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
2014-01-17 01:28:34 +01:00
|
|
|
if len(a.privKeyCT) != 32 {
|
2013-09-09 20:14:57 +02:00
|
|
|
return errors.New("invalid clear text private key")
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
aesBlockEncrypter, err := aes.NewCipher(key)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
aesEncrypter := cipher.NewCFBEncrypter(aesBlockEncrypter, a.initVector[:])
|
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
aesEncrypter.XORKeyStream(a.privKey[:], a.privKeyCT)
|
2013-09-03 06:10:32 +02:00
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
a.flags.hasPrivKey = true
|
2013-09-03 06:10:32 +02:00
|
|
|
a.flags.encrypted = true
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// lock removes the reference this address holds to its clear text
|
|
|
|
// private key. This function fails if the address is not encrypted.
|
|
|
|
func (a *btcAddress) lock() error {
|
|
|
|
if !a.flags.encrypted {
|
2013-09-09 20:14:57 +02:00
|
|
|
return errors.New("unable to lock unencrypted address")
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
zero(a.privKeyCT)
|
|
|
|
a.privKeyCT = nil
|
2013-09-03 06:10:32 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-01-17 01:28:34 +01:00
|
|
|
// unlock decrypts and stores a pointer to an address's private key,
|
2013-09-03 06:10:32 +02:00
|
|
|
// failing if the address is not encrypted, or the provided key is
|
2013-11-11 21:30:50 +01:00
|
|
|
// incorrect. The returned clear text private key will always be a copy
|
|
|
|
// that may be safely used by the caller without worrying about it being
|
|
|
|
// zeroed during an address lock.
|
|
|
|
func (a *btcAddress) unlock(key []byte) (privKeyCT []byte, err error) {
|
2013-09-03 06:10:32 +02:00
|
|
|
if !a.flags.encrypted {
|
2013-11-11 21:30:50 +01:00
|
|
|
return nil, errors.New("unable to unlock unencrypted address")
|
|
|
|
}
|
|
|
|
|
|
|
|
// If secret is already saved, return a copy without performing a full
|
|
|
|
// unlock.
|
2014-01-17 01:28:34 +01:00
|
|
|
if len(a.privKeyCT) == 32 {
|
2013-11-11 21:30:50 +01:00
|
|
|
privKeyCT := make([]byte, 32)
|
2014-01-17 01:28:34 +01:00
|
|
|
copy(privKeyCT, a.privKeyCT)
|
2013-11-11 21:30:50 +01:00
|
|
|
return privKeyCT, nil
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
// Decrypt private key with AES key.
|
2013-09-03 06:10:32 +02:00
|
|
|
aesBlockDecrypter, err := aes.NewCipher(key)
|
2013-08-21 16:37:30 +02:00
|
|
|
if err != nil {
|
2013-11-11 21:30:50 +01:00
|
|
|
return nil, err
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
2013-09-03 06:10:32 +02:00
|
|
|
aesDecrypter := cipher.NewCFBDecrypter(aesBlockDecrypter, a.initVector[:])
|
2013-11-11 21:30:50 +01:00
|
|
|
privkey := make([]byte, 32)
|
|
|
|
aesDecrypter.XORKeyStream(privkey, a.privKey[:])
|
2013-08-21 16:37:30 +02:00
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
x, y := btcec.S256().ScalarBaseMult(privkey)
|
2014-04-09 02:18:52 +02:00
|
|
|
if x.Cmp(a.pubKey.X) != 0 || y.Cmp(a.pubKey.Y) != 0 {
|
2014-01-27 15:30:42 +01:00
|
|
|
return nil, ErrWrongPassphrase
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
privkeyCopy := make([]byte, 32)
|
|
|
|
copy(privkeyCopy, privkey)
|
2014-01-17 01:28:34 +01:00
|
|
|
a.privKeyCT = privkey
|
2013-11-11 21:30:50 +01:00
|
|
|
return privkeyCopy, nil
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
2014-01-27 15:30:42 +01:00
|
|
|
// changeEncryptionKey re-encrypts the private keys for an address
|
|
|
|
// with a new AES encryption key. oldkey must be the old AES encryption key
|
|
|
|
// and is used to decrypt the private key.
|
2013-09-09 20:14:57 +02:00
|
|
|
func (a *btcAddress) changeEncryptionKey(oldkey, newkey []byte) error {
|
2014-01-27 15:30:42 +01:00
|
|
|
// Address must have a private key and be encrypted to continue.
|
|
|
|
if !a.flags.hasPrivKey {
|
|
|
|
return errors.New("no private key")
|
|
|
|
}
|
|
|
|
if !a.flags.encrypted {
|
|
|
|
return errors.New("address is not encrypted")
|
|
|
|
}
|
|
|
|
|
|
|
|
privKeyCT, err := a.unlock(oldkey)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
aesBlockEncrypter, err := aes.NewCipher(newkey)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
newIV := make([]byte, len(a.initVector))
|
|
|
|
if _, err := rand.Read(newIV); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
copy(a.initVector[:], newIV)
|
|
|
|
aesEncrypter := cipher.NewCFBEncrypter(aesBlockEncrypter, a.initVector[:])
|
|
|
|
aesEncrypter.XORKeyStream(a.privKey[:], privKeyCT)
|
|
|
|
|
|
|
|
return nil
|
2013-09-09 20:14:57 +02:00
|
|
|
}
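
// exampleCFBRoundTrip is an illustrative sketch rather than part of the
// original API: it shows the AES-CFB pattern shared by encrypt, unlock, and
// changeEncryptionKey.  key is assumed to be a valid AES key (the wallet
// derives a 32-byte key with its KDF), and the same key and IV must be used
// to recover the original clear text private key.
func exampleCFBRoundTrip(key []byte, iv [16]byte, privKeyCT []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}

	// Encrypt the clear text private key.
	ct := make([]byte, len(privKeyCT))
	cipher.NewCFBEncrypter(block, iv[:]).XORKeyStream(ct, privKeyCT)

	// Decrypt with the same key and IV to recover the original bytes.
	pt := make([]byte, len(ct))
	cipher.NewCFBDecrypter(block, iv[:]).XORKeyStream(pt, ct)
	return pt, nil
}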
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// Address returns the pub key address, implementing WalletAddress.
|
|
|
|
func (a *btcAddress) Address() btcutil.Address {
|
|
|
|
return a.address
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// AddrHash returns the pub key hash, implementing WalletAddress.
|
|
|
|
func (a *btcAddress) AddrHash() string {
|
|
|
|
return string(a.address.ScriptAddress())
|
|
|
|
}
|
2013-10-30 02:22:14 +01:00
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// FirstBlock returns the first block the address is seen in, implementing
|
|
|
|
// WalletAddress.
|
|
|
|
func (a *btcAddress) FirstBlock() int32 {
|
|
|
|
return a.firstBlock
|
|
|
|
}
|
|
|
|
|
|
|
|
// Imported returns true if the address was imported, rather than being part
|
|
|
|
// of an address chain, implementing WalletAddress.
|
|
|
|
func (a *btcAddress) Imported() bool {
|
|
|
|
return a.chainIndex == importedKeyChainIdx
|
|
|
|
}
|
|
|
|
|
|
|
|
// Change returns true if the address was created as a change address,
|
|
|
|
// implementing WalletAddress.
|
|
|
|
func (a *btcAddress) Change() bool {
|
|
|
|
return a.flags.change
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compressed returns true if the address backing key is compressed,
|
|
|
|
// implementing WalletAddress.
|
|
|
|
func (a *btcAddress) Compressed() bool {
|
|
|
|
return a.flags.compressed
|
|
|
|
}
|
|
|
|
|
|
|
|
// SyncStatus returns a SyncStatus type for how the address is currently
|
|
|
|
// synced. For an Unsynced type, the value is the recorded first seen
|
|
|
|
// block height of the address.
|
|
|
|
func (a *btcAddress) SyncStatus() SyncStatus {
|
|
|
|
switch {
|
|
|
|
case a.flags.unsynced && !a.flags.partialSync:
|
|
|
|
return Unsynced(a.firstBlock)
|
|
|
|
case a.flags.unsynced && a.flags.partialSync:
|
|
|
|
return PartialSync(a.partialSyncHeight)
|
|
|
|
default:
|
|
|
|
return FullSync{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// PubKey returns the public key for the address, implementing
|
|
|
|
// PubKeyAddress.
|
|
|
|
func (a *btcAddress) PubKey() *btcec.PublicKey {
|
|
|
|
return a.pubKey
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a *btcAddress) pubKeyBytes() []byte {
|
|
|
|
if a.Compressed() {
|
|
|
|
return a.pubKey.SerializeCompressed()
|
|
|
|
}
|
|
|
|
return a.pubKey.SerializeUncompressed()
|
|
|
|
}
|
|
|
|
|
|
|
|
// ExportPubKey returns the public key associated with the address serialised as
|
|
|
|
// a hex encoded string, implementing PubKeyAddress.
|
|
|
|
func (a *btcAddress) ExportPubKey() string {
|
|
|
|
return hex.EncodeToString(a.pubKeyBytes())
|
|
|
|
}
|
|
|
|
|
2014-04-11 20:52:50 +02:00
|
|
|
// PrivKey implements PubKeyAddress by returning the private key, or an error
|
2014-04-09 02:18:52 +02:00
|
|
|
// if the wallet is locked, watching-only, or the private key is missing.
|
|
|
|
func (a *btcAddress) PrivKey() (*ecdsa.PrivateKey, error) {
|
|
|
|
if a.wallet.flags.watchingOnly {
|
|
|
|
return nil, ErrWalletIsWatchingOnly
|
|
|
|
}
|
|
|
|
|
|
|
|
if !a.flags.hasPrivKey {
|
|
|
|
return nil, errors.New("no private key for address")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wallet must be unlocked to decrypt the private key.
|
|
|
|
if a.wallet.IsLocked() {
|
|
|
|
return nil, ErrWalletLocked
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unlock address with wallet secret. unlock returns a copy of
|
|
|
|
// the clear text private key, and may be used safely even
|
|
|
|
// during an address lock.
|
|
|
|
privKeyCT, err := a.unlock(a.wallet.secret)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &ecdsa.PrivateKey{
|
|
|
|
PublicKey: *a.pubKey.ToECDSA(),
|
|
|
|
D: new(big.Int).SetBytes(privKeyCT),
|
2013-10-30 02:22:14 +01:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2014-05-22 00:50:47 +02:00
|
|
|
// ExportPrivKey exports the private key as a WIF for encoding as a string
|
|
|
|
// in the Wallet Import Format.
|
|
|
|
func (a *btcAddress) ExportPrivKey() (*btcutil.WIF, error) {
|
2014-04-09 02:18:52 +02:00
|
|
|
pk, err := a.PrivKey()
|
|
|
|
if err != nil {
|
2014-05-22 00:50:47 +02:00
|
|
|
return nil, err
|
2014-04-09 02:18:52 +02:00
|
|
|
}
|
2014-05-27 19:50:51 +02:00
|
|
|
// NewWIF only errors if the network is nil. In this case, panic,
|
|
|
|
// as our program's assumptions are so broken that this needs to be
|
|
|
|
// caught immediately, and a stack trace here is more useful than
|
|
|
|
// elsewhere.
|
|
|
|
wif, err := btcutil.NewWIF((*btcec.PrivateKey)(pk), a.wallet.Net(), a.Compressed())
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
return wif, nil
|
2014-04-09 02:18:52 +02:00
|
|
|
}
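
// exampleDumpPrivKey is an illustrative sketch rather than part of the
// original API: it mirrors a dumpprivkey-style operation by exporting a
// WIF and encoding it as a string.  Only ExportPrivKey and the WIF String
// method from btcutil are relied upon here.
func exampleDumpPrivKey(pka PubKeyAddress) (string, error) {
	wif, err := pka.ExportPrivKey()
	if err != nil {
		// Fails when the wallet is watching-only, locked, or the
		// address has no private key.
		return "", err
	}
	return wif.String(), nil
}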
|
|
|
|
|
2014-01-20 18:56:27 +01:00
|
|
|
// watchingCopy creates a copy of an address without a private key.
|
|
|
|
// This is used to fill a watching wallet with addresses from a
|
|
|
|
// normal wallet.
|
2014-04-09 02:18:52 +02:00
|
|
|
func (a *btcAddress) watchingCopy(wallet *Wallet) walletAddress {
|
2014-01-20 18:56:27 +01:00
|
|
|
return &btcAddress{
|
2014-04-09 02:18:52 +02:00
|
|
|
wallet: wallet,
|
|
|
|
address: a.address,
|
2014-01-20 18:56:27 +01:00
|
|
|
flags: addrFlags{
|
|
|
|
hasPrivKey: false,
|
2014-04-09 02:18:52 +02:00
|
|
|
hasPubKey: true,
|
2014-01-20 18:56:27 +01:00
|
|
|
encrypted: false,
|
|
|
|
createPrivKeyNextUnlock: false,
|
|
|
|
compressed: a.flags.compressed,
|
2014-02-03 16:21:47 +01:00
|
|
|
change: a.flags.change,
|
2014-03-17 15:24:14 +01:00
|
|
|
unsynced: a.flags.unsynced,
|
2014-01-20 18:56:27 +01:00
|
|
|
},
|
2014-03-27 16:32:56 +01:00
|
|
|
chaincode: a.chaincode,
|
|
|
|
chainIndex: a.chainIndex,
|
|
|
|
chainDepth: a.chainDepth,
|
|
|
|
pubKey: a.pubKey,
|
|
|
|
firstSeen: a.firstSeen,
|
|
|
|
lastSeen: a.lastSeen,
|
|
|
|
firstBlock: a.firstBlock,
|
|
|
|
partialSyncHeight: a.partialSyncHeight,
|
2014-01-20 18:56:27 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-03-27 16:32:56 +01:00
|
|
|
// setSyncStatus sets the address flags and possibly the partial sync height
|
|
|
|
// depending on the type of s.
|
|
|
|
func (a *btcAddress) setSyncStatus(s SyncStatus) {
|
|
|
|
switch e := s.(type) {
|
|
|
|
case Unsynced:
|
|
|
|
a.flags.unsynced = true
|
|
|
|
a.flags.partialSync = false
|
|
|
|
a.partialSyncHeight = 0
|
|
|
|
|
|
|
|
case PartialSync:
|
|
|
|
a.flags.unsynced = true
|
|
|
|
a.flags.partialSync = true
|
|
|
|
a.partialSyncHeight = int32(e)
|
|
|
|
|
|
|
|
case FullSync:
|
|
|
|
a.flags.unsynced = false
|
|
|
|
a.flags.partialSync = false
|
|
|
|
a.partialSyncHeight = 0
|
|
|
|
}
|
2014-03-17 15:24:14 +01:00
|
|
|
}
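
// Example (sketch using the SyncStatus values handled above): marking an
// address partially synced to height 250000 and then fully synced again.
//
//	a.setSyncStatus(PartialSync(250000)) // unsynced=true, partialSync=true, height saved
//	a.setSyncStatus(FullSync{})          // both flags cleared, height reset to 0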
|
|
|
|
|
2014-03-13 20:13:39 +01:00
|
|
|
// Note that there is no encrypted bit here: if a script were encrypted and
// then used on the blockchain, it would provide a simple known plaintext in
// the wallet file. It was determined that the script in a P2SH transaction is
// not a secret, and any sane situation would also require a signature (which
// does have a secret).
|
|
|
|
type scriptFlags struct {
|
2014-03-27 16:32:56 +01:00
|
|
|
hasScript bool
|
|
|
|
change bool
|
|
|
|
unsynced bool
|
|
|
|
partialSync bool
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// ReadFrom implements the io.ReaderFrom interface by reading from r into sf.
|
|
|
|
func (sf *scriptFlags) ReadFrom(r io.Reader) (int64, error) {
|
|
|
|
var b [8]byte
|
2014-04-16 23:22:39 +02:00
|
|
|
n, err := io.ReadFull(r, b[:])
|
2014-03-13 20:13:39 +01:00
|
|
|
if err != nil {
|
|
|
|
return int64(n), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// We match bits from addrFlags for similar fields. Hence hasScript uses
// the same bit as hasPubKey, and the change bit is the same for both.
|
|
|
|
sf.hasScript = b[0]&(1<<1) != 0
|
|
|
|
sf.change = b[0]&(1<<5) != 0
|
2014-03-17 15:24:14 +01:00
|
|
|
sf.unsynced = b[0]&(1<<6) != 0
|
2014-03-27 16:32:56 +01:00
|
|
|
sf.partialSync = b[0]&(1<<7) != 0
|
2014-03-13 20:13:39 +01:00
|
|
|
|
|
|
|
return int64(n), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTo implements the io.WriterTo interface by writing sf into w.
|
|
|
|
func (sf *scriptFlags) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
var b [8]byte
|
|
|
|
if sf.hasScript {
|
|
|
|
b[0] |= 1 << 1
|
|
|
|
}
|
|
|
|
if sf.change {
|
|
|
|
b[0] |= 1 << 5
|
|
|
|
}
|
2014-03-17 15:24:14 +01:00
|
|
|
if sf.unsynced {
|
|
|
|
b[0] |= 1 << 6
|
|
|
|
}
|
2014-03-27 16:32:56 +01:00
|
|
|
if sf.partialSync {
|
|
|
|
b[0] |= 1 << 7
|
|
|
|
}
|
2014-03-13 20:13:39 +01:00
|
|
|
|
|
|
|
n, err := w.Write(b[:])
|
|
|
|
return int64(n), err
|
|
|
|
}
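
// For reference, the single meaningful byte of the flags field uses the
// following bits (taken from the ReadFrom/WriteTo methods above; the
// remaining bits are currently unused):
//
//	bit 1 (0x02): hasScript
//	bit 5 (0x20): change
//	bit 6 (0x40): unsynced
//	bit 7 (0x80): partialSync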
|
|
|
|
|
|
|
|
// p2SHScript represents the variable length script entry in a wallet.
|
|
|
|
type p2SHScript []byte
|
|
|
|
|
|
|
|
// ReadFrom implements the ReaderFrom interface by reading the P2SH script from
|
|
|
|
// r in the format <4 bytes little endian length><script bytes>
|
|
|
|
func (a *p2SHScript) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
// Read the script length.
|
2014-04-16 23:22:39 +02:00
|
|
|
var lenBytes [4]byte
|
2014-03-13 20:13:39 +01:00
|
|
|
|
2014-04-16 23:22:39 +02:00
|
|
|
read, err := io.ReadFull(r, lenBytes[:])
|
2014-03-13 20:13:39 +01:00
|
|
|
n += int64(read)
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
2014-04-16 23:22:39 +02:00
|
|
|
length := binary.LittleEndian.Uint32(lenBytes[:])
|
2014-03-13 20:13:39 +01:00
|
|
|
|
|
|
|
script := make([]byte, length)
|
|
|
|
|
2014-04-16 23:22:39 +02:00
|
|
|
read, err = io.ReadFull(r, script)
|
2014-03-13 20:13:39 +01:00
|
|
|
n += int64(read)
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
*a = script
|
|
|
|
|
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTo implements the WriterTo interface by writing the P2SH script to w in
|
|
|
|
// the format <4 bytes little endian length><script bytes>
|
|
|
|
func (a *p2SHScript) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
// Prepare and write 32-bit little-endian length header
|
2014-04-16 23:22:39 +02:00
|
|
|
var lenBytes [4]byte
|
|
|
|
binary.LittleEndian.PutUint32(lenBytes[:], uint32(len(*a)))
|
2014-03-13 20:13:39 +01:00
|
|
|
|
2014-04-16 23:22:39 +02:00
|
|
|
written, err := w.Write(lenBytes[:])
|
2014-03-13 20:13:39 +01:00
|
|
|
n += int64(written)
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now write the bytes themselves.
|
|
|
|
written, err = w.Write(*a)
|
|
|
|
|
|
|
|
return n + int64(written), err
|
|
|
|
}
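
// Worked example of the format described above: a 3-byte script
// OP_1 OP_2 OP_3 (0x51 0x52 0x53) is serialized as its 4-byte little-endian
// length followed by the script bytes.
//
//	03 00 00 00 51 52 53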
|
|
|
|
|
|
|
|
type scriptAddress struct {
|
2014-04-09 02:18:52 +02:00
|
|
|
wallet *Wallet
|
|
|
|
address btcutil.Address
|
|
|
|
class btcscript.ScriptClass
|
|
|
|
addresses []btcutil.Address
|
|
|
|
reqSigs int
|
2014-03-27 16:32:56 +01:00
|
|
|
flags scriptFlags
|
|
|
|
script p2SHScript // variable length
|
|
|
|
firstSeen int64
|
|
|
|
lastSeen int64
|
|
|
|
firstBlock int32
|
|
|
|
partialSyncHeight int32
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// ScriptAddress is an interface representing a Pay-to-Script-Hash style of
|
|
|
|
// bitcoin address.
|
|
|
|
type ScriptAddress interface {
|
|
|
|
WalletAddress
|
|
|
|
// Returns the script associated with the address.
|
|
|
|
Script() []byte
|
|
|
|
// Returns the class of the script associated with the address.
|
|
|
|
ScriptClass() btcscript.ScriptClass
|
|
|
|
// Returns the addresses that are required to sign transactions from the
|
|
|
|
// script address.
|
|
|
|
Addresses() []btcutil.Address
|
|
|
|
// Returns the number of signatures required by the script address.
|
|
|
|
RequiredSigs() int
|
|
|
|
}
|
|
|
|
|
2014-03-13 20:13:39 +01:00
|
|
|
// newScriptAddress initializes and returns a new P2SH address.
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
func newScriptAddress(wallet *Wallet, script []byte, bs *BlockStamp) (addr *scriptAddress, err error) {
|
|
|
|
class, addresses, reqSigs, err :=
|
|
|
|
btcscript.ExtractPkScriptAddrs(script, wallet.Net())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
scriptHash := btcutil.Hash160(script)
|
|
|
|
|
|
|
|
address, err := btcutil.NewAddressScriptHashFromHash(scriptHash,
|
|
|
|
wallet.Net())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2014-03-13 20:13:39 +01:00
|
|
|
addr = &scriptAddress{
|
2014-04-09 02:18:52 +02:00
|
|
|
wallet: wallet,
|
|
|
|
address: address,
|
|
|
|
addresses: addresses,
|
|
|
|
class: class,
|
|
|
|
reqSigs: reqSigs,
|
2014-03-13 20:13:39 +01:00
|
|
|
flags: scriptFlags{
|
|
|
|
hasScript: true,
|
|
|
|
change: false,
|
|
|
|
},
|
|
|
|
script: script,
|
|
|
|
firstSeen: time.Now().Unix(),
|
|
|
|
firstBlock: bs.Height,
|
|
|
|
}
|
|
|
|
|
|
|
|
return addr, nil
|
|
|
|
}
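
// Hypothetical usage sketch (w, redeemScript, height, and hash are assumed
// names): wrapping an existing redeem script at a known block.
//
//	sa, err := newScriptAddress(w, redeemScript, &BlockStamp{Height: height, Hash: hash})
//	if err != nil {
//		// handle error
//	}
//	_ = sa.Address() // the pay-to-script-hash address for redeemScript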
|
|
|
|
|
|
|
|
// ReadFrom reads a script address from an io.Reader.
|
|
|
|
func (a *scriptAddress) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
// Checksums
|
|
|
|
var chkScriptHash uint32
|
|
|
|
var chkScript uint32
|
2014-04-09 02:18:52 +02:00
|
|
|
var scriptHash [ripemd160.Size]byte
|
2014-03-13 20:13:39 +01:00
|
|
|
|
|
|
|
// Read the serialized script address into the addr fields and checksums.
|
|
|
|
datas := []interface{}{
|
2014-04-09 02:18:52 +02:00
|
|
|
&scriptHash,
|
2014-03-13 20:13:39 +01:00
|
|
|
&chkScriptHash,
|
|
|
|
make([]byte, 4), // version
|
|
|
|
&a.flags,
|
|
|
|
&a.script,
|
|
|
|
&chkScript,
|
|
|
|
&a.firstSeen,
|
|
|
|
&a.lastSeen,
|
|
|
|
&a.firstBlock,
|
2014-03-27 16:32:56 +01:00
|
|
|
&a.partialSyncHeight,
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
for _, data := range datas {
|
|
|
|
if rf, ok := data.(io.ReaderFrom); ok {
|
|
|
|
read, err = rf.ReadFrom(r)
|
|
|
|
} else {
|
|
|
|
read, err = binaryRead(r, binary.LittleEndian, data)
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify checksums, correct errors where possible.
|
|
|
|
checks := []struct {
|
|
|
|
data []byte
|
|
|
|
chk uint32
|
|
|
|
}{
|
2014-04-09 02:18:52 +02:00
|
|
|
{scriptHash[:], chkScriptHash},
|
2014-03-13 20:13:39 +01:00
|
|
|
{a.script, chkScript},
|
|
|
|
}
|
|
|
|
for i := range checks {
|
|
|
|
if err = verifyAndFix(checks[i].data, checks[i].chk); err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
address, err := btcutil.NewAddressScriptHashFromHash(scriptHash[:],
|
|
|
|
a.wallet.Net())
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
a.address = address
|
|
|
|
|
|
|
|
if !a.flags.hasScript {
|
|
|
|
return n, errors.New("read in an address with no script")
|
|
|
|
}
|
|
|
|
|
|
|
|
class, addresses, reqSigs, err :=
|
|
|
|
btcscript.ExtractPkScriptAddrs(a.script, a.wallet.Net())
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
a.class = class
|
|
|
|
a.addresses = addresses
|
|
|
|
a.reqSigs = reqSigs
|
|
|
|
|
2014-03-13 20:13:39 +01:00
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTo implements io.WriterTo by writing the scriptAddress to w.
|
|
|
|
func (a *scriptAddress) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var written int64
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
hash := a.address.ScriptAddress()
|
2014-03-13 20:13:39 +01:00
|
|
|
datas := []interface{}{
|
2014-04-09 02:18:52 +02:00
|
|
|
&hash,
|
|
|
|
walletHash(hash),
|
2014-03-13 20:13:39 +01:00
|
|
|
make([]byte, 4), //version
|
|
|
|
&a.flags,
|
|
|
|
&a.script,
|
|
|
|
walletHash(a.script),
|
|
|
|
&a.firstSeen,
|
|
|
|
&a.lastSeen,
|
|
|
|
&a.firstBlock,
|
2014-03-27 16:32:56 +01:00
|
|
|
&a.partialSyncHeight,
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
for _, data := range datas {
|
|
|
|
if wt, ok := data.(io.WriterTo); ok {
|
|
|
|
written, err = wt.WriteTo(w)
|
|
|
|
} else {
|
|
|
|
written, err = binaryWrite(w, binary.LittleEndian, data)
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
}
|
|
|
|
return n, nil
|
|
|
|
}
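
// For reference, the serialization produced above is, in order: the 20-byte
// script hash, a 4-byte checksum of that hash, 4 reserved version bytes, the
// 8-byte flags field, the length-prefixed script, a 4-byte checksum of the
// script, then firstSeen, lastSeen (int64 each), firstBlock, and
// partialSyncHeight (int32 each).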
|
|
|
|
|
|
|
|
// Address returns the btcutil.AddressScriptHash for a scriptAddress.
|
2014-04-09 02:18:52 +02:00
|
|
|
func (sa *scriptAddress) Address() btcutil.Address {
|
|
|
|
return sa.address
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// AddrHash returns the script hash, implementing AddressInfo.
|
2014-04-09 02:18:52 +02:00
|
|
|
func (sa *scriptAddress) AddrHash() string {
|
|
|
|
return string(sa.address.ScriptAddress())
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// FirstBlock returns the first block height the address is known at.
|
|
|
|
func (sa *scriptAddress) FirstBlock() int32 {
|
|
|
|
return sa.firstBlock
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// Imported currently always returns true since script addresses are always
|
|
|
|
// imported addresses and not part of any chain.
|
|
|
|
func (sa *scriptAddress) Imported() bool {
|
|
|
|
return true
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// Change returns true if the address was created as a change address.
|
|
|
|
func (sa *scriptAddress) Change() bool {
|
|
|
|
return sa.flags.change
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// Compressed returns false since script addresses are never compressed.
|
|
|
|
// Implements WalletAddress.
|
|
|
|
func (sa *scriptAddress) Compressed() bool {
|
2014-03-13 20:13:39 +01:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// Script returns the script that is represented by the address. It should not
|
|
|
|
// be modified.
|
|
|
|
func (sa *scriptAddress) Script() []byte {
|
|
|
|
return sa.script
|
2014-03-27 16:32:56 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// Addresses returns the list of addresses that must sign the script.
|
|
|
|
func (sa *scriptAddress) Addresses() []btcutil.Address {
|
|
|
|
return sa.addresses
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// ScriptClass returns the type of script the address is.
|
|
|
|
func (sa *scriptAddress) ScriptClass() btcscript.ScriptClass {
|
|
|
|
return sa.class
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// RequiredSigs returns the number of signatures required by the script.
|
|
|
|
func (sa *scriptAddress) RequiredSigs() int {
|
|
|
|
return sa.reqSigs
|
2014-03-13 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// SyncStatus returns a SyncStatus type for how the address is currently
|
|
|
|
// synced. For an Unsynced type, the value is the recorded first seen
|
|
|
|
// block height of the address.
|
|
|
|
// Implements WalletAddress.
|
|
|
|
func (a *scriptAddress) SyncStatus() SyncStatus {
|
2014-03-27 16:32:56 +01:00
|
|
|
switch {
|
|
|
|
case a.flags.unsynced && !a.flags.partialSync:
|
|
|
|
return Unsynced(a.firstBlock)
|
|
|
|
case a.flags.unsynced && a.flags.partialSync:
|
|
|
|
return PartialSync(a.partialSyncHeight)
|
|
|
|
default:
|
|
|
|
return FullSync{}
|
|
|
|
}
|
2014-03-17 15:24:14 +01:00
|
|
|
}
|
|
|
|
|
2014-03-27 16:32:56 +01:00
|
|
|
// setSyncStatus sets the address flags and possibly the partial sync height
|
|
|
|
// depending on the type of s.
|
|
|
|
func (a *scriptAddress) setSyncStatus(s SyncStatus) {
|
|
|
|
switch e := s.(type) {
|
|
|
|
case Unsynced:
|
|
|
|
a.flags.unsynced = true
|
|
|
|
a.flags.partialSync = false
|
|
|
|
a.partialSyncHeight = 0
|
|
|
|
|
|
|
|
case PartialSync:
|
|
|
|
a.flags.unsynced = true
|
|
|
|
a.flags.partialSync = true
|
|
|
|
a.partialSyncHeight = int32(e)
|
|
|
|
|
|
|
|
case FullSync:
|
|
|
|
a.flags.unsynced = false
|
|
|
|
a.flags.partialSync = false
|
|
|
|
a.partialSyncHeight = 0
|
|
|
|
}
|
2014-03-17 15:24:14 +01:00
|
|
|
}
|
|
|
|
|
2014-04-09 02:18:52 +02:00
|
|
|
// watchingCopy creates a copy of an address without a private key.
|
|
|
|
// This is used to fill a watching-only wallet with addresses from a
// normal wallet.
|
|
|
|
func (a *scriptAddress) watchingCopy(wallet *Wallet) walletAddress {
|
|
|
|
return &scriptAddress{
|
|
|
|
wallet: wallet,
|
|
|
|
address: a.address,
|
|
|
|
addresses: a.addresses,
|
|
|
|
class: a.class,
|
|
|
|
reqSigs: a.reqSigs,
|
|
|
|
flags: scriptFlags{
|
|
|
|
change: a.flags.change,
|
|
|
|
unsynced: a.flags.unsynced,
|
|
|
|
},
|
|
|
|
script: a.script,
|
|
|
|
firstSeen: a.firstSeen,
|
|
|
|
lastSeen: a.lastSeen,
|
|
|
|
firstBlock: a.firstBlock,
|
|
|
|
partialSyncHeight: a.partialSyncHeight,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
func walletHash(b []byte) uint32 {
|
|
|
|
sum := btcwire.DoubleSha256(b)
|
|
|
|
return binary.LittleEndian.Uint32(sum)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(jrick) add error correction.
|
|
|
|
func verifyAndFix(b []byte, chk uint32) error {
|
|
|
|
if walletHash(b) != chk {
|
2013-09-09 20:14:57 +02:00
|
|
|
return ErrChecksumMismatch
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
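
// Worked example (illustrative): the checksum stored next to each field is
// the first four bytes, read little-endian, of the double-SHA256 of that
// field's serialization.
//
//	chk := walletHash(data)        // uint32 checksum for data
//	err := verifyAndFix(data, chk) // nil when data matches its checksum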
|
|
|
|
|
|
|
|
type kdfParameters struct {
|
|
|
|
mem uint64
|
|
|
|
nIter uint32
|
|
|
|
salt [32]byte
|
|
|
|
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// computeKdfParameters returns best guess parameters to the
|
|
|
|
// memory-hard key derivation function to make the computation last
|
|
|
|
// targetSec seconds, while using no more than maxMem bytes of memory.
|
2013-11-15 17:59:37 +01:00
|
|
|
func computeKdfParameters(targetSec float64, maxMem uint64) (*kdfParameters, error) {
|
2013-09-03 06:10:32 +02:00
|
|
|
params := &kdfParameters{}
|
2013-11-15 17:59:37 +01:00
|
|
|
if _, err := rand.Read(params.salt[:]); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2013-09-03 06:10:32 +02:00
|
|
|
|
|
|
|
testKey := []byte("This is an example key to test KDF iteration speed")
|
|
|
|
|
|
|
|
memoryReqtBytes := uint64(1024)
|
|
|
|
approxSec := float64(0)
|
|
|
|
|
|
|
|
for approxSec <= targetSec/4 && memoryReqtBytes < maxMem {
|
|
|
|
memoryReqtBytes *= 2
|
|
|
|
before := time.Now()
|
|
|
|
_ = keyOneIter(testKey, params.salt[:], memoryReqtBytes)
|
|
|
|
approxSec = time.Since(before).Seconds()
|
|
|
|
}
|
|
|
|
|
|
|
|
allItersSec := float64(0)
|
|
|
|
nIter := uint32(1)
|
|
|
|
for allItersSec < 0.02 { // This is a magic number straight from armory's source.
|
|
|
|
nIter *= 2
|
|
|
|
before := time.Now()
|
|
|
|
for i := uint32(0); i < nIter; i++ {
|
|
|
|
_ = keyOneIter(testKey, params.salt[:], memoryReqtBytes)
|
|
|
|
}
|
|
|
|
allItersSec = time.Since(before).Seconds()
|
|
|
|
}
|
|
|
|
|
|
|
|
params.mem = memoryReqtBytes
|
|
|
|
params.nIter = nIter
|
|
|
|
|
2013-11-15 17:59:37 +01:00
|
|
|
return params, nil
|
2013-09-03 06:10:32 +02:00
|
|
|
}
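
// Sketch of typical use with the package defaults defined near the top of
// this file:
//
//	params, err := computeKdfParameters(defaultKdfComputeTime, defaultKdfMaxMem)
//	if err != nil {
//		// handle error
//	}
//	// params.mem and params.nIter are now tuned against the target time
//	// and memory cap.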
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
func (params *kdfParameters) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
memBytes := make([]byte, 8)
|
|
|
|
nIterBytes := make([]byte, 4)
|
|
|
|
binary.LittleEndian.PutUint64(memBytes, params.mem)
|
|
|
|
binary.LittleEndian.PutUint32(nIterBytes, params.nIter)
|
|
|
|
chkedBytes := append(memBytes, nIterBytes...)
|
|
|
|
chkedBytes = append(chkedBytes, params.salt[:]...)
|
|
|
|
|
|
|
|
datas := []interface{}{
|
|
|
|
¶ms.mem,
|
|
|
|
¶ms.nIter,
|
|
|
|
¶ms.salt,
|
|
|
|
walletHash(chkedBytes),
|
|
|
|
make([]byte, 256-(binary.Size(params)+4)), // padding
|
|
|
|
}
|
|
|
|
for _, data := range datas {
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, data); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
}
|
|
|
|
|
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (params *kdfParameters) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
// These must be read in but are not saved directly to params.
|
|
|
|
chkedBytes := make([]byte, 44)
|
|
|
|
var chk uint32
|
|
|
|
padding := make([]byte, 256-(binary.Size(params)+4))
|
|
|
|
|
|
|
|
datas := []interface{}{
|
|
|
|
chkedBytes,
|
|
|
|
&chk,
|
|
|
|
padding,
|
|
|
|
}
|
|
|
|
for _, data := range datas {
|
|
|
|
if read, err = binaryRead(r, binary.LittleEndian, data); err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify checksum
|
|
|
|
if err = verifyAndFix(chkedBytes, chk); err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// Read params
|
2013-08-21 16:37:30 +02:00
|
|
|
buf := bytes.NewBuffer(chkedBytes)
|
|
|
|
datas = []interface{}{
|
|
|
|
¶ms.mem,
|
|
|
|
¶ms.nIter,
|
|
|
|
¶ms.salt,
|
|
|
|
}
|
|
|
|
for _, data := range datas {
|
|
|
|
if err = binary.Read(buf, binary.LittleEndian, data); err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return n, nil
|
|
|
|
}
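
// For reference, the fixed 256-byte serialization handled by WriteTo and
// ReadFrom is: 8 bytes mem (little-endian uint64), 4 bytes nIter (uint32),
// the 32-byte salt, a 4-byte checksum over those 44 bytes, and zero padding
// up to 256 bytes.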
|
|
|
|
|
|
|
|
type addrEntry struct {
|
|
|
|
pubKeyHash160 [ripemd160.Size]byte
|
|
|
|
addr btcAddress
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e *addrEntry) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
// Write header
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, addrHeader); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write hash
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, &e.pubKeyHash160); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write btcAddress
|
|
|
|
written, err = e.addr.WriteTo(w)
|
2013-09-03 06:10:32 +02:00
|
|
|
n += written
|
|
|
|
return n, err
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
func (e *addrEntry) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
if read, err = binaryRead(r, binary.LittleEndian, &e.pubKeyHash160); err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
read, err = e.addr.ReadFrom(r)
|
|
|
|
return n + read, err
|
|
|
|
}
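
// Note (derived from the methods above): WriteTo emits the addrHeader byte
// before the entry, but ReadFrom does not consume a header; the caller is
// expected to have already read the entry header in order to dispatch to the
// correct entry type.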
|
|
|
|
|
2014-03-13 20:13:39 +01:00
|
|
|
// scriptEntry is the entry type for a P2SH script.
|
|
|
|
type scriptEntry struct {
|
|
|
|
scriptHash160 [ripemd160.Size]byte
|
|
|
|
script scriptAddress
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTo implements io.WriterTo by writing the entry to w.
|
|
|
|
func (e *scriptEntry) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
// Write header
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, scriptHeader); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write hash
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, &e.scriptHash160); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write scriptAddress
|
|
|
|
written, err = e.script.WriteTo(w)
|
|
|
|
n += written
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadFrom implements io.ReaderFrom by reading the entry from r.
|
|
|
|
func (e *scriptEntry) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
if read, err = binaryRead(r, binary.LittleEndian, &e.scriptHash160); err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
read, err = e.script.ReadFrom(r)
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
type addrCommentEntry struct {
|
|
|
|
pubKeyHash160 [ripemd160.Size]byte
|
|
|
|
comment []byte
|
|
|
|
}
|
|
|
|
|
2014-05-27 19:50:51 +02:00
|
|
|
func (e *addrCommentEntry) address(net *btcnet.Params) (*btcutil.AddressPubKeyHash, error) {
|
2014-05-17 05:19:48 +02:00
|
|
|
return btcutil.NewAddressPubKeyHash(e.pubKeyHash160[:], net)
|
2014-01-06 18:24:29 +01:00
|
|
|
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
func (e *addrCommentEntry) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
// Comments shall not overflow their entry.
|
|
|
|
if len(e.comment) > maxCommentLen {
|
2013-09-09 20:14:57 +02:00
|
|
|
return n, ErrMalformedEntry
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Write header
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, addrCommentHeader); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write hash
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, &e.pubKeyHash160); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write length
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, uint16(len(e.comment))); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write comment
|
|
|
|
written, err = binaryWrite(w, binary.LittleEndian, e.comment)
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e *addrCommentEntry) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
if read, err = binaryRead(r, binary.LittleEndian, &e.pubKeyHash160); err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
var clen uint16
|
|
|
|
if read, err = binaryRead(r, binary.LittleEndian, &clen); err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
e.comment = make([]byte, clen)
|
|
|
|
read, err = binaryRead(r, binary.LittleEndian, e.comment)
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
|
|
|
|
type txCommentEntry struct {
|
|
|
|
txHash [sha256.Size]byte
|
|
|
|
comment []byte
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e *txCommentEntry) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
// Comments shall not overflow their entry.
|
|
|
|
if len(e.comment) > maxCommentLen {
|
2013-09-09 20:14:57 +02:00
|
|
|
return n, ErrMalformedEntry
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Write header
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, txCommentHeader); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write hash
if written, err = binaryWrite(w, binary.LittleEndian, &e.txHash); err != nil {
return n + written, err
}
n += written

// Write length
if written, err = binaryWrite(w, binary.LittleEndian, uint16(len(e.comment))); err != nil {
return n + written, err
}
n += written
|
|
|
|
|
|
|
|
// Write comment
|
|
|
|
written, err = binaryWrite(w, binary.LittleEndian, e.comment)
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e *txCommentEntry) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
if read, err = binaryRead(r, binary.LittleEndian, &e.txHash); err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
var clen uint16
|
|
|
|
if read, err = binaryRead(r, binary.LittleEndian, &clen); err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
e.comment = make([]byte, clen)
|
|
|
|
read, err = binaryRead(r, binary.LittleEndian, e.comment)
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
|
2013-11-04 17:50:32 +01:00
|
|
|
type deletedEntry struct{}
|
2013-08-21 16:37:30 +02:00
|
|
|
|
|
|
|
func (e *deletedEntry) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
var ulen uint16
|
|
|
|
if read, err = binaryRead(r, binary.LittleEndian, &ulen); err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
unused := make([]byte, ulen)
|
2014-04-16 23:22:39 +02:00
|
|
|
nRead, err := io.ReadFull(r, unused)
|
|
|
|
n += int64(nRead)
|
|
|
|
return n, err
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// BlockStamp defines a block (by height and a unique hash) and is
|
|
|
|
// used to mark a point in the blockchain that a wallet element is
|
|
|
|
// synced to.
|
|
|
|
type BlockStamp struct {
|
|
|
|
Height int32
|
|
|
|
Hash btcwire.ShaHash
|
|
|
|
}
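
// Minimal illustrative example (someShaHash stands in for a btcwire.ShaHash
// obtained elsewhere): stamping an element as synced to a particular block.
//
//	bs := &BlockStamp{Height: 300000, Hash: someShaHash}
//	_ = bs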
|