2013-08-21 16:37:30 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2013 Conformal Systems LLC <info@conformal.com>
|
|
|
|
*
|
|
|
|
* Permission to use, copy, modify, and distribute this software for any
|
|
|
|
* purpose with or without fee is hereby granted, provided that the above
|
|
|
|
* copyright notice and this permission notice appear in all copies.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
|
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
|
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
|
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
|
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
|
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package wallet
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"code.google.com/p/go.crypto/ripemd160"
|
|
|
|
"crypto/aes"
|
|
|
|
"crypto/cipher"
|
2013-09-09 20:14:57 +02:00
|
|
|
"crypto/ecdsa"
|
2013-09-03 06:10:32 +02:00
|
|
|
"crypto/rand"
|
2013-08-21 16:37:30 +02:00
|
|
|
"crypto/sha256"
|
|
|
|
"crypto/sha512"
|
|
|
|
"encoding/binary"
|
2013-11-20 16:17:49 +01:00
|
|
|
"encoding/hex"
|
2013-08-21 16:37:30 +02:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"github.com/conformal/btcec"
|
|
|
|
"github.com/conformal/btcutil"
|
|
|
|
"github.com/conformal/btcwire"
|
2013-09-03 06:10:32 +02:00
|
|
|
"hash"
|
2013-08-21 16:37:30 +02:00
|
|
|
"io"
|
2013-09-03 06:10:32 +02:00
|
|
|
"math/big"
|
2013-08-21 19:25:22 +02:00
|
|
|
"sync"
|
2013-09-03 06:10:32 +02:00
|
|
|
"time"
|
2013-08-21 16:37:30 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// Length in bytes of KDF output.
	kdfOutputBytes = 32

	// Maximum length in bytes of a comment that can have a size represented
	// as a uint16.
	maxCommentLen = (1 << 16) - 1

	// Number of addresses to extend keypool by.
	nKeypoolIncrement = 100
)
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// Default KDF tuning parameters used when a new wallet is created:
// target roughly a quarter second of key-derivation compute time and
// cap the lookup table at 32 MiB.
const (
	defaultKdfComputeTime = 0.25
	defaultKdfMaxMem      = 32 * 1024 * 1024
)
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// Possible errors when dealing with wallets.
var (
	ErrAddressNotFound    = errors.New("address not found")
	ErrChecksumMismatch   = errors.New("checksum mismatch")
	ErrMalformedEntry     = errors.New("malformed entry")
	ErrNetworkMismatch    = errors.New("network mismatch")
	ErrWalletDoesNotExist = errors.New("non-existant wallet")
	ErrWalletLocked       = errors.New("wallet is locked")
)
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
var (
	// fileID is the magic prefix written at the start of every wallet
	// file: '\xbaWALLET\x00'.
	fileID = [8]byte{0xba, 0x57, 0x41, 0x4c, 0x4c, 0x45, 0x54, 0x00}

	// Bitcoin network magic bytes, used to tag which network a wallet
	// file belongs to.
	mainnetMagicBytes = [4]byte{0xf9, 0xbe, 0xb4, 0xd9}
	testnetMagicBytes = [4]byte{0x0b, 0x11, 0x09, 0x07}
)
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// entryHeader is the single-byte tag prefixed to each appended wallet
// entry, identifying which concrete entry type follows.
type entryHeader byte

const (
	addrCommentHeader entryHeader = 1 << iota
	txCommentHeader
	deletedHeader
	// addrHeader is zero, distinct from all of the shifted values above.
	addrHeader entryHeader = 0
)
|
|
|
|
|
|
|
|
// We want to use binaryRead and binaryWrite instead of binary.Read
|
|
|
|
// and binary.Write because those from the binary package do not return
|
|
|
|
// the number of bytes actually written or read. We need to return
|
|
|
|
// this value to correctly support the io.ReaderFrom and io.WriterTo
|
|
|
|
// interfaces.
|
|
|
|
func binaryRead(r io.Reader, order binary.ByteOrder, data interface{}) (n int64, err error) {
|
|
|
|
var read int
|
|
|
|
buf := make([]byte, binary.Size(data))
|
|
|
|
if read, err = r.Read(buf); err != nil {
|
|
|
|
return int64(read), err
|
|
|
|
}
|
2013-09-09 20:14:57 +02:00
|
|
|
if read < binary.Size(data) {
|
|
|
|
return int64(read), io.EOF
|
|
|
|
}
|
2013-08-21 16:37:30 +02:00
|
|
|
return int64(read), binary.Read(bytes.NewBuffer(buf), order, data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// See comment for binaryRead().
|
|
|
|
func binaryWrite(w io.Writer, order binary.ByteOrder, data interface{}) (n int64, err error) {
|
|
|
|
var buf bytes.Buffer
|
|
|
|
if err = binary.Write(&buf, order, data); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
written, err := w.Write(buf.Bytes())
|
|
|
|
return int64(written), err
|
|
|
|
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// Calculate the hash of hasher over buf.
|
|
|
|
func calcHash(buf []byte, hasher hash.Hash) []byte {
|
|
|
|
hasher.Write(buf)
|
|
|
|
return hasher.Sum(nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
// calculate hash160 which is ripemd160(sha256(data))
|
|
|
|
func calcHash160(buf []byte) []byte {
|
|
|
|
return calcHash(calcHash(buf, sha256.New()), ripemd160.New())
|
|
|
|
}
|
|
|
|
|
|
|
|
// calculate hash256 which is sha256(sha256(data))
|
|
|
|
func calcHash256(buf []byte) []byte {
|
|
|
|
return calcHash(calcHash(buf, sha256.New()), sha256.New())
|
|
|
|
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
// calculate sha512(data)
|
|
|
|
func calcSha512(buf []byte) []byte {
|
|
|
|
return calcHash(buf, sha512.New())
|
|
|
|
}
|
|
|
|
|
2013-11-04 17:50:32 +01:00
|
|
|
// pubkeyFromPrivkey creates an encoded pubkey based on a
|
|
|
|
// 32-byte privkey. The returned pubkey is 33 bytes if compressed,
|
|
|
|
// or 65 bytes if uncompressed.
|
|
|
|
func pubkeyFromPrivkey(privkey []byte, compress bool) (pubkey []byte) {
|
2013-09-03 06:10:32 +02:00
|
|
|
x, y := btcec.S256().ScalarBaseMult(privkey)
|
2013-09-09 20:14:57 +02:00
|
|
|
pub := (*btcec.PublicKey)(&ecdsa.PublicKey{
|
|
|
|
Curve: btcec.S256(),
|
|
|
|
X: x,
|
|
|
|
Y: y,
|
|
|
|
})
|
2013-11-04 17:50:32 +01:00
|
|
|
|
|
|
|
if compress {
|
|
|
|
return pub.SerializeCompressed()
|
|
|
|
}
|
2013-09-09 20:14:57 +02:00
|
|
|
return pub.SerializeUncompressed()
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// keyOneIter performs a single iteration of the Armory ROMix-style
// KDF: it fills a memReqts-byte lookup table with chained SHA-512
// hashes of the salted passphrase, then repeatedly mixes the final
// hash with pseudo-randomly selected table entries.  The first
// kdfOutputBytes of the final hash are returned.
//
// NOTE(review): append may write salt into passphrase's backing array
// if it has spare capacity, mutating the caller's slice — confirm all
// callers pass a slice they own.
func keyOneIter(passphrase, salt []byte, memReqts uint64) []byte {
	saltedpass := append(passphrase, salt...)
	lutbl := make([]byte, memReqts)

	// Seed for lookup table
	seed := calcSha512(saltedpass)
	copy(lutbl[:sha512.Size], seed)

	// Fill the rest of the table: each 64-byte slot is the SHA-512
	// of the previous slot.
	for nByte := 0; nByte < (int(memReqts) - sha512.Size); nByte += sha512.Size {
		hash := calcSha512(lutbl[nByte : nByte+sha512.Size])
		copy(lutbl[nByte+sha512.Size:nByte+2*sha512.Size], hash[:])
	}

	// x aliases the last table slot; it is mixed in place below.
	x := lutbl[cap(lutbl)-sha512.Size:]

	// Perform seqCt/2 sequential, data-dependent lookups so the whole
	// table must stay resident (the memory-hard property).
	seqCt := uint32(memReqts / sha512.Size)
	nLookups := seqCt / 2
	for i := uint32(0); i < nLookups; i++ {
		// Armory ignores endianness here.  We assume LE.
		newIdx := binary.LittleEndian.Uint32(x[cap(x)-4:]) % seqCt

		// Index of hash result at newIdx
		vIdx := newIdx * sha512.Size
		v := lutbl[vIdx : vIdx+sha512.Size]

		// XOR hash x with hash v
		for j := 0; j < sha512.Size; j++ {
			x[j] ^= v[j]
		}

		// Save new hash to x
		hash := calcSha512(x)
		copy(x, hash[:])
	}

	return x[:kdfOutputBytes]
}
|
|
|
|
|
|
|
|
// Key implements the key derivation function used by Armory
|
|
|
|
// based on the ROMix algorithm described in Colin Percival's paper
|
|
|
|
// "Stronger Key Derivation via Sequential Memory-Hard Functions"
|
|
|
|
// (http://www.tarsnap.com/scrypt/scrypt.pdf).
|
2013-09-03 06:10:32 +02:00
|
|
|
func Key(passphrase []byte, params *kdfParameters) []byte {
|
2013-08-21 16:37:30 +02:00
|
|
|
masterKey := passphrase
|
2013-09-03 06:10:32 +02:00
|
|
|
for i := uint32(0); i < params.nIter; i++ {
|
|
|
|
masterKey = keyOneIter(masterKey, params.salt[:], params.mem)
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
return masterKey
|
|
|
|
}
|
|
|
|
|
2013-11-04 17:50:32 +01:00
|
|
|
func pad(size int, b []byte) []byte {
|
|
|
|
// Prevent a possible panic if the input exceeds the expected size.
|
|
|
|
if len(b) > size {
|
|
|
|
size = len(b)
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
2013-11-04 17:50:32 +01:00
|
|
|
|
|
|
|
p := make([]byte, size)
|
|
|
|
copy(p[size-len(b):], b)
|
|
|
|
return p
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
// ChainedPrivKey deterministically generates a new private key using a
// previous address and chaincode.  privkey and chaincode must be 32
// bytes long, and pubkey may either be 33 bytes, 65 bytes or nil (in
// which case it is generated by the privkey).
//
// The returned key is always exactly 32 bytes (left-padded with
// zeros if necessary).
func ChainedPrivKey(privkey, pubkey, chaincode []byte) ([]byte, error) {
	if len(privkey) != 32 {
		return nil, fmt.Errorf("invalid privkey length %d (must be 32)",
			len(privkey))
	}
	if len(chaincode) != 32 {
		return nil, fmt.Errorf("invalid chaincode length %d (must be 32)",
			len(chaincode))
	}
	if pubkey == nil {
		// Derive the compressed pubkey from the private key.
		pubkey = pubkeyFromPrivkey(privkey, true)
	} else if !(len(pubkey) == 65 || len(pubkey) == 33) {
		return nil, fmt.Errorf("invalid pubkey length %d", len(pubkey))
	}

	// This is a perfect example of YOLO crypto.  Armory claims this XORing
	// with the SHA256 hash of the pubkey is done to add extra entropy (why
	// you'd want to add entropy to a deterministic function, I don't know),
	// even though the pubkey is generated directly from the privkey.  In
	// terms of security or privacy, this is a complete waste of CPU cycles,
	// but we do the same because we want to keep compatibility with
	// Armory's chained address generation.
	xorbytes := make([]byte, 32)
	chainMod := calcHash256(pubkey)
	for i := range xorbytes {
		xorbytes[i] = chainMod[i] ^ chaincode[i]
	}
	chainXor := new(big.Int).SetBytes(xorbytes)
	privint := new(big.Int).SetBytes(privkey)

	// New private key is (xor * priv) mod N, reduced into the curve's
	// scalar field.
	t := new(big.Int).Mul(chainXor, privint)
	b := t.Mod(t, btcec.S256().N).Bytes()
	return pad(32, b), nil
}
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// version represents a wallet file format version as four single-byte
// components: major.minor.bugfix.autoincrement.
type version struct {
	major         byte
	minor         byte
	bugfix        byte
	autoincrement byte
}
|
|
|
|
|
|
|
|
// Enforce that version satisfies the io.ReaderFrom and
// io.WriterTo interfaces.
var _ io.ReaderFrom = &version{}
var _ io.WriterTo = &version{}
|
|
|
|
|
|
|
|
// ReaderFromVersion is an io.ReaderFrom and io.WriterTo that
// can specify any particular wallet file format for reading
// depending on the wallet file version.
type ReaderFromVersion interface {
	ReadFromVersion(version, io.Reader) (int64, error)
	io.WriterTo
}
|
|
|
|
|
|
|
|
func (v version) String() string {
|
|
|
|
str := fmt.Sprintf("%d.%d", v.major, v.minor)
|
|
|
|
if v.bugfix != 0x00 || v.autoincrement != 0x00 {
|
|
|
|
str += fmt.Sprintf(".%d", v.bugfix)
|
|
|
|
}
|
|
|
|
if v.autoincrement != 0x00 {
|
|
|
|
str += fmt.Sprintf(".%d", v.autoincrement)
|
|
|
|
}
|
|
|
|
return str
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v version) Uint32() uint32 {
|
|
|
|
return uint32(v.major)<<6 | uint32(v.minor)<<4 | uint32(v.bugfix)<<2 | uint32(v.autoincrement)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v *version) ReadFrom(r io.Reader) (int64, error) {
|
|
|
|
// Read 4 bytes for the version.
|
|
|
|
versBytes := make([]byte, 4)
|
|
|
|
n, err := r.Read(versBytes)
|
|
|
|
if err != nil {
|
|
|
|
return int64(n), err
|
|
|
|
}
|
|
|
|
v.major = versBytes[0]
|
|
|
|
v.minor = versBytes[1]
|
|
|
|
v.bugfix = versBytes[2]
|
|
|
|
v.autoincrement = versBytes[3]
|
|
|
|
return int64(n), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v *version) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
// Write 4 bytes for the version.
|
|
|
|
versBytes := []byte{
|
|
|
|
v.major,
|
|
|
|
v.minor,
|
|
|
|
v.bugfix,
|
|
|
|
v.autoincrement,
|
|
|
|
}
|
|
|
|
n, err := w.Write(versBytes)
|
|
|
|
return int64(n), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// LT returns whether v is an earlier version than v2.
|
|
|
|
func (v version) LT(v2 version) bool {
|
|
|
|
switch {
|
|
|
|
case v.major < v2.major:
|
|
|
|
return true
|
|
|
|
|
|
|
|
case v.minor < v2.minor:
|
|
|
|
return true
|
|
|
|
|
|
|
|
case v.bugfix < v2.bugfix:
|
|
|
|
return true
|
|
|
|
|
|
|
|
case v.autoincrement < v2.autoincrement:
|
|
|
|
return true
|
|
|
|
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// EQ returns whether v2 is an equal version to v.
|
|
|
|
func (v version) EQ(v2 version) bool {
|
|
|
|
switch {
|
|
|
|
case v.major != v2.major:
|
|
|
|
return false
|
|
|
|
|
|
|
|
case v.minor != v2.minor:
|
|
|
|
return false
|
|
|
|
|
|
|
|
case v.bugfix != v2.bugfix:
|
|
|
|
return false
|
|
|
|
|
|
|
|
case v.autoincrement != v2.autoincrement:
|
|
|
|
return false
|
|
|
|
|
|
|
|
default:
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// GT returns whether v is a later version than v2.
|
|
|
|
func (v version) GT(v2 version) bool {
|
|
|
|
switch {
|
|
|
|
case v.major > v2.major:
|
|
|
|
return true
|
|
|
|
|
|
|
|
case v.minor > v2.minor:
|
|
|
|
return true
|
|
|
|
|
|
|
|
case v.bugfix > v2.bugfix:
|
|
|
|
return true
|
|
|
|
|
|
|
|
case v.autoincrement > v2.autoincrement:
|
|
|
|
return true
|
|
|
|
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Various versions.
var (
	// VersArmory is the latest version used by Armory.
	VersArmory = version{1, 35, 0, 0}

	// Vers20LastBlocks is the version where wallet files now hold
	// the 20 most recently seen block hashes.
	Vers20LastBlocks = version{1, 36, 0, 0}

	// VersCurrent is the current wallet file version.
	VersCurrent = Vers20LastBlocks
)
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
type varEntries []io.WriterTo
|
|
|
|
|
|
|
|
func (v *varEntries) WriteTo(w io.Writer) (n int64, err error) {
|
2013-11-20 02:18:11 +01:00
|
|
|
ss := []io.WriterTo(*v)
|
2013-08-21 16:37:30 +02:00
|
|
|
|
|
|
|
var written int64
|
|
|
|
for _, s := range ss {
|
|
|
|
var err error
|
|
|
|
if written, err = s.WriteTo(w); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
}
|
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadFrom replaces v's contents with entries deserialized from r,
// implementing io.ReaderFrom.  Each entry is prefixed by a single
// entryHeader byte selecting the concrete entry type; reading
// continues until a clean EOF at an entry boundary, which is not
// treated as an error.
func (v *varEntries) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	// Remove any previous entries.
	*v = nil
	wts := []io.WriterTo(*v)

	// Keep reading entries until an EOF is reached.
	for {
		var header entryHeader
		if read, err = binaryRead(r, binary.LittleEndian, &header); err != nil {
			// EOF here is not an error.
			if err == io.EOF {
				return n + read, nil
			}
			return n + read, err
		}
		n += read

		var wt io.WriterTo
		switch header {
		case addrHeader:
			var entry addrEntry
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			wt = &entry
		case addrCommentHeader:
			var entry addrCommentEntry
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			wt = &entry
		case txCommentHeader:
			var entry txCommentEntry
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			wt = &entry
		case deletedHeader:
			// Deleted entries are consumed but intentionally not
			// retained (wt stays nil).
			var entry deletedEntry
			if read, err = entry.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
		default:
			return n, fmt.Errorf("unknown entry header: %d", uint8(header))
		}
		if wt != nil {
			wts = append(wts, wt)
			*v = wts
		}
	}
}
|
|
|
|
|
2013-11-04 17:50:32 +01:00
|
|
|
// addressHashKey is an address's pubkey hash converted to a string so
// it can be used as a map key.
type addressHashKey string

// transactionHashKey is a transaction hash converted to a string so it
// can be used as a map key.
type transactionHashKey string

// comment is free-form comment text attached to an address or
// transaction.
type comment []byte
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// Wallet represents an btcwallet wallet in memory.  It implements
// the io.ReaderFrom and io.WriterTo interfaces to read from and
// write to any type of byte streams, including files.
type Wallet struct {
	net          btcwire.BitcoinNet // network the wallet belongs to
	flags        walletFlags
	uniqID       [6]byte
	createDate   int64    // Unix timestamp of wallet creation
	name         [32]byte // NUL-padded account name
	desc         [256]byte
	highestUsed  int64 // chain index of the highest used address
	kdfParams    kdfParameters
	keyGenerator btcAddress // root address all chained keys derive from

	// These are non-standard and fit in the extra 1024 bytes between the
	// root address and the appended entries.
	recent recentBlocks

	addrMap        map[addressHashKey]*btcAddress
	addrCommentMap map[addressHashKey]comment
	txCommentMap   map[transactionHashKey]comment

	// These are not serialized.
	secret struct {
		sync.Mutex
		key []byte // KDF-derived AES key; nil while the wallet is locked
	}
	chainIdxMap   map[int64]addressHashKey
	importedAddrs []*btcAddress
	lastChainIdx  int64
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
// NewWallet creates and initializes a new Wallet.  name's and
// desc's binary representation must not exceed 32 and 256 bytes,
// respectively.  All address private keys are encrypted with passphrase.
// The wallet is returned unlocked.
func NewWallet(name, desc string, passphrase []byte, net btcwire.BitcoinNet,
	createdAt *BlockStamp) (*Wallet, error) {

	// Check sizes of inputs.
	if len([]byte(name)) > 32 {
		return nil, errors.New("name exceeds 32 byte maximum size")
	}
	if len([]byte(desc)) > 256 {
		return nil, errors.New("desc exceeds 256 byte maximum size")
	}

	// Randomly-generate rootkey and chaincode.
	rootkey, chaincode := make([]byte, 32), make([]byte, 32)
	if _, err := rand.Read(rootkey); err != nil {
		return nil, err
	}
	if _, err := rand.Read(chaincode); err != nil {
		return nil, err
	}

	// Create new root address from key and chaincode.
	root, err := newRootBtcAddress(rootkey, nil, chaincode, createdAt)
	if err != nil {
		return nil, err
	}

	// Verify root address keypairs.
	if err := root.verifyKeypairs(); err != nil {
		return nil, err
	}

	// Compute AES key and encrypt root address.  The clear text root
	// must be encrypted before the wallet struct copies it below.
	kdfp, err := computeKdfParameters(defaultKdfComputeTime, defaultKdfMaxMem)
	if err != nil {
		return nil, err
	}
	aeskey := Key([]byte(passphrase), kdfp)
	if err := root.encrypt(aeskey); err != nil {
		return nil, err
	}

	// Create and fill wallet.
	w := &Wallet{
		// TODO(jrick): not sure we will need uniqID, but would be good for
		// compat with armory.
		net: net,
		flags: walletFlags{
			useEncryption: true,
			watchingOnly:  false,
		},
		createDate:   time.Now().Unix(),
		highestUsed:  rootKeyChainIdx,
		kdfParams:    *kdfp,
		keyGenerator: *root,
		recent: recentBlocks{
			lastHeight: createdAt.Height,
			hashes: []*btcwire.ShaHash{
				&createdAt.Hash,
			},
		},
		addrMap:        make(map[addressHashKey]*btcAddress),
		addrCommentMap: make(map[addressHashKey]comment),
		txCommentMap:   make(map[transactionHashKey]comment),
		chainIdxMap:    make(map[int64]addressHashKey),
		lastChainIdx:   rootKeyChainIdx,
	}
	copy(w.name[:], []byte(name))
	copy(w.desc[:], []byte(desc))

	// Add root address to maps.
	w.addrMap[addressHashKey(w.keyGenerator.pubKeyHash[:])] = &w.keyGenerator
	w.chainIdxMap[rootKeyChainIdx] = addressHashKey(w.keyGenerator.pubKeyHash[:])

	// Fill keypool.
	if err := w.extendKeypool(nKeypoolIncrement, aeskey, createdAt); err != nil {
		return nil, err
	}

	return w, nil
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
// Name returns the name of a wallet. This name is used as the
|
|
|
|
// account name for btcwallet JSON methods.
|
2013-09-03 23:16:07 +02:00
|
|
|
func (w *Wallet) Name() string {
|
2013-11-22 20:46:23 +01:00
|
|
|
last := len(w.name[:])
|
|
|
|
for i, b := range w.name[:] {
|
|
|
|
if b == 0x00 {
|
|
|
|
last = i
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return string(w.name[:last])
|
2013-09-03 23:16:07 +02:00
|
|
|
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// ReadFrom reads data from a io.Reader and saves it to a Wallet,
// returning the number of bytes read and any errors encountered.
// The field order must match Wallet.WriteTo exactly.
func (w *Wallet) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	// Reset lookup maps so a reused Wallet holds no stale state.
	w.addrMap = make(map[addressHashKey]*btcAddress)
	w.addrCommentMap = make(map[addressHashKey]comment)
	w.chainIdxMap = make(map[int64]addressHashKey)
	w.txCommentMap = make(map[transactionHashKey]comment)

	var id [8]byte
	var vers version
	var appendedEntries varEntries

	// Iterate through each entry needing to be read.  If data
	// implements io.ReaderFrom, use its ReadFrom func.  Otherwise,
	// data is a pointer to a fixed sized value.
	datas := []interface{}{
		&id,
		&vers,
		&w.net,
		&w.flags,
		&w.uniqID,
		&w.createDate,
		&w.name,
		&w.desc,
		&w.highestUsed,
		&w.kdfParams,
		make([]byte, 256), // reserved space, contents discarded
		&w.keyGenerator,
		newUnusedSpace(1024, &w.recent),
		&appendedEntries,
	}
	for _, data := range datas {
		var err error
		switch d := data.(type) {
		case ReaderFromVersion:
			// Version-dependent deserialization; vers was read above.
			read, err = d.ReadFromVersion(vers, r)

		case io.ReaderFrom:
			read, err = d.ReadFrom(r)

		default:
			read, err = binaryRead(r, binary.LittleEndian, d)
		}
		n += read
		if err != nil {
			return n, err
		}
	}

	if id != fileID {
		return n, errors.New("unknown file ID")
	}

	// Add root address to address map.
	rootAddrKey := addressHashKey(w.keyGenerator.pubKeyHash[:])
	w.addrMap[rootAddrKey] = &w.keyGenerator
	w.chainIdxMap[rootKeyChainIdx] = rootAddrKey

	// Fill unserialized fields from the appended entries.
	wts := ([]io.WriterTo)(appendedEntries)
	for _, wt := range wts {
		switch e := wt.(type) {
		case *addrEntry:
			addrKey := addressHashKey(e.pubKeyHash160[:])
			w.addrMap[addrKey] = &e.addr
			if e.addr.chainIndex == importedKeyChainIdx {
				w.importedAddrs = append(w.importedAddrs, &e.addr)
			} else {
				w.chainIdxMap[e.addr.chainIndex] = addrKey
				if w.lastChainIdx < e.addr.chainIndex {
					w.lastChainIdx = e.addr.chainIndex
				}
			}

		case *addrCommentEntry:
			addrKey := addressHashKey(e.pubKeyHash160[:])
			w.addrCommentMap[addrKey] = comment(e.comment)

		case *txCommentEntry:
			txKey := transactionHashKey(e.txHash[:])
			w.txCommentMap[txKey] = comment(e.comment)

		default:
			return n, errors.New("unknown appended entry")
		}
	}

	return n, nil
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// WriteTo serializes a Wallet and writes it to a io.Writer,
// returning the number of bytes written and any errors encountered.
// The field order must match Wallet.ReadFrom exactly.
func (w *Wallet) WriteTo(wtr io.Writer) (n int64, err error) {
	var wts []io.WriterTo
	// chainIdxMap holds the root address plus all chained addresses;
	// the root is serialized separately, hence len-1.
	var chainedAddrs = make([]io.WriterTo, len(w.chainIdxMap)-1)
	var importedAddrs []io.WriterTo
	for hash, addr := range w.addrMap {
		e := &addrEntry{
			addr: *addr,
		}
		copy(e.pubKeyHash160[:], []byte(hash))
		if addr.chainIndex >= 0 {
			// Chained addresses are sorted.  This is
			// kind of nice but probably isn't necessary.
			chainedAddrs[addr.chainIndex] = e
		} else if addr.chainIndex == importedKeyChainIdx {
			// No order for imported addresses.
			importedAddrs = append(importedAddrs, e)
		}
		// The root address (neither chained nor imported) is skipped
		// here; it is written as w.keyGenerator below.
	}
	wts = append(chainedAddrs, importedAddrs...)
	for hash, comment := range w.addrCommentMap {
		e := &addrCommentEntry{
			comment: []byte(comment),
		}
		copy(e.pubKeyHash160[:], []byte(hash))
		wts = append(wts, e)
	}
	for hash, comment := range w.txCommentMap {
		e := &txCommentEntry{
			comment: []byte(comment),
		}
		copy(e.txHash[:], []byte(hash))
		wts = append(wts, e)
	}
	appendedEntries := varEntries(wts)

	// Iterate through each entry needing to be written.  If data
	// implements io.WriterTo, use its WriteTo func.  Otherwise,
	// data is a pointer to a fixed size value.
	datas := []interface{}{
		&fileID,
		&VersCurrent,
		&w.net,
		&w.flags,
		&w.uniqID,
		&w.createDate,
		&w.name,
		&w.desc,
		&w.highestUsed,
		&w.kdfParams,
		make([]byte, 256), // reserved space
		&w.keyGenerator,
		newUnusedSpace(1024, &w.recent),
		&appendedEntries,
	}
	var written int64
	for _, data := range datas {
		if s, ok := data.(io.WriterTo); ok {
			written, err = s.WriteTo(wtr)
		} else {
			written, err = binaryWrite(wtr, binary.LittleEndian, data)
		}
		n += written
		if err != nil {
			return n, err
		}
	}

	return n, nil
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// Unlock derives an AES key from passphrase and wallet's KDF
|
2013-11-11 21:30:50 +01:00
|
|
|
// parameters and unlocks the root key of the wallet. If
|
|
|
|
// the unlock was successful, the wallet's secret key is saved,
|
|
|
|
// allowing the decryption of any encrypted private key.
|
2013-09-03 06:10:32 +02:00
|
|
|
func (w *Wallet) Unlock(passphrase []byte) error {
|
2013-11-11 21:30:50 +01:00
|
|
|
// Derive key from KDF parameters and passphrase.
|
2013-09-03 06:10:32 +02:00
|
|
|
key := Key(passphrase, &w.kdfParams)
|
2013-08-21 16:37:30 +02:00
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
// Unlock root address with derived key.
|
|
|
|
if _, err := w.keyGenerator.unlock(key); err != nil {
|
2013-08-21 19:25:22 +02:00
|
|
|
return err
|
|
|
|
}
|
2013-11-11 21:30:50 +01:00
|
|
|
|
|
|
|
// If unlock was successful, save the secret key.
|
|
|
|
w.secret.Lock()
|
|
|
|
w.secret.key = key
|
|
|
|
w.secret.Unlock()
|
2013-09-09 20:14:57 +02:00
|
|
|
return nil
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
// Lock performs a best try effort to remove and zero all secret keys
// associated with the wallet.
//
// Even if the wallet secret is already nil (returning ErrWalletLocked),
// the per-address clear text private keys are still scrubbed below, so
// Lock is safe to call repeatedly.
func (w *Wallet) Lock() (err error) {
	// Remove clear text passphrase from wallet.
	w.secret.Lock()
	if w.secret.key == nil {
		err = ErrWalletLocked
	} else {
		// Overwrite the key bytes before dropping the reference so
		// the secret does not linger in memory.
		zero(w.secret.key)
		w.secret.key = nil
	}
	w.secret.Unlock()

	// Remove clear text private keys from all address entries.
	for _, addr := range w.addrMap {
		addr.privKeyCT.Lock()
		zero(addr.privKeyCT.key)
		addr.privKeyCT.key = nil
		addr.privKeyCT.Unlock()
	}

	return err
}
|
|
|
|
|
|
|
|
// zero overwrites every byte of b with 0.  It is used as a best-effort
// scrub of key material from memory before references are dropped.
func zero(b []byte) {
	for i := 0; i < len(b); i++ {
		b[i] = 0
	}
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// IsLocked returns whether a wallet is unlocked (in which case the
|
|
|
|
// key is saved in memory), or locked.
|
|
|
|
func (w *Wallet) IsLocked() (locked bool) {
|
2013-11-11 21:30:50 +01:00
|
|
|
w.secret.Lock()
|
|
|
|
locked = w.secret.key == nil
|
|
|
|
w.secret.Unlock()
|
2013-08-21 20:46:20 +02:00
|
|
|
return locked
|
|
|
|
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
// Version returns a wallet's version as a string and int.
//
// NOTE(review): currently a stub — it always returns the zero values
// ("", 0) rather than the wallet file's actual version.  TODO: return
// the real version information.
func (w *Wallet) Version() (string, int) {
	return "", 0
}
|
|
|
|
|
2013-11-21 17:57:28 +01:00
|
|
|
// NextChainedAddress attempts to get the next chained address,
|
|
|
|
// refilling the keypool if necessary.
|
|
|
|
func (w *Wallet) NextChainedAddress(bs *BlockStamp) (string, error) {
|
2013-11-11 21:30:50 +01:00
|
|
|
// Attempt to get address hash of next chained address.
|
2013-11-20 02:18:11 +01:00
|
|
|
next160, ok := w.chainIdxMap[w.highestUsed+1]
|
|
|
|
if !ok {
|
2013-11-21 17:57:28 +01:00
|
|
|
// Extending the keypool requires an unlocked wallet.
|
|
|
|
aeskey := make([]byte, 32)
|
|
|
|
w.secret.Lock()
|
|
|
|
if len(w.secret.key) != 32 {
|
|
|
|
w.secret.Unlock()
|
|
|
|
return "", ErrWalletLocked
|
|
|
|
}
|
|
|
|
copy(aeskey, w.secret.key)
|
|
|
|
w.secret.Unlock()
|
|
|
|
|
|
|
|
err := w.extendKeypool(nKeypoolIncrement, aeskey, bs)
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
next160, ok = w.chainIdxMap[w.highestUsed+1]
|
|
|
|
if !ok {
|
|
|
|
return "", errors.New("chain index map inproperly updated")
|
|
|
|
}
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
2013-11-11 21:30:50 +01:00
|
|
|
|
|
|
|
// Look up address.
|
2013-11-21 17:57:28 +01:00
|
|
|
addr, ok := w.addrMap[next160]
|
|
|
|
if !ok {
|
2013-09-09 20:14:57 +02:00
|
|
|
return "", errors.New("cannot find generated address")
|
|
|
|
}
|
2013-11-11 21:30:50 +01:00
|
|
|
|
2013-11-21 17:57:28 +01:00
|
|
|
w.highestUsed++
|
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
// Create and return payment address for address hash.
|
2013-09-09 20:14:57 +02:00
|
|
|
return addr.paymentAddress(w.net)
|
|
|
|
}
|
|
|
|
|
2013-11-21 17:57:28 +01:00
|
|
|
// extendKeypool grows the keypool by n addresses.
//
// aeskey is the wallet's clear text secret (required to decrypt the
// previous chained private key and to encrypt each new one), and bs
// records the block the new addresses are created at.
func (w *Wallet) extendKeypool(n uint, aeskey []byte, bs *BlockStamp) error {
	// Get last chained address. New chained addresses will be
	// chained off of this address's chaincode and private key.
	addrKey := w.chainIdxMap[w.lastChainIdx]
	addr, ok := w.addrMap[addrKey]
	if !ok {
		return errors.New("expected last chained address not found")
	}
	// Chaining needs the clear text private key of the last address.
	privkey, err := addr.unlock(aeskey)
	if err != nil {
		return err
	}
	cc := addr.chaincode[:]

	// Create n encrypted addresses and add each to the wallet's
	// bookkeeping maps.
	for i := uint(0); i < n; i++ {
		// Derive the next private key from the previous key, its
		// public key, and the shared chaincode.
		privkey, err = ChainedPrivKey(privkey, addr.pubKey, cc)
		if err != nil {
			return err
		}
		newaddr, err := newBtcAddress(privkey, nil, bs, true)
		if err != nil {
			return err
		}
		// Sanity-check the generated keypair before committing the
		// address to the wallet.
		if err := newaddr.verifyKeypairs(); err != nil {
			return err
		}
		if err = newaddr.encrypt(aeskey); err != nil {
			return err
		}
		addrKey := addressHashKey(newaddr.pubKeyHash[:])
		w.addrMap[addrKey] = newaddr
		newaddr.chainIndex = addr.chainIndex + 1
		w.chainIdxMap[newaddr.chainIndex] = addrKey
		w.lastChainIdx++
		// armory does this.. but all the chaincodes are equal so why
		// not use the root's?
		copy(newaddr.chaincode[:], cc)
		// The new address becomes the chain tail for the next round.
		addr = newaddr
	}

	return nil
}
|
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
// addrHashForAddress decodes and returns the address hash for a
|
|
|
|
// payment address string, performing some basic sanity checking that it
|
|
|
|
// matches the Bitcoin network used by the wallet.
|
|
|
|
func (w *Wallet) addrHashForAddress(addr string) ([]byte, error) {
|
2013-09-09 20:14:57 +02:00
|
|
|
addr160, net, err := btcutil.DecodeAddress(addr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2013-11-11 21:30:50 +01:00
|
|
|
|
|
|
|
// Return error if address is for the wrong Bitcoin network.
|
2013-09-09 20:14:57 +02:00
|
|
|
switch {
|
|
|
|
case net == btcutil.MainNetAddr && w.net != btcwire.MainNet:
|
|
|
|
fallthrough
|
|
|
|
case net == btcutil.TestNetAddr && w.net != btcwire.TestNet:
|
2013-11-11 21:30:50 +01:00
|
|
|
return nil, ErrNetworkMismatch
|
2013-09-09 20:14:57 +02:00
|
|
|
}
|
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
return addr160, nil
|
|
|
|
}
|
|
|
|
|
2013-12-02 20:56:06 +01:00
|
|
|
// AddressKey returns the private key for a payment address stored
// in a wallet.  This can fail if the payment address is for a different
// Bitcoin network than what this wallet uses, the address is not
// contained in the wallet, the address does not include a public and
// private key, or if the wallet is locked.
func (w *Wallet) AddressKey(addr string) (key *ecdsa.PrivateKey, err error) {
	// Get address hash for payment address string.
	addr160, err := w.addrHashForAddress(addr)
	if err != nil {
		return nil, err
	}

	// Lookup address from map.
	btcaddr, ok := w.addrMap[addressHashKey(addr160)]
	if !ok {
		return nil, ErrAddressNotFound
	}

	// Both the pubkey and encrypted privkey must be recorded to return
	// the private key.  Error if neither are saved.
	if !btcaddr.flags.hasPubKey {
		return nil, errors.New("no public key for address")
	}
	if !btcaddr.flags.hasPrivKey {
		return nil, errors.New("no private key for address")
	}

	// Parse public key.
	pubkey, err := btcec.ParsePubKey(btcaddr.pubKey, btcec.S256())
	if err != nil {
		return nil, err
	}

	// The wallet's secret will be zeroed on lock, so make a local
	// copy.
	localSecret := make([]byte, 32)
	w.secret.Lock()
	if len(w.secret.key) != 32 {
		// A secret that is not exactly 32 bytes means the wallet
		// is locked (nil key) or corrupt.
		w.secret.Unlock()
		return nil, ErrWalletLocked
	}
	copy(localSecret, w.secret.key)
	w.secret.Unlock()

	// Unlock address with wallet secret.  unlock returns a copy of the
	// clear text private key, and may be used safely even during an address
	// lock.
	privKeyCT, err := btcaddr.unlock(localSecret)
	if err != nil {
		return nil, err
	}

	// Reassemble the ECDSA private key from the parsed public key and
	// the clear text secret exponent.
	return &ecdsa.PrivateKey{
		PublicKey: *pubkey,
		D:         new(big.Int).SetBytes(privKeyCT),
	}, nil
}
|
|
|
|
|
2013-12-02 20:56:06 +01:00
|
|
|
// AddressInfo returns an AddressInfo structure for an address in a wallet.
|
|
|
|
func (w *Wallet) AddressInfo(addr string) (*AddressInfo, error) {
|
2013-11-11 21:30:50 +01:00
|
|
|
// Get address hash for addr.
|
|
|
|
addr160, err := w.addrHashForAddress(addr)
|
2013-11-04 17:50:32 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
// Look up address by address hash.
|
2013-11-04 17:50:32 +01:00
|
|
|
btcaddr, ok := w.addrMap[addressHashKey(addr160)]
|
|
|
|
if !ok {
|
|
|
|
return nil, errors.New("address not in wallet")
|
|
|
|
}
|
|
|
|
|
|
|
|
return btcaddr.info(w.net)
|
|
|
|
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
// Net returns the bitcoin network identifier for this wallet.
func (w *Wallet) Net() btcwire.BitcoinNet {
	return w.net
}
|
|
|
|
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
// SetSyncedWith marks the wallet to be in sync with the block
// described by height and hash.
func (w *Wallet) SetSyncedWith(bs *BlockStamp) {
	// Check if we're trying to rollback the last seen history.
	// If so, and this bs is already saved, remove anything
	// after and return.  Otherwise, remove previous hashes.
	if bs.Height < w.recent.lastHeight {
		// Index this stamp's hash would occupy if it is one of the
		// saved recent hashes.
		maybeIdx := len(w.recent.hashes) - 1 - int(w.recent.lastHeight-bs.Height)
		if maybeIdx >= 0 && maybeIdx < len(w.recent.hashes) &&
			*w.recent.hashes[maybeIdx] == bs.Hash {

			w.recent.lastHeight = bs.Height
			// subslice out the removed hashes.
			w.recent.hashes = w.recent.hashes[:maybeIdx]
			return
		}
		// Rollback to an unknown hash: the saved history is useless.
		w.recent.hashes = nil
	}

	// A height gap (not the direct successor) also invalidates the
	// saved hash history.
	if bs.Height != w.recent.lastHeight+1 {
		w.recent.hashes = nil
	}

	w.recent.lastHeight = bs.Height
	blockSha := new(btcwire.ShaHash)
	copy(blockSha[:], bs.Hash[:])
	// At most 20 hashes are kept; shift out the oldest when full.
	if len(w.recent.hashes) == 20 {
		// Make room for the most recent hash.
		copy(w.recent.hashes, w.recent.hashes[1:])

		// Set new block in the last position.
		w.recent.hashes[19] = blockSha
	} else {
		w.recent.hashes = append(w.recent.hashes, blockSha)
	}
}
|
|
|
|
|
|
|
|
// SyncedWith returns the height and hash of the block the wallet is
|
|
|
|
// currently marked to be in sync with.
|
|
|
|
func (w *Wallet) SyncedWith() *BlockStamp {
|
2013-12-06 21:37:07 +01:00
|
|
|
nHashes := len(w.recent.hashes)
|
|
|
|
if nHashes == 0 || w.recent.lastHeight == -1 {
|
|
|
|
return &BlockStamp{
|
|
|
|
Height: -1,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
lastSha := w.recent.hashes[nHashes-1]
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
return &BlockStamp{
|
2013-12-06 21:37:07 +01:00
|
|
|
Height: w.recent.lastHeight,
|
|
|
|
Hash: *lastSha,
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// NewIterateRecentBlocks returns an iterator for recently-seen blocks.
// The iterator starts at the most recently-added block, and Prev should
// be used to access earlier blocks.
func (w *Wallet) NewIterateRecentBlocks() RecentBlockIterator {
	// Delegates directly to the wallet's recent-block list.
	return w.recent.NewIterator()
}
|
|
|
|
|
2013-11-20 02:18:11 +01:00
|
|
|
// EarliestBlockHeight returns the height of the blockchain for when any
|
|
|
|
// wallet address first appeared. This will usually be the block height
|
|
|
|
// at the time of wallet creation, unless a private key with an earlier
|
|
|
|
// block height was imported into the wallet. This is needed when
|
|
|
|
// performing a full rescan to prevent unnecessary rescanning before
|
|
|
|
// wallet addresses first appeared.
|
|
|
|
func (w *Wallet) EarliestBlockHeight() int32 {
|
|
|
|
height := w.keyGenerator.firstBlock
|
|
|
|
|
|
|
|
// Imported keys will be the only ones that may have an earlier
|
|
|
|
// blockchain height. Check each and set the returned height
|
|
|
|
for _, addr := range w.importedAddrs {
|
|
|
|
if addr.firstBlock < height {
|
|
|
|
height = addr.firstBlock
|
|
|
|
|
|
|
|
// Can't go any lower than 0.
|
|
|
|
if height == 0 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return height
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
}
|
|
|
|
|
2013-11-20 02:18:11 +01:00
|
|
|
// ImportPrivateKey creates a new encrypted btcAddress with a
// user-provided private key and adds it to the wallet.  If the
// import is successful, the payment address string is returned.
//
// The wallet must be unlocked (ErrWalletLocked otherwise), compressed
// selects the pubkey serialization format, and bs records the block
// the key is imported at.
func (w *Wallet) ImportPrivateKey(privkey []byte, compressed bool,
	bs *BlockStamp) (string, error) {

	// The wallet's secret will be zeroed on lock, so make a local
	// copy.
	w.secret.Lock()
	if len(w.secret.key) != 32 {
		w.secret.Unlock()
		return "", ErrWalletLocked
	}
	localSecret := make([]byte, 32)
	copy(localSecret, w.secret.key)
	w.secret.Unlock()

	// Create new address with this private key.
	addr, err := newBtcAddress(privkey, nil, bs, compressed)
	if err != nil {
		return "", err
	}
	// Imported addresses are not part of the deterministic chain.
	addr.chainIndex = importedKeyChainIdx

	// Encrypt imported address with the derived AES key.
	if err = addr.encrypt(localSecret); err != nil {
		return "", err
	}

	// Create payment address string.  If this fails, return an error
	// before adding the address to the wallet.
	addr160 := addr.pubKeyHash[:]
	addrstr, err := btcutil.EncodeAddress(addr160, w.Net())
	if err != nil {
		return "", err
	}

	// Add address to wallet's bookkeeping structures.  Adding to
	// the map will result in the imported address being serialized
	// on the next WriteTo call.
	w.addrMap[addressHashKey(addr160)] = addr
	w.importedAddrs = append(w.importedAddrs, addr)

	return addrstr, nil
}
|
|
|
|
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
// AddressInfo holds information regarding an address needed to manage
// a complete wallet.
type AddressInfo struct {
	Address    string // payment address string
	AddrHash   string // pubkey hash bytes held in a string (presumably raw, not hex — confirm against btcAddress.info)
	Compressed bool   // whether the pubkey uses the compressed serialization
	FirstBlock int32  // first block height the address may appear in
	Imported   bool   // true for imported (non-chained) addresses
	Pubkey     string // public key (presumably hex-encoded — confirm against btcAddress.info)
}
|
|
|
|
|
2013-12-02 20:56:06 +01:00
|
|
|
// SortedActiveAddresses returns all wallet addresses that have been
// requested to be generated.  These do not include unused addresses in
// the key pool.  Use this when ordered addresses are needed.  Otherwise,
// ActiveAddresses is preferred.
func (w *Wallet) SortedActiveAddresses() []*AddressInfo {
	// Capacity covers the chained addresses (the +1 accounts for the
	// root at rootKeyChainIdx) plus all imported addresses.
	addrs := make([]*AddressInfo, 0,
		w.highestUsed+int64(len(w.importedAddrs))+1)
	for i := int64(rootKeyChainIdx); i <= w.highestUsed; i++ {
		addr160, ok := w.chainIdxMap[i]
		if !ok {
			// Broken chain index: return what was collected so far.
			return addrs
		}
		addr := w.addrMap[addr160]
		info, err := addr.info(w.Net())
		if err == nil {
			addrs = append(addrs, info)
		}
	}
	// Imported addresses follow all chained addresses.
	for _, addr := range w.importedAddrs {
		info, err := addr.info(w.Net())
		if err == nil {
			addrs = append(addrs, info)
		}
	}
	return addrs
}
|
|
|
|
|
2013-12-02 20:56:06 +01:00
|
|
|
// ActiveAddresses returns a map between active payment addresses
|
2013-11-11 21:30:50 +01:00
|
|
|
// and their full info. These do not include unused addresses in the
|
2013-12-02 20:56:06 +01:00
|
|
|
// key pool. If addresses must be sorted, use SortedActiveAddresses.
|
|
|
|
func (w *Wallet) ActiveAddresses() map[string]*AddressInfo {
|
2013-11-06 17:23:30 +01:00
|
|
|
addrs := make(map[string]*AddressInfo)
|
2013-11-20 02:18:11 +01:00
|
|
|
for i := int64(rootKeyChainIdx); i <= w.highestUsed; i++ {
|
|
|
|
addr160, ok := w.chainIdxMap[i]
|
|
|
|
if !ok {
|
2013-11-06 17:23:30 +01:00
|
|
|
return addrs
|
|
|
|
}
|
|
|
|
addr := w.addrMap[addr160]
|
|
|
|
info, err := addr.info(w.Net())
|
|
|
|
if err == nil {
|
|
|
|
addrs[info.Address] = info
|
|
|
|
}
|
|
|
|
}
|
2013-11-20 02:18:11 +01:00
|
|
|
for _, addr := range w.importedAddrs {
|
|
|
|
info, err := addr.info(w.Net())
|
|
|
|
if err == nil {
|
|
|
|
addrs[info.Address] = info
|
|
|
|
}
|
|
|
|
}
|
2013-11-06 17:23:30 +01:00
|
|
|
return addrs
|
|
|
|
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// walletFlags holds the boolean file-level options of a wallet.
// On disk the flags occupy an 8-byte field, one byte per flag
// (see ReadFrom and WriteTo).
type walletFlags struct {
	useEncryption bool // serialized as byte 0 of the flags field
	watchingOnly  bool // serialized as byte 1 of the flags field
}
|
|
|
|
|
|
|
|
func (wf *walletFlags) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
raw := make([]byte, 8)
|
|
|
|
n, err = binaryRead(r, binary.LittleEndian, raw)
|
|
|
|
wf.useEncryption = raw[0] != 0
|
|
|
|
wf.watchingOnly = raw[1] != 0
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (wf *walletFlags) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
raw := make([]byte, 8)
|
|
|
|
if wf.useEncryption {
|
|
|
|
raw[0] = 1
|
|
|
|
}
|
|
|
|
if wf.watchingOnly {
|
|
|
|
raw[1] = 1
|
|
|
|
}
|
|
|
|
return binaryWrite(w, binary.LittleEndian, raw)
|
|
|
|
}
|
2013-08-21 16:37:30 +02:00
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// addrFlags holds the per-address option bits, serialized into the
// first byte of an 8-byte field (see ReadFrom/WriteTo for the exact
// bit assignments).
type addrFlags struct {
	hasPrivKey              bool // bit 0: an (encrypted) private key is recorded
	hasPubKey               bool // bit 1: a public key is recorded
	encrypted               bool // bit 2: must always be set; only encrypted keys are supported
	createPrivKeyNextUnlock bool // bit 3: unimplemented in btcwallet
	compressed              bool // bit 4: pubkey uses compressed serialization
}
|
|
|
|
|
|
|
|
// ReadFrom reads the serialized 8-byte flags field from r and decodes
// the bits of byte 0 into af.  It returns the number of bytes read and
// any error.  An address whose encrypted bit (bit 2) is unset is
// rejected, since only encrypted private keys are supported.
//
// Note: set bits only switch fields to true; false fields are left
// untouched, so af should be zero-valued before calling.
func (af *addrFlags) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64
	var b [8]byte
	read, err = binaryRead(r, binary.LittleEndian, &b)
	if err != nil {
		return n + read, err
	}
	n += read

	// bit 0: private key present.
	if b[0]&(1<<0) != 0 {
		af.hasPrivKey = true
	}
	// bit 1: public key present.
	if b[0]&(1<<1) != 0 {
		af.hasPubKey = true
	}
	// bit 2: encryption flag; mandatory.
	if b[0]&(1<<2) == 0 {
		return n, errors.New("address flag specifies unencrypted address")
	}
	af.encrypted = true
	// bit 3: deferred privkey creation (unimplemented in btcwallet).
	if b[0]&(1<<3) != 0 {
		af.createPrivKeyNextUnlock = true
	}
	// bit 4: compressed pubkey serialization.
	if b[0]&(1<<4) != 0 {
		af.compressed = true
	}

	return n, nil
}
|
|
|
|
|
|
|
|
func (af *addrFlags) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var b [8]byte
|
|
|
|
if af.hasPrivKey {
|
|
|
|
b[0] |= 1 << 0
|
|
|
|
}
|
|
|
|
if af.hasPubKey {
|
|
|
|
b[0] |= 1 << 1
|
|
|
|
}
|
|
|
|
if !af.encrypted {
|
|
|
|
// We only support encrypted privkeys.
|
2013-09-09 20:14:57 +02:00
|
|
|
return n, errors.New("address must be encrypted")
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
b[0] |= 1 << 2
|
2013-11-04 17:50:32 +01:00
|
|
|
if af.createPrivKeyNextUnlock {
|
|
|
|
b[0] |= 1 << 3
|
|
|
|
}
|
|
|
|
if af.compressed {
|
|
|
|
b[0] |= 1 << 4
|
|
|
|
}
|
2013-09-03 06:10:32 +02:00
|
|
|
|
|
|
|
return binaryWrite(w, binary.LittleEndian, b)
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// recentBlocks holds at most the last 20 seen block hashes as well as
// the block height of the most recently seen block.
type recentBlocks struct {
	hashes     []*btcwire.ShaHash // ordered oldest to newest; len <= 20
	lastHeight int32              // height of the newest hash; -1 when unknown
}
|
|
|
|
|
|
|
|
// blockIterator tracks a position within a recentBlocks list.
// NOTE(review): the iteration methods are defined elsewhere; the field
// semantics below are inferred from names — confirm against the
// RecentBlockIterator implementation.
type blockIterator struct {
	height int32         // block height at the current position
	index  int           // index into rb.hashes
	rb     *recentBlocks // backing list being iterated
}
|
|
|
|
|
|
|
|
func (rb *recentBlocks) ReadFromVersion(v version, r io.Reader) (int64, error) {
|
|
|
|
if !v.LT(Vers20LastBlocks) {
|
|
|
|
// Use current version.
|
|
|
|
return rb.ReadFrom(r)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Old file versions only saved the most recently seen
|
|
|
|
// block height and hash, not the last 20.
|
|
|
|
|
|
|
|
var read int64
|
|
|
|
var syncedBlockHash btcwire.ShaHash
|
|
|
|
|
|
|
|
// Read height.
|
|
|
|
heightBytes := make([]byte, 4) // 4 bytes for a int32
|
|
|
|
n, err := r.Read(heightBytes)
|
|
|
|
if err != nil {
|
|
|
|
return read + int64(n), err
|
|
|
|
}
|
|
|
|
read += int64(n)
|
|
|
|
rb.lastHeight = int32(binary.LittleEndian.Uint32(heightBytes))
|
|
|
|
|
|
|
|
// If height is -1, the last synced block is unknown, so don't try
|
|
|
|
// to read a block hash.
|
|
|
|
if rb.lastHeight == -1 {
|
|
|
|
rb.hashes = nil
|
|
|
|
return read, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read block hash.
|
|
|
|
n, err = r.Read(syncedBlockHash[:])
|
|
|
|
if err != nil {
|
|
|
|
return read + int64(n), err
|
|
|
|
}
|
|
|
|
read += int64(n)
|
|
|
|
|
|
|
|
rb.hashes = []*btcwire.ShaHash{
|
|
|
|
&syncedBlockHash,
|
|
|
|
}
|
|
|
|
|
|
|
|
return read, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rb *recentBlocks) ReadFrom(r io.Reader) (int64, error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
// Read number of saved blocks. This should not exceed 20.
|
|
|
|
nBlockBytes := make([]byte, 4) // 4 bytes for a uint32
|
|
|
|
n, err := r.Read(nBlockBytes)
|
|
|
|
if err != nil {
|
|
|
|
return read + int64(n), err
|
|
|
|
}
|
|
|
|
read += int64(n)
|
|
|
|
nBlocks := binary.LittleEndian.Uint32(nBlockBytes)
|
|
|
|
if nBlocks > 20 {
|
|
|
|
return read, errors.New("number of last seen blocks exceeds maximum of 20")
|
|
|
|
}
|
|
|
|
|
|
|
|
// If number of blocks is 0, our work here is done.
|
|
|
|
if nBlocks == 0 {
|
|
|
|
rb.lastHeight = -1
|
|
|
|
rb.hashes = nil
|
|
|
|
return read, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read most recently seen block height.
|
|
|
|
heightBytes := make([]byte, 4) // 4 bytes for a int32
|
|
|
|
n, err = r.Read(heightBytes)
|
|
|
|
if err != nil {
|
|
|
|
return read + int64(n), err
|
|
|
|
}
|
|
|
|
read += int64(n)
|
|
|
|
height := int32(binary.LittleEndian.Uint32(heightBytes))
|
|
|
|
|
|
|
|
// height should not be -1 (or any other negative number)
|
|
|
|
// since at this point we should be reading in at least one
|
|
|
|
// known block.
|
|
|
|
if height < 0 {
|
|
|
|
return read, errors.New("expected a block but specified height is negative")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set last seen height.
|
|
|
|
rb.lastHeight = height
|
|
|
|
|
|
|
|
// Read nBlocks block hashes. Hashes are expected to be in
|
|
|
|
// order of oldest to newest, but there's no way to check
|
|
|
|
// that here.
|
|
|
|
rb.hashes = make([]*btcwire.ShaHash, 0, nBlocks)
|
|
|
|
for i := uint32(0); i < nBlocks; i++ {
|
|
|
|
blockSha := new(btcwire.ShaHash)
|
|
|
|
n, err := r.Read(blockSha[:])
|
|
|
|
if err != nil {
|
|
|
|
return read + int64(n), err
|
|
|
|
}
|
|
|
|
read += int64(n)
|
|
|
|
rb.hashes = append(rb.hashes, blockSha)
|
|
|
|
}
|
|
|
|
|
|
|
|
return read, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rb *recentBlocks) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
// Write number of saved blocks. This should not exceed 20.
|
|
|
|
nBlocks := uint32(len(rb.hashes))
|
|
|
|
if nBlocks > 20 {
|
|
|
|
return written, errors.New("number of last seen blocks exceeds maximum of 20")
|
|
|
|
}
|
|
|
|
if nBlocks != 0 && rb.lastHeight < 0 {
|
|
|
|
return written, errors.New("number of block hashes is positive, but height is negative")
|
|
|
|
}
|
|
|
|
if nBlocks == 0 && rb.lastHeight != -1 {
|
|
|
|
return written, errors.New("no block hashes available, but height is not -1")
|
|
|
|
}
|
|
|
|
nBlockBytes := make([]byte, 4) // 4 bytes for a uint32
|
|
|
|
binary.LittleEndian.PutUint32(nBlockBytes, nBlocks)
|
|
|
|
n, err := w.Write(nBlockBytes)
|
|
|
|
if err != nil {
|
|
|
|
return written + int64(n), err
|
|
|
|
}
|
|
|
|
written += int64(n)
|
|
|
|
|
|
|
|
// If number of blocks is 0, our work here is done.
|
|
|
|
if nBlocks == 0 {
|
|
|
|
return written, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write most recently seen block height.
|
|
|
|
heightBytes := make([]byte, 4) // 4 bytes for a int32
|
|
|
|
binary.LittleEndian.PutUint32(heightBytes, uint32(rb.lastHeight))
|
|
|
|
n, err = w.Write(heightBytes)
|
|
|
|
if err != nil {
|
|
|
|
return written + int64(n), err
|
|
|
|
}
|
|
|
|
written += int64(n)
|
|
|
|
|
|
|
|
// Write block hashes.
|
|
|
|
for _, hash := range rb.hashes {
|
|
|
|
n, err := w.Write(hash[:])
|
|
|
|
if err != nil {
|
|
|
|
return written + int64(n), err
|
|
|
|
}
|
|
|
|
written += int64(n)
|
|
|
|
}
|
|
|
|
|
|
|
|
return written, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// RecentBlockIterator is a type to iterate through recent-seen
// blocks.
type RecentBlockIterator interface {
	// Next moves the iterator toward the newest block, returning
	// false if already at the newest.
	Next() bool

	// Prev moves the iterator toward the oldest block, returning
	// false if already at the oldest.
	Prev() bool

	// BlockStamp returns the height and hash of the block at the
	// iterator's current position.
	BlockStamp() *BlockStamp
}
|
|
|
|
|
|
|
|
func (rb *recentBlocks) NewIterator() RecentBlockIterator {
|
|
|
|
if rb.lastHeight == -1 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &blockIterator{
|
|
|
|
height: rb.lastHeight,
|
|
|
|
index: len(rb.hashes) - 1,
|
|
|
|
rb: rb,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *blockIterator) Next() bool {
|
|
|
|
if it.index+1 >= len(it.rb.hashes) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
it.index += 1
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *blockIterator) Prev() bool {
|
|
|
|
if it.index-1 < 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
it.index -= 1
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *blockIterator) BlockStamp() *BlockStamp {
|
|
|
|
return &BlockStamp{
|
|
|
|
Height: it.rb.lastHeight - int32(len(it.rb.hashes)-1-it.index),
|
|
|
|
Hash: *it.rb.hashes[it.index],
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// unusedSpace is a wrapper type to read or write one or more types
// that btcwallet fits into an unused space left by Armory's wallet file
// format.
type unusedSpace struct {
	nBytes int // number of unused bytes that armory left.
	rfvs   []ReaderFromVersion // values serialized into (and out of) the unused space.
}
|
|
|
|
|
|
|
|
func newUnusedSpace(nBytes int, rfvs ...ReaderFromVersion) *unusedSpace {
|
|
|
|
return &unusedSpace{
|
|
|
|
nBytes: nBytes,
|
|
|
|
rfvs: rfvs,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (u *unusedSpace) ReadFromVersion(v version, r io.Reader) (int64, error) {
|
|
|
|
var read int64
|
|
|
|
|
|
|
|
for _, rfv := range u.rfvs {
|
|
|
|
n, err := rfv.ReadFromVersion(v, r)
|
|
|
|
if err != nil {
|
|
|
|
return read + n, err
|
|
|
|
}
|
|
|
|
read += n
|
|
|
|
if read > int64(u.nBytes) {
|
|
|
|
return read, errors.New("read too much from armory's unused space")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read rest of actually unused bytes.
|
|
|
|
unused := make([]byte, u.nBytes-int(read))
|
|
|
|
n, err := r.Read(unused)
|
|
|
|
return read + int64(n), err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (u *unusedSpace) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
for _, wt := range u.rfvs {
|
|
|
|
n, err := wt.WriteTo(w)
|
|
|
|
if err != nil {
|
|
|
|
return written + n, err
|
|
|
|
}
|
|
|
|
written += n
|
|
|
|
if written > int64(u.nBytes) {
|
|
|
|
return written, errors.New("wrote too much to armory's unused space")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write rest of actually unused bytes.
|
|
|
|
unused := make([]byte, u.nBytes-int(written))
|
|
|
|
n, err := w.Write(unused)
|
|
|
|
return written + int64(n), err
|
|
|
|
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// btcAddress is the in-memory and serialized representation of a
// single wallet address, including its (optionally encrypted) private
// key and the chain metadata used for deterministic address creation.
type btcAddress struct {
	pubKeyHash [ripemd160.Size]byte // 160-bit hash of pubKey (computed via calcHash160).
	flags      addrFlags
	chaincode  [32]byte // chaincode used for deterministic chained addresses.
	chainIndex int64    // position in the address chain; see rootKeyChainIdx/importedKeyChainIdx.
	chainDepth int64    // currently unused (will use when extending a locked wallet)
	initVector [16]byte // AES-CFB initialization vector for privKey encryption.
	privKey    [32]byte // encrypted private key bytes (see encrypt/unlock).
	pubKey     publicKey // serialized pubkey: format byte followed by key material.
	firstSeen  int64 // Unix timestamp the address was first seen.
	lastSeen   int64 // Unix timestamp the address was last seen.
	firstBlock int32 // height of the first block the address appeared in.
	lastBlock  int32 // height of the last block the address appeared in.

	// privKeyCT holds the clear text private key, guarded by its
	// embedded mutex.  key is non-nil only while unlocked.
	privKeyCT struct {
		sync.Mutex
		key []byte // non-nil if unlocked.
	}
}
|
|
|
|
|
2013-11-20 02:18:11 +01:00
|
|
|
// Special chainIndex values for a btcAddress.  Non-negative indexes
// identify ordinary chained addresses.
const (
	// Root address has a chain index of -1. Each subsequent
	// chained address increments the index.
	rootKeyChainIdx = -1

	// Imported private keys are not part of the chain, and have a
	// special index of -2.
	importedKeyChainIdx = -2
)
|
|
|
|
|
2013-11-04 17:50:32 +01:00
|
|
|
// Serialized public key format prefixes.  The low bit (0x1) of the
// on-disk format byte encodes Y-coordinate oddness for compressed
// keys and is masked off before comparing against these values (see
// publicKey.ReadFrom).
const (
	pubkeyCompressed   byte = 0x2
	pubkeyUncompressed byte = 0x4
)

// publicKey is a serialized public key: a format byte followed by
// 32 (compressed) or 64 (uncompressed) bytes of key material.
type publicKey []byte
|
|
|
|
|
|
|
|
func (k *publicKey) ReadFrom(r io.Reader) (n int64, err error) {
|
|
|
|
var read int64
|
|
|
|
var format byte
|
|
|
|
read, err = binaryRead(r, binary.LittleEndian, &format)
|
|
|
|
if err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
// Remove the oddness from the format
|
|
|
|
noodd := format
|
|
|
|
noodd &= ^byte(0x1)
|
|
|
|
|
|
|
|
var s []byte
|
|
|
|
switch noodd {
|
|
|
|
case pubkeyUncompressed:
|
|
|
|
// Read the remaining 64 bytes.
|
|
|
|
s = make([]byte, 64)
|
|
|
|
|
|
|
|
case pubkeyCompressed:
|
|
|
|
// Read the remaining 32 bytes.
|
|
|
|
s = make([]byte, 32)
|
|
|
|
|
|
|
|
default:
|
|
|
|
return n, errors.New("unrecognized pubkey format")
|
|
|
|
}
|
|
|
|
|
|
|
|
read, err = binaryRead(r, binary.LittleEndian, &s)
|
|
|
|
if err != nil {
|
|
|
|
return n + read, err
|
|
|
|
}
|
|
|
|
n += read
|
|
|
|
|
|
|
|
*k = append([]byte{format}, s...)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTo serializes the public key (format byte plus key material)
// to w, returning the bytes written and any write error.
func (k *publicKey) WriteTo(w io.Writer) (n int64, err error) {
	return binaryWrite(w, binary.LittleEndian, []byte(*k))
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// newBtcAddress initializes and returns a new address. privkey must
// be 32 bytes. iv must be 16 bytes, or nil (in which case it is
// randomly generated).  The public key is derived from privkey using
// the given compression preference, and the address is stamped with
// the current time and the height from bs as its first-seen point.
func newBtcAddress(privkey, iv []byte, bs *BlockStamp, compressed bool) (addr *btcAddress, err error) {
	if len(privkey) != 32 {
		return nil, errors.New("private key is not 32 bytes")
	}
	if iv == nil {
		// Generate a fresh random IV for this address.
		iv = make([]byte, 16)
		if _, err := rand.Read(iv); err != nil {
			return nil, err
		}
	} else if len(iv) != 16 {
		return nil, errors.New("init vector must be nil or 16 bytes large")
	}

	addr = &btcAddress{
		flags: addrFlags{
			hasPrivKey: true,
			hasPubKey:  true,
			compressed: compressed,
		},
		firstSeen:  time.Now().Unix(),
		firstBlock: bs.Height,
	}
	// NOTE(review): the caller's privkey slice is aliased here, not
	// copied — zeroing it elsewhere clears this address's clear text
	// key too.  Confirm callers depend on this behavior.
	addr.privKeyCT.key = privkey
	copy(addr.initVector[:], iv)
	addr.pubKey = pubkeyFromPrivkey(privkey, compressed)
	copy(addr.pubKeyHash[:], calcHash160(addr.pubKey))

	return addr, nil
}
|
|
|
|
|
|
|
|
// newRootBtcAddress generates a new address, also setting the
// chaincode and chain index to represent this address as a root
// address.  privKey and iv are passed through to newBtcAddress;
// chaincode must be exactly 32 bytes.
func newRootBtcAddress(privKey, iv, chaincode []byte,
	bs *BlockStamp) (addr *btcAddress, err error) {

	if len(chaincode) != 32 {
		return nil, errors.New("chaincode is not 32 bytes")
	}

	// Create new btcAddress with provided inputs. This will
	// always use a compressed pubkey.
	addr, err = newBtcAddress(privKey, iv, bs, true)
	if err != nil {
		return nil, err
	}

	copy(addr.chaincode[:], chaincode)
	addr.chainIndex = rootKeyChainIdx

	return addr, err
}
|
|
|
|
|
2013-12-03 18:33:37 +01:00
|
|
|
// verifyKeypairs creates a signature using the parsed private key and
// verifies the signature with the parsed public key. If either of these
// steps fail, the keypair generation failed and any funds sent to this
// address will be unspendable. This step requires an unencrypted or
// unlocked btcAddress.
//
// NOTE(review): privKeyCT.key is read without holding privKeyCT.Mutex;
// confirm callers guarantee no concurrent lock()/unlock() here.
func (a *btcAddress) verifyKeypairs() error {
	// Parse public key.
	pubkey, err := btcec.ParsePubKey(a.pubKey, btcec.S256())
	if err != nil {
		return err
	}

	// Require an available 32-byte clear text private key.
	if len(a.privKeyCT.key) != 32 {
		return errors.New("private key unavailable")
	}

	// Build an ecdsa.PrivateKey from the parsed pubkey and scalar D.
	privkey := &ecdsa.PrivateKey{
		PublicKey: *pubkey,
		D:         new(big.Int).SetBytes(a.privKeyCT.key),
	}

	// Sign a fixed message and verify it to prove the pair is usable.
	data := "String to sign."
	r, s, err := ecdsa.Sign(rand.Reader, privkey, []byte(data))
	if err != nil {
		return err
	}

	ok := ecdsa.Verify(&privkey.PublicKey, []byte(data), r, s)
	if !ok {
		return errors.New("ecdsa verification failed")
	}
	return nil
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// ReadFrom reads an encrypted address from an io.Reader.  Fields are
// deserialized in the exact order written by WriteTo, with each
// checksummed field immediately followed by its uint32 checksum, which
// is verified (and, where possible, corrected) after reading.
func (a *btcAddress) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	// Checksums
	var chkPubKeyHash uint32
	var chkChaincode uint32
	var chkInitVector uint32
	var chkPrivKey uint32
	var chkPubKey uint32

	// Read serialized wallet into addr fields and checksums.
	// Order here must stay in lockstep with WriteTo.
	datas := []interface{}{
		&a.pubKeyHash,
		&chkPubKeyHash,
		make([]byte, 4), // version
		&a.flags,
		&a.chaincode,
		&chkChaincode,
		&a.chainIndex,
		&a.chainDepth,
		&a.initVector,
		&chkInitVector,
		&a.privKey,
		&chkPrivKey,
		&a.pubKey,
		&chkPubKey,
		&a.firstSeen,
		&a.lastSeen,
		&a.firstBlock,
		&a.lastBlock,
	}
	for _, data := range datas {
		// Fields that implement io.ReaderFrom (e.g. publicKey) know
		// their own variable-length wire format; everything else is
		// fixed-size and read directly.
		if rf, ok := data.(io.ReaderFrom); ok {
			read, err = rf.ReadFrom(r)
		} else {
			read, err = binaryRead(r, binary.LittleEndian, data)
		}
		if err != nil {
			return n + read, err
		}
		n += read
	}

	// Verify checksums, correct errors where possible.
	checks := []struct {
		data []byte
		chk  uint32
	}{
		{a.pubKeyHash[:], chkPubKeyHash},
		{a.chaincode[:], chkChaincode},
		{a.initVector[:], chkInitVector},
		{a.privKey[:], chkPrivKey},
		{a.pubKey, chkPubKey},
	}
	for i := range checks {
		if err = verifyAndFix(checks[i].data, checks[i].chk); err != nil {
			return n, err
		}
	}

	return n, nil
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
// WriteTo serializes the address to w.  Each checksummed field is
// immediately followed by the uint32 walletHash of its contents; the
// field order must stay in lockstep with ReadFrom.
func (a *btcAddress) WriteTo(w io.Writer) (n int64, err error) {
	var written int64

	datas := []interface{}{
		&a.pubKeyHash,
		walletHash(a.pubKeyHash[:]),
		make([]byte, 4), //version
		&a.flags,
		&a.chaincode,
		walletHash(a.chaincode[:]),
		&a.chainIndex,
		&a.chainDepth,
		&a.initVector,
		walletHash(a.initVector[:]),
		&a.privKey,
		walletHash(a.privKey[:]),
		&a.pubKey,
		walletHash(a.pubKey),
		&a.firstSeen,
		&a.lastSeen,
		&a.firstBlock,
		&a.lastBlock,
	}
	for _, data := range datas {
		// Fields implementing io.WriterTo (e.g. publicKey) write
		// their own variable-length format; everything else is
		// fixed-size and written directly.
		if wt, ok := data.(io.WriterTo); ok {
			written, err = wt.WriteTo(w)
		} else {
			written, err = binaryWrite(w, binary.LittleEndian, data)
		}
		if err != nil {
			return n + written, err
		}
		n += written
	}
	return n, nil
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// encrypt attempts to encrypt an address's clear text private key,
// failing if the address is already encrypted or if the private key is
// not 32 bytes. If successful, the encryption flag is set.
func (a *btcAddress) encrypt(key []byte) error {
	if a.flags.encrypted {
		return errors.New("address already encrypted")
	}
	a.privKeyCT.Lock()
	defer a.privKeyCT.Unlock()
	if len(a.privKeyCT.key) != 32 {
		return errors.New("invalid clear text private key")
	}

	aesBlockEncrypter, err := aes.NewCipher(key)
	if err != nil {
		return err
	}
	aesEncrypter := cipher.NewCFBEncrypter(aesBlockEncrypter, a.initVector[:])

	// Encrypt the clear text key into the fixed-size privKey field
	// using AES-CFB keyed by the caller's key and this address's IV.
	aesEncrypter.XORKeyStream(a.privKey[:], a.privKeyCT.key)

	a.flags.encrypted = true
	return nil
}
|
|
|
|
|
|
|
|
// lock removes the reference this address holds to its clear text
|
|
|
|
// private key. This function fails if the address is not encrypted.
|
|
|
|
func (a *btcAddress) lock() error {
|
|
|
|
if !a.flags.encrypted {
|
2013-09-09 20:14:57 +02:00
|
|
|
return errors.New("unable to lock unencrypted address")
|
2013-09-03 06:10:32 +02:00
|
|
|
}
|
|
|
|
|
2013-11-11 21:30:50 +01:00
|
|
|
a.privKeyCT.Lock()
|
|
|
|
zero(a.privKeyCT.key)
|
|
|
|
a.privKeyCT.key = nil
|
|
|
|
a.privKeyCT.Unlock()
|
2013-09-03 06:10:32 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// unlock decrypts and stores a pointer to this address's private key,
// failing if the address is not encrypted, or the provided key is
// incorrect. The returned clear text private key will always be a copy
// that may be safely used by the caller without worrying about it being
// zeroed during an address lock.
func (a *btcAddress) unlock(key []byte) (privKeyCT []byte, err error) {
	if !a.flags.encrypted {
		return nil, errors.New("unable to unlock unencrypted address")
	}

	// If secret is already saved, return a copy without performing a full
	// unlock.
	a.privKeyCT.Lock()
	if len(a.privKeyCT.key) == 32 {
		privKeyCT := make([]byte, 32)
		copy(privKeyCT, a.privKeyCT.key)
		a.privKeyCT.Unlock()
		return privKeyCT, nil
	}
	a.privKeyCT.Unlock()

	// Decrypt private key with AES key.
	aesBlockDecrypter, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	aesDecrypter := cipher.NewCFBDecrypter(aesBlockDecrypter, a.initVector[:])
	privkey := make([]byte, 32)
	aesDecrypter.XORKeyStream(privkey, a.privKey[:])

	// Generate new x, y from clear text private key and check that they
	// match the recorded pubkey.  A mismatch means the supplied AES
	// key was wrong (decryption produced garbage).
	pubKey, err := btcec.ParsePubKey(a.pubKey, btcec.S256())
	if err != nil {
		return nil, fmt.Errorf("cannot parse pubkey: %s", err)
	}
	x, y := btcec.S256().ScalarBaseMult(privkey)
	if x.Cmp(pubKey.X) != 0 || y.Cmp(pubKey.Y) != 0 {
		return nil, errors.New("decryption failed")
	}

	// Cache the decrypted key and hand the caller an independent copy
	// so a later lock() (which zeroes the cached key) cannot clobber
	// the caller's slice.
	privkeyCopy := make([]byte, 32)
	copy(privkeyCopy, privkey)
	a.privKeyCT.Lock()
	a.privKeyCT.key = privkey
	a.privKeyCT.Unlock()
	return privkeyCopy, nil
}
|
|
|
|
|
|
|
|
// changeEncryptionKey is intended to re-encrypt the address's private
// key under newkey after decrypting with oldkey.
//
// TODO(jrick): unimplemented; currently always returns an error.
func (a *btcAddress) changeEncryptionKey(oldkey, newkey []byte) error {
	return errors.New("unimplemented")
}
|
|
|
|
|
|
|
|
// paymentAddress returns a human readable payment address string for
// an address.  net selects the Bitcoin network whose address encoding
// is used.
func (a *btcAddress) paymentAddress(net btcwire.BitcoinNet) (string, error) {
	return btcutil.EncodeAddress(a.pubKeyHash[:], net)
}
|
|
|
|
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
// info returns information about a btcAddress stored in a AddressInfo
// struct.  net is used to render the human-readable payment address.
func (a *btcAddress) info(net btcwire.BitcoinNet) (*AddressInfo, error) {
	address, err := a.paymentAddress(net)
	if err != nil {
		return nil, err
	}

	return &AddressInfo{
		Address:    address,
		AddrHash:   string(a.pubKeyHash[:]),
		Compressed: a.flags.compressed,
		FirstBlock: a.firstBlock,
		// Imported keys are marked by the special chain index -2.
		Imported: a.chainIndex == importedKeyChainIdx,
		Pubkey:   hex.EncodeToString(a.pubKey),
	}, nil
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
func walletHash(b []byte) uint32 {
|
|
|
|
sum := btcwire.DoubleSha256(b)
|
|
|
|
return binary.LittleEndian.Uint32(sum)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(jrick) add error correction.
|
|
|
|
func verifyAndFix(b []byte, chk uint32) error {
|
|
|
|
if walletHash(b) != chk {
|
2013-09-09 20:14:57 +02:00
|
|
|
return ErrChecksumMismatch
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// kdfParameters holds the tunable inputs to the memory-hard key
// derivation function together with the random salt.
type kdfParameters struct {
	mem   uint64 // memory requirement in bytes for one KDF iteration.
	nIter uint32 // number of KDF iterations.
	salt  [32]byte // random salt mixed into the derivation.
}
|
|
|
|
|
2013-09-03 06:10:32 +02:00
|
|
|
// computeKdfParameters returns best guess parameters to the
// memory-hard key derivation function to make the computation last
// targetSec seconds, while using no more than maxMem bytes of memory.
func computeKdfParameters(targetSec float64, maxMem uint64) (*kdfParameters, error) {
	params := &kdfParameters{}
	if _, err := rand.Read(params.salt[:]); err != nil {
		return nil, err
	}

	testKey := []byte("This is an example key to test KDF iteration speed")

	memoryReqtBytes := uint64(1024)
	approxSec := float64(0)

	// Double the per-iteration memory requirement until a single timed
	// iteration takes at least a quarter of the target duration, or
	// the memory cap is reached.
	for approxSec <= targetSec/4 && memoryReqtBytes < maxMem {
		memoryReqtBytes *= 2
		before := time.Now()
		_ = keyOneIter(testKey, params.salt[:], memoryReqtBytes)
		approxSec = time.Since(before).Seconds()
	}

	// With memory fixed, double the iteration count until running all
	// iterations takes a measurable amount of wall-clock time.
	allItersSec := float64(0)
	nIter := uint32(1)
	for allItersSec < 0.02 { // This is a magic number straight from armory's source.
		nIter *= 2
		before := time.Now()
		for i := uint32(0); i < nIter; i++ {
			_ = keyOneIter(testKey, params.salt[:], memoryReqtBytes)
		}
		allItersSec = time.Since(before).Seconds()
	}

	params.mem = memoryReqtBytes
	params.nIter = nIter

	return params, nil
}
|
|
|
|
|
2013-08-21 16:37:30 +02:00
|
|
|
// WriteTo serializes the KDF parameters to w as mem (uint64), nIter
// (uint32), the 32-byte salt, a uint32 checksum of those fields, and
// zero padding out to a fixed 256-byte record.
func (params *kdfParameters) WriteTo(w io.Writer) (n int64, err error) {
	var written int64

	// Build the byte string covered by the checksum: mem, nIter, salt
	// in their serialized little-endian forms.
	memBytes := make([]byte, 8)
	nIterBytes := make([]byte, 4)
	binary.LittleEndian.PutUint64(memBytes, params.mem)
	binary.LittleEndian.PutUint32(nIterBytes, params.nIter)
	chkedBytes := append(memBytes, nIterBytes...)
	chkedBytes = append(chkedBytes, params.salt[:]...)

	datas := []interface{}{
		&params.mem,
		&params.nIter,
		&params.salt,
		walletHash(chkedBytes),
		make([]byte, 256-(binary.Size(params)+4)), // padding
	}
	for _, data := range datas {
		if written, err = binaryWrite(w, binary.LittleEndian, data); err != nil {
			return n + written, err
		}
		n += written
	}

	return n, nil
}
|
|
|
|
|
|
|
|
// ReadFrom deserializes KDF parameters written by WriteTo.  The
// checksummed region is read as one opaque byte string, verified
// against the stored checksum, and only then decoded into the mem,
// nIter, and salt fields.
func (params *kdfParameters) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	// These must be read in but are not saved directly to params.
	chkedBytes := make([]byte, 44) // 8 (mem) + 4 (nIter) + 32 (salt)
	var chk uint32
	padding := make([]byte, 256-(binary.Size(params)+4))

	datas := []interface{}{
		chkedBytes,
		&chk,
		padding,
	}
	for _, data := range datas {
		if read, err = binaryRead(r, binary.LittleEndian, data); err != nil {
			return n + read, err
		}
		n += read
	}

	// Verify checksum
	if err = verifyAndFix(chkedBytes, chk); err != nil {
		return n, err
	}

	// Read params
	buf := bytes.NewBuffer(chkedBytes)
	datas = []interface{}{
		&params.mem,
		&params.nIter,
		&params.salt,
	}
	for _, data := range datas {
		if err = binary.Read(buf, binary.LittleEndian, data); err != nil {
			return n, err
		}
	}

	return n, nil
}
|
|
|
|
|
|
|
|
// addrEntry is a wallet file entry pairing an address with its
// 160-bit pubkey hash, which identifies the entry on disk.
type addrEntry struct {
	pubKeyHash160 [ripemd160.Size]byte
	addr          btcAddress
}
|
|
|
|
|
|
|
|
func (e *addrEntry) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
// Write header
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, addrHeader); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write hash
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, &e.pubKeyHash160); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write btcAddress
|
|
|
|
written, err = e.addr.WriteTo(w)
|
2013-09-03 06:10:32 +02:00
|
|
|
n += written
|
|
|
|
return n, err
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// ReadFrom deserializes an address entry (pubkey hash followed by the
// btcAddress).  The addrHeader byte written by WriteTo is expected to
// have already been consumed by the caller.
func (e *addrEntry) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	if read, err = binaryRead(r, binary.LittleEndian, &e.pubKeyHash160); err != nil {
		return n + read, err
	}
	n += read

	read, err = e.addr.ReadFrom(r)
	return n + read, err
}
|
|
|
|
|
|
|
|
// addrCommentEntry is a wallet file entry attaching a free-form
// comment to the address identified by pubKeyHash160.
type addrCommentEntry struct {
	pubKeyHash160 [ripemd160.Size]byte
	comment       []byte // at most maxCommentLen bytes.
}
|
|
|
|
|
|
|
|
// WriteTo serializes the entry to w as the addrCommentHeader byte,
// the 160-bit pubkey hash, a uint16 comment length, and the comment.
func (e *addrCommentEntry) WriteTo(w io.Writer) (n int64, err error) {
	var written int64

	// Comments shall not overflow their entry.
	if len(e.comment) > maxCommentLen {
		return n, ErrMalformedEntry
	}

	// Write header
	if written, err = binaryWrite(w, binary.LittleEndian, addrCommentHeader); err != nil {
		return n + written, err
	}
	n += written

	// Write hash
	if written, err = binaryWrite(w, binary.LittleEndian, &e.pubKeyHash160); err != nil {
		return n + written, err
	}
	n += written

	// Write length
	if written, err = binaryWrite(w, binary.LittleEndian, uint16(len(e.comment))); err != nil {
		return n + written, err
	}
	n += written

	// Write comment
	written, err = binaryWrite(w, binary.LittleEndian, e.comment)
	return n + written, err
}
|
|
|
|
|
|
|
|
// ReadFrom deserializes an address comment entry: the 160-bit pubkey
// hash, a uint16 comment length, then the comment bytes.  The header
// byte is expected to have been consumed by the caller.
func (e *addrCommentEntry) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	if read, err = binaryRead(r, binary.LittleEndian, &e.pubKeyHash160); err != nil {
		return n + read, err
	}
	n += read

	var clen uint16
	if read, err = binaryRead(r, binary.LittleEndian, &clen); err != nil {
		return n + read, err
	}
	n += read

	e.comment = make([]byte, clen)
	read, err = binaryRead(r, binary.LittleEndian, e.comment)
	return n + read, err
}
|
|
|
|
|
|
|
|
// txCommentEntry is a wallet file entry attaching a free-form comment
// to the transaction identified by txHash.
type txCommentEntry struct {
	txHash  [sha256.Size]byte
	comment []byte // at most maxCommentLen bytes.
}
|
|
|
|
|
|
|
|
func (e *txCommentEntry) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
var written int64
|
|
|
|
|
|
|
|
// Comments shall not overflow their entry.
|
|
|
|
if len(e.comment) > maxCommentLen {
|
2013-09-09 20:14:57 +02:00
|
|
|
return n, ErrMalformedEntry
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Write header
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, txCommentHeader); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
n += written
|
|
|
|
|
|
|
|
// Write length
|
|
|
|
if written, err = binaryWrite(w, binary.LittleEndian, uint16(len(e.comment))); err != nil {
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write comment
|
|
|
|
written, err = binaryWrite(w, binary.LittleEndian, e.comment)
|
|
|
|
return n + written, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadFrom deserializes a transaction comment entry: the 32-byte
// transaction hash, a uint16 comment length, then the comment bytes.
// The header byte is expected to have been consumed by the caller.
func (e *txCommentEntry) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	if read, err = binaryRead(r, binary.LittleEndian, &e.txHash); err != nil {
		return n + read, err
	}
	n += read

	var clen uint16
	if read, err = binaryRead(r, binary.LittleEndian, &clen); err != nil {
		return n + read, err
	}
	n += read

	e.comment = make([]byte, clen)
	read, err = binaryRead(r, binary.LittleEndian, e.comment)
	return n + read, err
}
|
|
|
|
|
2013-11-04 17:50:32 +01:00
|
|
|
// deletedEntry represents a removed wallet entry, stored on disk as a
// length-prefixed run of ignored bytes.
type deletedEntry struct{}

// ReadFrom consumes a deleted entry: a uint16 byte count followed by
// that many unused bytes.
func (e *deletedEntry) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	var ulen uint16
	if read, err = binaryRead(r, binary.LittleEndian, &ulen); err != nil {
		return n + read, err
	}
	n += read

	unused := make([]byte, ulen)
	nRead, err := r.Read(unused)
	// io.EOF is deliberately swallowed here.
	// NOTE(review): presumably a deleted entry may legally end the
	// file; confirm against the wallet file format.
	if err == io.EOF {
		return n + int64(nRead), nil
	}
	return n + int64(nRead), err
}
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
|
|
|
|
// BlockStamp defines a block (by height and a unique hash) and is
// used to mark a point in the blockchain that a wallet element is
// synced to.
type BlockStamp struct {
	Height int32           // block height in the chain.
	Hash   btcwire.ShaHash // unique hash of the block.
}
|