/*
 * Copyright (c) 2013, 2014 Conformal Systems LLC <info@conformal.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
|
|
|
|
|
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"code.google.com/p/go.net/websocket"
|
2013-12-05 17:59:08 +01:00
|
|
|
"crypto/sha256"
|
|
|
|
"crypto/subtle"
|
2013-11-19 18:21:54 +01:00
|
|
|
"crypto/tls"
|
|
|
|
"crypto/x509"
|
2013-10-03 15:11:35 +02:00
|
|
|
"encoding/base64"
|
2013-08-21 16:37:30 +02:00
|
|
|
"encoding/json"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"github.com/conformal/btcjson"
|
2014-01-10 21:51:54 +01:00
|
|
|
"github.com/conformal/btcutil"
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
"github.com/conformal/btcwallet/wallet"
|
2013-11-06 17:23:30 +01:00
|
|
|
"github.com/conformal/btcws"
|
2013-11-20 02:47:15 +01:00
|
|
|
"github.com/conformal/go-socks"
|
2014-03-18 04:36:31 +01:00
|
|
|
"io"
|
2014-01-10 21:51:54 +01:00
|
|
|
"io/ioutil"
|
2013-10-16 23:49:35 +02:00
|
|
|
"net"
|
2013-08-21 16:37:30 +02:00
|
|
|
"net/http"
|
2013-12-03 16:52:09 +01:00
|
|
|
"os"
|
2014-01-17 18:17:51 +01:00
|
|
|
"path/filepath"
|
2013-12-05 23:20:52 +01:00
|
|
|
"runtime"
|
2013-08-21 16:37:30 +02:00
|
|
|
"sync"
|
2013-12-03 16:52:09 +01:00
|
|
|
"time"
|
2013-08-21 16:37:30 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// ErrBadAuth represents an error where a request is denied due to
	// a missing, incorrect, or duplicate authentication request.
	ErrBadAuth = errors.New("bad auth")

	// ErrNoAuth represents an error where authentication could not succeed
	// due to a missing Authorization HTTP header.
	ErrNoAuth = errors.New("no auth")

	// ErrConnRefused represents an error where a connection to another
	// process cannot be established.
	ErrConnRefused = errors.New("connection refused")

	// ErrConnLost represents an error where a connection to another
	// process is lost.
	ErrConnLost = errors.New("connection lost")

	// addClient registers a connected frontend's clientContext with the
	// notification duplicator (see clientResponseDuplicator).
	addClient = make(chan clientContext)

	// Messages sent to this channel are sent to each connected frontend.
	allClients = make(chan []byte, 100)
)
|
|
|
|
|
2013-11-18 19:31:58 +01:00
|
|
|
// server holds the items the RPC server may need to access (auth,
// config, shutdown, etc.)
type server struct {
	wg        sync.WaitGroup    // waits for all listener goroutines to finish
	listeners []net.Listener    // TLS listeners accepting RPC connections
	authsha   [sha256.Size]byte // SHA-256 of the expected Authorization header
}
|
|
|
|
|
2014-02-18 04:18:30 +01:00
|
|
|
// clientContext holds the per-client channels used to communicate with a
// single connected (websocket) frontend.
type clientContext struct {
	send chan []byte   // outgoing marshaled responses and notifications
	quit chan struct{} // closed on disconnect
}
|
|
|
|
|
2013-12-05 23:20:52 +01:00
|
|
|
// parseListeners splits the list of listen addresses passed in addrs into
// IPv4 and IPv6 slices and returns them.  This allows easy creation of the
// listeners on the correct interface "tcp4" and "tcp6".  It also properly
// detects addresses which apply to "all interfaces" and adds the address to
// both slices.
func parseListeners(addrs []string) ([]string, []string, error) {
	v4Addrs := make([]string, 0, len(addrs)*2)
	v6Addrs := make([]string, 0, len(addrs)*2)
	for _, addr := range addrs {
		host, _, err := net.SplitHostPort(addr)
		if err != nil {
			// Shouldn't happen due to already being normalized.
			return nil, nil, err
		}

		// An empty host, or a host of "*" on plan9, means the address
		// applies to all interfaces and belongs in both slices.
		if host == "" || (host == "*" && runtime.GOOS == "plan9") {
			v4Addrs = append(v4Addrs, addr)
			v6Addrs = append(v6Addrs, addr)
			continue
		}

		// Classify the parsed IP.  To4 returns nil when the IP is not
		// an IPv4 address, which determines the address family.
		ip := net.ParseIP(host)
		switch {
		case ip == nil:
			return nil, nil, fmt.Errorf("'%s' is not a valid IP "+
				"address", host)
		case ip.To4() != nil:
			v4Addrs = append(v4Addrs, addr)
		default:
			v6Addrs = append(v6Addrs, addr)
		}
	}
	return v4Addrs, v6Addrs, nil
}
|
|
|
|
|
2013-11-18 19:31:58 +01:00
|
|
|
// newServer returns a new instance of the server struct.
|
2013-12-05 23:20:52 +01:00
|
|
|
func newServer(listenAddrs []string) (*server, error) {
|
2013-12-05 17:59:08 +01:00
|
|
|
login := cfg.Username + ":" + cfg.Password
|
|
|
|
auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login))
|
2013-11-18 19:31:58 +01:00
|
|
|
s := server{
|
2013-12-05 17:59:08 +01:00
|
|
|
authsha: sha256.Sum256([]byte(auth)),
|
2013-12-03 16:52:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check for existence of cert file and key file
|
|
|
|
if !fileExists(cfg.RPCKey) && !fileExists(cfg.RPCCert) {
|
|
|
|
// if both files do not exist, we generate them.
|
2014-01-14 17:13:27 +01:00
|
|
|
err := genCertPair(cfg.RPCCert, cfg.RPCKey)
|
2013-12-03 16:52:09 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
keypair, err := tls.LoadX509KeyPair(cfg.RPCCert, cfg.RPCKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
tlsConfig := tls.Config{
|
|
|
|
Certificates: []tls.Certificate{keypair},
|
2013-11-18 19:31:58 +01:00
|
|
|
}
|
|
|
|
|
2013-12-05 23:20:52 +01:00
|
|
|
ipv4ListenAddrs, ipv6ListenAddrs, err := parseListeners(listenAddrs)
|
|
|
|
listeners := make([]net.Listener, 0,
|
|
|
|
len(ipv6ListenAddrs)+len(ipv4ListenAddrs))
|
|
|
|
for _, addr := range ipv4ListenAddrs {
|
|
|
|
listener, err := tls.Listen("tcp4", addr, &tlsConfig)
|
|
|
|
if err != nil {
|
|
|
|
log.Warnf("RPCS: Can't listen on %s: %v", addr,
|
|
|
|
err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
listeners = append(listeners, listener)
|
2013-11-18 19:31:58 +01:00
|
|
|
}
|
|
|
|
|
2013-12-05 23:20:52 +01:00
|
|
|
for _, addr := range ipv6ListenAddrs {
|
|
|
|
listener, err := tls.Listen("tcp6", addr, &tlsConfig)
|
|
|
|
if err != nil {
|
|
|
|
log.Warnf("RPCS: Can't listen on %s: %v", addr,
|
|
|
|
err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
listeners = append(listeners, listener)
|
|
|
|
}
|
|
|
|
if len(listeners) == 0 {
|
2014-01-30 16:14:02 +01:00
|
|
|
return nil, errors.New("no valid listen address")
|
2013-11-18 19:31:58 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
s.listeners = listeners
|
|
|
|
|
|
|
|
return &s, nil
|
|
|
|
}
|
|
|
|
|
2014-01-10 21:51:54 +01:00
|
|
|
// genCertPair generates a key/cert pair to the paths provided.
|
|
|
|
func genCertPair(certFile, keyFile string) error {
|
2013-12-03 16:52:09 +01:00
|
|
|
log.Infof("Generating TLS certificates...")
|
|
|
|
|
2014-01-17 18:17:51 +01:00
|
|
|
// Create directories for cert and key files if they do not yet exist.
|
|
|
|
certDir, _ := filepath.Split(certFile)
|
|
|
|
keyDir, _ := filepath.Split(keyFile)
|
|
|
|
if err := os.MkdirAll(certDir, 0700); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := os.MkdirAll(keyDir, 0700); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate cert pair.
|
2014-01-10 21:51:54 +01:00
|
|
|
org := "btcwallet autogenerated cert"
|
|
|
|
validUntil := time.Now().Add(10 * 365 * 24 * time.Hour)
|
|
|
|
cert, key, err := btcutil.NewTLSCertPair(org, validUntil, nil)
|
2013-12-03 16:52:09 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2014-01-10 21:51:54 +01:00
|
|
|
// Write cert and key files.
|
|
|
|
if err = ioutil.WriteFile(certFile, cert, 0666); err != nil {
|
2013-12-03 16:52:09 +01:00
|
|
|
return err
|
|
|
|
}
|
2014-01-10 21:51:54 +01:00
|
|
|
if err = ioutil.WriteFile(keyFile, key, 0600); err != nil {
|
|
|
|
os.Remove(certFile)
|
2013-12-03 16:52:09 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2014-01-10 21:51:54 +01:00
|
|
|
log.Infof("Done generating TLS certificates")
|
2013-12-03 16:52:09 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-01-30 16:14:02 +01:00
|
|
|
// ParseRequest parses a command or notification out of a JSON-RPC request,
|
|
|
|
// returning any errors as a JSON-RPC error.
|
|
|
|
func ParseRequest(msg []byte) (btcjson.Cmd, *btcjson.Error) {
|
|
|
|
cmd, err := btcjson.ParseMarshaledCmd(msg)
|
|
|
|
if err != nil || cmd.Id() == nil {
|
|
|
|
return cmd, &btcjson.ErrInvalidRequest
|
|
|
|
}
|
|
|
|
return cmd, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReplyToFrontend responds to a marshaled JSON-RPC request with a
|
|
|
|
// marshaled JSON-RPC response for both standard and extension
|
2014-03-20 15:07:05 +01:00
|
|
|
// (websocket) clients. The returned error is ErrBadAuth if a
|
|
|
|
// missing, incorrect, or duplicate authentication request is
|
|
|
|
// received.
|
|
|
|
func (s *server) ReplyToFrontend(msg []byte, ws, authenticated bool) ([]byte, error) {
|
2014-01-30 16:14:02 +01:00
|
|
|
cmd, jsonErr := ParseRequest(msg)
|
|
|
|
var id interface{}
|
|
|
|
if cmd != nil {
|
|
|
|
id = cmd.Id()
|
|
|
|
}
|
2014-03-20 15:07:05 +01:00
|
|
|
|
|
|
|
// If client is not already authenticated, the parsed request must
|
|
|
|
// be for authentication.
|
|
|
|
authCmd, ok := cmd.(*btcws.AuthenticateCmd)
|
|
|
|
if authenticated {
|
|
|
|
if ok {
|
|
|
|
// Duplicate auth request.
|
|
|
|
return nil, ErrBadAuth
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if !ok {
|
|
|
|
// The first unauthenticated request must be an auth request.
|
|
|
|
return nil, ErrBadAuth
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check credentials.
|
|
|
|
login := authCmd.Username + ":" + authCmd.Passphrase
|
|
|
|
auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login))
|
|
|
|
authSha := sha256.Sum256([]byte(auth))
|
|
|
|
cmp := subtle.ConstantTimeCompare(authSha[:], s.authsha[:])
|
|
|
|
if cmp != 1 {
|
|
|
|
return nil, ErrBadAuth
|
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2014-01-30 16:14:02 +01:00
|
|
|
if jsonErr != nil {
|
|
|
|
response := btcjson.Reply{
|
|
|
|
Id: &id,
|
|
|
|
Error: jsonErr,
|
|
|
|
}
|
|
|
|
mresponse, _ := json.Marshal(response)
|
2014-03-20 15:07:05 +01:00
|
|
|
return mresponse, nil
|
2013-11-18 19:31:58 +01:00
|
|
|
}
|
|
|
|
|
2014-01-30 16:14:02 +01:00
|
|
|
cReq := NewClientRequest(cmd, ws)
|
2014-04-09 18:07:09 +02:00
|
|
|
rawResp := cReq.Handle()
|
|
|
|
|
|
|
|
response := struct {
|
|
|
|
Jsonrpc string `json:"jsonrpc"`
|
|
|
|
Id interface{} `json:"id"`
|
|
|
|
Result *json.RawMessage `json:"result"`
|
|
|
|
Error *json.RawMessage `json:"error"`
|
|
|
|
}{
|
|
|
|
Jsonrpc: "1.0",
|
|
|
|
Id: id,
|
|
|
|
Result: rawResp.Result,
|
|
|
|
Error: rawResp.Error,
|
2014-01-30 16:14:02 +01:00
|
|
|
}
|
2014-01-03 19:34:37 +01:00
|
|
|
mresponse, err := json.Marshal(response)
|
|
|
|
if err != nil {
|
2014-01-30 16:14:02 +01:00
|
|
|
log.Errorf("Cannot marhal response: %v", err)
|
2014-04-09 18:07:09 +02:00
|
|
|
response := btcjson.Reply{
|
2014-01-30 16:14:02 +01:00
|
|
|
Id: &id,
|
2014-01-03 19:34:37 +01:00
|
|
|
Error: &btcjson.ErrInternal,
|
2013-11-18 22:37:28 +01:00
|
|
|
}
|
2014-01-30 16:14:02 +01:00
|
|
|
mresponse, _ = json.Marshal(&response)
|
|
|
|
}
|
|
|
|
|
2014-03-20 15:07:05 +01:00
|
|
|
return mresponse, nil
|
2014-01-30 16:14:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// ServeRPCRequest processes and replies to a JSON-RPC client request.
|
|
|
|
func (s *server) ServeRPCRequest(w http.ResponseWriter, r *http.Request) {
|
|
|
|
body, err := btcjson.GetRaw(r.Body)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("RPCS: Error getting JSON message: %v", err)
|
2014-01-03 19:34:37 +01:00
|
|
|
}
|
2013-11-18 22:37:28 +01:00
|
|
|
|
2014-03-20 15:07:05 +01:00
|
|
|
resp, err := s.ReplyToFrontend(body, false, true)
|
|
|
|
if err == ErrBadAuth {
|
|
|
|
http.Error(w, "401 Unauthorized.", http.StatusUnauthorized)
|
|
|
|
return
|
|
|
|
}
|
2014-01-30 16:14:02 +01:00
|
|
|
if _, err := w.Write(resp); err != nil {
|
2014-01-03 19:34:37 +01:00
|
|
|
log.Warnf("RPCS: could not respond to RPC request: %v", err)
|
|
|
|
}
|
2013-11-18 19:31:58 +01:00
|
|
|
}
|
|
|
|
|
2014-02-18 04:18:30 +01:00
|
|
|
// clientResponseDuplicator listens for new wallet listener channels
|
|
|
|
// and duplicates messages sent to allClients to all connected clients.
|
|
|
|
func clientResponseDuplicator() {
|
|
|
|
clients := make(map[clientContext]struct{})
|
2013-08-21 16:37:30 +02:00
|
|
|
|
|
|
|
for {
|
2014-02-18 04:18:30 +01:00
|
|
|
select {
|
|
|
|
case cc := <-addClient:
|
|
|
|
clients[cc] = struct{}{}
|
|
|
|
|
|
|
|
case n := <-allClients:
|
|
|
|
for cc := range clients {
|
|
|
|
select {
|
2014-03-20 15:07:05 +01:00
|
|
|
case <-cc.quit:
|
2014-02-18 04:18:30 +01:00
|
|
|
delete(clients, cc)
|
2014-02-19 03:11:33 +01:00
|
|
|
case cc.send <- n:
|
2014-02-18 04:18:30 +01:00
|
|
|
}
|
|
|
|
}
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-03 19:34:37 +01:00
|
|
|
// NotifyBtcdConnection notifies a frontend of the current connection
|
|
|
|
// status of btcwallet to btcd.
|
|
|
|
func NotifyBtcdConnection(reply chan []byte) {
|
2014-01-30 16:14:02 +01:00
|
|
|
if btcd, ok := CurrentServerConn().(*BtcdRPCConn); ok {
|
2014-01-03 19:34:37 +01:00
|
|
|
ntfn := btcws.NewBtcdConnectedNtfn(btcd.Connected())
|
|
|
|
mntfn, _ := ntfn.MarshalJSON()
|
|
|
|
reply <- mntfn
|
|
|
|
}
|
2013-12-13 17:00:31 +01:00
|
|
|
|
2013-10-29 14:19:11 +01:00
|
|
|
}
|
|
|
|
|
2014-03-20 15:07:05 +01:00
|
|
|
// stringQueue manages a queue of strings, reading from in and sending
// the oldest unsent to out.  This handler closes out and returns after
// in is closed and any queued items are sent.  Any reads on quit result
// in immediate shutdown of the handler.
func stringQueue(in <-chan string, out chan<- string, quit <-chan struct{}) {
	var pending []string    // unsent items, oldest first
	var drain chan<- string // nil while pending is empty (disables send case)
	direct := out           // non-nil only while pending is empty
	var head string         // pending[0], the next item to send

loop:
	for {
		select {
		case s, ok := <-in:
			if !ok {
				// Input closed.  Nil the channel so this case
				// never fires again; keep looping only while
				// queued items remain to be drained.
				in = nil
				if drain == nil {
					break loop
				}
				continue
			}

			// Hand the item straight to a ready receiver when the
			// queue is empty; otherwise append it for later.
			select {
			case direct <- s:
			default:
				pending = append(pending, s)
				drain = out
				direct = nil
				head = pending[0]
			}

		case drain <- head:
			// Shift the queue down one slot, zeroing the vacated
			// tail element so its string may be collected.
			copy(pending, pending[1:])
			pending[len(pending)-1] = ""
			pending = pending[:len(pending)-1]
			if len(pending) == 0 {
				// Exit once the input is closed and drained.
				if in == nil {
					break loop
				}
				drain = nil
				direct = out
			} else {
				head = pending[0]
			}

		case <-quit:
			break loop
		}
	}
	close(out)
}
|
|
|
|
|
2014-01-30 16:14:02 +01:00
|
|
|
// WSSendRecv is the handler for websocket client connections.  It loops
// forever (until disconnected), reading JSON-RPC requests and sending
// sending responses and notifications.
func (s *server) WSSendRecv(ws *websocket.Conn, remoteAddr string, authenticated bool) {
	// Clear the read deadline set before the websocket hijacked
	// the connection.
	ws.SetReadDeadline(time.Time{})

	// Add client context so notifications duplicated to each
	// client are received by this client.
	recvQuit := make(chan struct{})
	sendQuit := make(chan struct{})
	cc := clientContext{
		send: make(chan []byte, 1), // buffer size is number of initial notifications
		quit: make(chan struct{}),
	}
	// Close cc.quit as soon as either the receive or send side shuts
	// down, so every helper goroutine below unblocks and exits.
	go func() {
		select {
		case <-recvQuit:
		case <-sendQuit:
		}
		log.Infof("Disconnected websocket client %s", remoteAddr)
		close(cc.quit)
	}()
	log.Infof("New websocket client %s", remoteAddr)

	NotifyBtcdConnection(cc.send) // TODO(jrick): clients should explicitly request this.
	addClient <- cc

	// recvQueueIn passes all received messages from the currently
	// connected frontend to the for-select loop.  It is closed when
	// reading a message from the websocket connection fails (presumably
	// due to a disconnected client).
	recvQueueIn := make(chan string)

	// Receive messages from websocket and send across recvQueueIn until
	// connection is lost.
	go func() {
		for {
			var m string
			if err := websocket.Message.Receive(ws, &m); err != nil {
				select {
				case <-sendQuit:
					// Do not log error.

				default:
					if err != io.EOF {
						log.Warnf("Websocket receive failed from client %s: %v",
							remoteAddr, err)
					}
				}
				close(recvQueueIn)
				close(recvQuit)
				return
			}
			recvQueueIn <- m
		}
	}()

	// Manage queue of received messages for FIFO processing (stringQueue
	// delivers the oldest unsent message first).
	recvQueueOut := make(chan string)
	go stringQueue(recvQueueIn, recvQueueOut, cc.quit)

	badAuth := make(chan struct{})
	sendResp := make(chan []byte)
	// Process each queued request and forward the marshaled reply to the
	// writer loop below.  An auth failure stops processing entirely.
	go func() {
	out:
		for m := range recvQueueOut {
			resp, err := s.ReplyToFrontend([]byte(m), true, authenticated)
			if err == ErrBadAuth {
				select {
				case badAuth <- struct{}{}:
				case <-cc.quit:
				}
				break out
			}

			// Authentication passed.
			authenticated = true

			select {
			case sendResp <- resp:
			case <-cc.quit:
				break out
			}
		}
		close(sendResp)
	}()

	// Maximum time allowed for a single websocket write to complete.
	const deadline time.Duration = 2 * time.Second

out:
	for {
		var m []byte
		var ok bool

		select {
		case <-badAuth:
			// Bad auth. Disconnect.
			log.Warnf("Disconnecting unauthorized websocket client %s", remoteAddr)
			break out

		case m = <-cc.send: // sends from external writers. never closes.
		case m, ok = <-sendResp:
			if !ok {
				// Nothing left to send.  Return so the handler exits.
				break out
			}
		case <-cc.quit:
			break out
		}

		err := ws.SetWriteDeadline(time.Now().Add(deadline))
		if err != nil {
			log.Errorf("Cannot set write deadline on client %s: %v", remoteAddr, err)
			break out
		}
		err = websocket.Message.Send(ws, string(m))
		if err != nil {
			log.Warnf("Websocket send failed to client %s: %v", remoteAddr, err)
			break out
		}
	}
	close(sendQuit)
	log.Tracef("Leaving function WSSendRecv")
}
|
|
|
|
|
2013-10-29 07:19:40 +01:00
|
|
|
// NotifyNewBlockChainHeight notifies all frontends of a new
|
2013-12-13 17:00:31 +01:00
|
|
|
// blockchain height. This sends the same notification as
|
|
|
|
// btcd, so this can probably be removed.
|
|
|
|
func NotifyNewBlockChainHeight(reply chan []byte, bs wallet.BlockStamp) {
|
|
|
|
ntfn := btcws.NewBlockConnectedNtfn(bs.Hash.String(), bs.Height)
|
|
|
|
mntfn, _ := ntfn.MarshalJSON()
|
|
|
|
reply <- mntfn
|
2013-10-29 07:19:40 +01:00
|
|
|
}
|
|
|
|
|
2013-09-09 20:14:57 +02:00
|
|
|
// duplicateOnce ensures at most one clientResponseDuplicator goroutine is
// ever started, even if Start is called more than once.
var duplicateOnce sync.Once
|
|
|
|
|
2013-11-18 19:31:58 +01:00
|
|
|
// Start starts a HTTP server to provide standard RPC and extension
// websocket connections for any number of btcwallet frontends.
func (s *server) Start() {
	// A duplicator for notifications intended for all clients runs
	// in another goroutine.  Any such notifications are sent to
	// the allClients channel and then sent to each connected client.
	//
	// Use a sync.Once to ensure no extra duplicators run.
	go duplicateOnce.Do(clientResponseDuplicator)

	log.Trace("Starting RPC server")

	serveMux := http.NewServeMux()
	const rpcAuthTimeoutSeconds = 10
	httpServer := &http.Server{
		Handler: serveMux,

		// Timeout connections which don't complete the initial
		// handshake within the allowed timeframe.
		ReadTimeout: time.Second * rpcAuthTimeoutSeconds,
	}
	// Standard HTTP POST JSON-RPC endpoint; requests must authenticate
	// via the Authorization header before being served.
	serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := s.checkAuth(r); err != nil {
			log.Warnf("Unauthorized client connection attempt")
			http.Error(w, "401 Unauthorized.", http.StatusUnauthorized)
			return
		}
		s.ServeRPCRequest(w, r)
	})
	// Websocket endpoint; a missing Authorization header is tolerated
	// here because websocket clients may instead authenticate with an
	// in-band authenticate request (see ReplyToFrontend).
	serveMux.HandleFunc("/frontend", func(w http.ResponseWriter, r *http.Request) {
		authenticated := false
		if err := s.checkAuth(r); err != nil {
			// If auth was supplied but incorrect, rather than simply being
			// missing, immediately terminate the connection.
			if err != ErrNoAuth {
				log.Warnf("Disconnecting improperly authorized websocket client")
				http.Error(w, "401 Unauthorized.", http.StatusUnauthorized)
				return
			}
		} else {
			authenticated = true
		}

		// A new Server instance is created rather than just creating the
		// handler closure since the default server will disconnect the
		// client if the origin is unset.
		wsServer := websocket.Server{
			Handler: websocket.Handler(func(ws *websocket.Conn) {
				s.WSSendRecv(ws, r.RemoteAddr, authenticated)
			}),
		}
		wsServer.ServeHTTP(w, r)
	})
	// Serve on every configured listener; s.wg tracks each serving
	// goroutine so callers can wait for shutdown.
	for _, listener := range s.listeners {
		s.wg.Add(1)
		go func(listener net.Listener) {
			log.Infof("RPCS: RPC server listening on %s", listener.Addr())
			httpServer.Serve(listener)
			log.Tracef("RPCS: RPC listener done for %s", listener.Addr())
			s.wg.Done()
		}(listener)
	}
}
|
|
|
|
|
2013-12-05 17:59:08 +01:00
|
|
|
// checkAuth checks the HTTP Basic authentication supplied by a frontend
|
|
|
|
// in the HTTP request r. If the frontend's supplied authentication does
|
|
|
|
// not match the username and password expected, a non-nil error is
|
|
|
|
// returned.
|
|
|
|
//
|
|
|
|
// This check is time-constant.
|
|
|
|
func (s *server) checkAuth(r *http.Request) error {
|
|
|
|
authhdr := r.Header["Authorization"]
|
2014-04-29 01:16:51 +02:00
|
|
|
if len(authhdr) == 0 {
|
|
|
|
return ErrNoAuth
|
2013-12-05 17:59:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
authsha := sha256.Sum256([]byte(authhdr[0]))
|
|
|
|
cmp := subtle.ConstantTimeCompare(authsha[:], s.authsha[:])
|
|
|
|
if cmp != 1 {
|
2014-03-20 15:07:05 +01:00
|
|
|
return ErrBadAuth
|
2013-12-05 17:59:08 +01:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-01-03 19:34:37 +01:00
|
|
|
// BtcdWS opens a websocket connection to a btcd instance.
|
|
|
|
func BtcdWS(certificates []byte) (*websocket.Conn, error) {
|
2014-02-03 17:50:11 +01:00
|
|
|
url := fmt.Sprintf("wss://%s/ws", cfg.Connect)
|
2013-11-19 18:21:54 +01:00
|
|
|
config, err := websocket.NewConfig(url, "https://localhost/")
|
2013-10-03 15:11:35 +02:00
|
|
|
if err != nil {
|
2014-01-03 19:34:37 +01:00
|
|
|
return nil, err
|
2013-10-03 15:11:35 +02:00
|
|
|
}
|
2013-11-19 18:21:54 +01:00
|
|
|
|
2014-01-03 19:34:37 +01:00
|
|
|
// btcd uses a self-signed TLS certifiate which is used as the CA.
|
2013-11-19 18:21:54 +01:00
|
|
|
pool := x509.NewCertPool()
|
|
|
|
pool.AppendCertsFromPEM(certificates)
|
|
|
|
config.TlsConfig = &tls.Config{
|
2013-11-21 22:41:15 +01:00
|
|
|
RootCAs: pool,
|
|
|
|
MinVersion: tls.VersionTLS12,
|
2013-11-19 18:21:54 +01:00
|
|
|
}
|
|
|
|
|
2014-01-03 19:34:37 +01:00
|
|
|
// btcd requires basic authorization, so set the Authorization header.
|
2013-11-19 18:21:54 +01:00
|
|
|
login := cfg.Username + ":" + cfg.Password
|
2013-11-20 00:25:42 +01:00
|
|
|
auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login))
|
2013-10-03 15:11:35 +02:00
|
|
|
config.Header.Add("Authorization", auth)
|
|
|
|
|
2014-01-03 19:34:37 +01:00
|
|
|
// Dial connection.
|
|
|
|
var ws *websocket.Conn
|
2013-11-20 02:47:15 +01:00
|
|
|
var cerr error
|
|
|
|
if cfg.Proxy != "" {
|
|
|
|
proxy := &socks.Proxy{
|
|
|
|
Addr: cfg.Proxy,
|
|
|
|
Username: cfg.ProxyUser,
|
|
|
|
Password: cfg.ProxyPass,
|
|
|
|
}
|
|
|
|
conn, err := proxy.Dial("tcp", cfg.Connect)
|
|
|
|
if err != nil {
|
2014-01-03 19:34:37 +01:00
|
|
|
return nil, err
|
2013-11-20 02:47:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
tlsConn := tls.Client(conn, config.TlsConfig)
|
2014-01-03 19:34:37 +01:00
|
|
|
ws, cerr = websocket.NewClient(config, tlsConn)
|
2013-11-20 02:47:15 +01:00
|
|
|
} else {
|
2014-01-03 19:34:37 +01:00
|
|
|
ws, cerr = websocket.DialConfig(config)
|
2013-11-20 02:47:15 +01:00
|
|
|
}
|
|
|
|
if cerr != nil {
|
2014-01-03 19:34:37 +01:00
|
|
|
return nil, cerr
|
2013-09-09 20:14:57 +02:00
|
|
|
}
|
2014-01-03 19:34:37 +01:00
|
|
|
return ws, nil
|
|
|
|
}
|
2013-11-21 21:01:23 +01:00
|
|
|
|
2014-01-03 19:34:37 +01:00
|
|
|
// BtcdConnect connects to a running btcd instance over a websocket
|
|
|
|
// for sending and receiving chain-related messages, failing if the
|
|
|
|
// connection cannot be established or is lost.
|
|
|
|
func BtcdConnect(certificates []byte) (*BtcdRPCConn, error) {
|
|
|
|
// Open websocket connection.
|
|
|
|
ws, err := BtcdWS(certificates)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("Cannot open websocket connection to btcd: %v", err)
|
|
|
|
return nil, err
|
2013-11-21 21:01:23 +01:00
|
|
|
}
|
|
|
|
|
2014-01-03 19:34:37 +01:00
|
|
|
// Create and start RPC connection using the btcd websocket.
|
|
|
|
rpc := NewBtcdRPCConn(ws)
|
|
|
|
rpc.Start()
|
|
|
|
return rpc, nil
|
2013-10-07 18:35:32 +02:00
|
|
|
}
|
|
|
|
|
2014-01-03 19:34:37 +01:00
|
|
|
// Handshake first checks that the websocket connection between btcwallet and
|
|
|
|
// btcd is valid, that is, that there are no mismatching settings between
|
|
|
|
// the two processes (such as running on different Bitcoin networks). If the
|
|
|
|
// sanity checks pass, all wallets are set to be tracked against chain
|
|
|
|
// notifications from this btcd connection.
|
2013-12-06 21:37:07 +01:00
|
|
|
//
|
|
|
|
// TODO(jrick): Track and Rescan commands should be replaced with a
|
|
|
|
// single TrackSince function (or similar) which requests address
|
|
|
|
// notifications and performs the rescan since some block height.
|
2014-01-30 16:14:02 +01:00
|
|
|
func Handshake(rpc ServerConn) error {
|
2014-01-03 19:34:37 +01:00
|
|
|
net, jsonErr := GetCurrentNet(rpc)
|
|
|
|
if jsonErr != nil {
|
|
|
|
return jsonErr
|
2013-10-07 18:35:32 +02:00
|
|
|
}
|
2014-01-03 19:34:37 +01:00
|
|
|
if net != cfg.Net() {
|
2013-11-21 21:01:23 +01:00
|
|
|
return errors.New("btcd and btcwallet running on different Bitcoin networks")
|
2013-10-07 18:35:32 +02:00
|
|
|
}
|
|
|
|
|
2014-01-17 22:45:40 +01:00
|
|
|
// Request notifications for connected and disconnected blocks.
|
|
|
|
NotifyBlocks(rpc)
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// Get current best block. If this is before than the oldest
|
|
|
|
// saved block hash, assume that this btcd instance is not yet
|
|
|
|
// synced up to a previous btcd that was last used with this
|
|
|
|
// wallet.
|
|
|
|
bs, err := GetCurBlock()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("cannot get best block: %v", err)
|
|
|
|
}
|
2014-02-18 04:18:30 +01:00
|
|
|
NotifyNewBlockChainHeight(allClients, bs)
|
|
|
|
NotifyBalances(allClients)
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
|
2013-12-09 22:51:07 +01:00
|
|
|
// Get default account. Only the default account is used to
|
|
|
|
// track recently-seen blocks.
|
2014-01-30 16:14:02 +01:00
|
|
|
a, err := AcctMgr.Account("")
|
2013-12-09 22:51:07 +01:00
|
|
|
if err != nil {
|
2014-01-10 01:20:11 +01:00
|
|
|
// No account yet is not a handshake error, but means our
|
|
|
|
// handshake is done.
|
|
|
|
return nil
|
2013-12-09 22:51:07 +01:00
|
|
|
}
|
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// TODO(jrick): if height is less than the earliest-saved block
|
|
|
|
// height, should probably wait for btcd to catch up.
|
2013-12-02 20:56:06 +01:00
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// Check that there was not any reorgs done since last connection.
|
|
|
|
// If so, rollback and rescan to catch up.
|
|
|
|
it := a.Wallet.NewIterateRecentBlocks()
|
|
|
|
for cont := it != nil; cont; cont = it.Prev() {
|
|
|
|
bs := it.BlockStamp()
|
|
|
|
log.Debugf("Checking for previous saved block with height %v hash %v",
|
|
|
|
bs.Height, bs.Hash)
|
2013-10-29 07:19:40 +01:00
|
|
|
|
Another day, another tx store implementation.
The last transaction store was a great example of how not to write
scalable software. For a variety of reasons, it was very slow at
processing transaction inserts. Among them:
1) Every single transaction record being saved in a linked list
(container/list), and inserting into this list would be an O(n)
operation so that records could be ordered by receive date.
2) Every single transaction in the above mentioned list was iterated
over in order to find double spends which must be removed. It is
silly to do this check for mined transactions, which already have
been checked for this by btcd. Worse yet, if double spends were
found, the list would be iterated a second (or third, or fourth)
time for each removed transaction.
3) All spend tracking for signed-by-wallet transactions was found on
each transaction insert, even if the now spent previous transaction
outputs were known by the caller.
This list could keep going on, but you get the idea. It was bad.
To resolve these issues a new transaction store had to be implemented.
The new implementation:
1) Tracks mined and unmined transactions in different data structures.
Mined transactions are cheap to track because the required double
spend checks have already been performed by the chain server, and
double spend checks are only required to be performed on
newly-inserted mined transactions which may conflict with previous
unmined transactions.
2) Saves mined transactions grouped by block first, and then by their
transaction index. Lookup keys for mined transactions are simply
the block height (in the best chain, that's all we save) and index
of the transaction in the block. This makes looking up any
arbitrary transaction almost an O(1) operation (almost, because
block height and block indexes are mapped to their slice indexes
with a Go map).
3) Saves records in each transaction for whether the outputs are
wallet credits (spendable by wallet) and for whether inputs debit
from previous credits. Both structures point back to the source
or spender (credits point to the transaction that spends them, or
nil for unspent credits, and debits include keys to lookup the
transaction credits they spent. While complicated to keep track
of, this greatly simplifies the spent tracking for transactions
across rollbacks and transaction removals.
4) Implements double spend checking as an almost O(1) operation. A
Go map is used to map each previous outpoint for all unconfirmed
transactions to the unconfirmed tx record itself. Checking for
double spends on confirmed transaction inserts only involves
looking up each previous outpoint of the inserted tx in this map.
If a double spend is found, removal is simplified by only
removing the transaction and its spend chain from store maps,
rather than iterating a linked list several times over to remove
each dead transaction in the spend chain.
5) Allows the caller to specify the previous credits which are spent
by a debiting transaction. When a transaction is created by
wallet, the previous outputs are already known, and by passing
their record types to the AddDebits method, lookups for each
previously unspent credit are omitted.
6) Bookkeeps all blocks with transactions with unspent credits, and
bookkeeps the transaction indexes of all transactions with unspent
outputs for a single block. For the case where the caller adding a
debit record does not know what credits a transaction debits from,
these bookkeeping structures allow the store to only consider known
unspent transactions, rather than searching through both spent and
unspents.
7) Saves amount deltas for the entire balance as a result of each
block, due to transactions within that block. This improves the
performance of calculating the full balance by not needing to
iterate over every transaction, and then every credit, to determine
if a credit is spent or unspent. When transactions are moved from
unconfirmed to a block structure, the amount deltas are incremented
by the amount of all transaction credits (both spent and unspent)
and debited by the total amount the transaction spends from
previous wallet credits. For the common case of calculating a
balance with just one confirmation, the only involves iterating
over each block structure and adding the (possibly negative)
amount delta. Coinbase rewards are saved similarly, but with a
different amount variable so they can be seperatly included or
excluded.
Due to all of the changes in how the store internally works, the
serialization format has changed. To simplify the serialization
logic, support for reading the last store file version has been
removed. Past this change, a rescan (run automatically) will be
required to rebuild the transaction history.
2014-05-05 23:12:05 +02:00
|
|
|
_, jsonErr := GetBlock(rpc, bs.Hash.String())
|
|
|
|
if jsonErr != nil {
|
2013-12-06 21:37:07 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Debug("Found matching block.")
|
|
|
|
|
|
|
|
// If we had to go back to any previous blocks (it.Next
|
|
|
|
// returns true), then rollback the next and all child blocks.
|
|
|
|
// This rollback is done here instead of in the blockMissing
|
|
|
|
// check above for each removed block because Rollback will
|
|
|
|
// try to write new tx and utxo files on each rollback.
|
|
|
|
if it.Next() {
|
|
|
|
bs := it.BlockStamp()
|
2014-01-30 16:14:02 +01:00
|
|
|
AcctMgr.Rollback(bs.Height, &bs.Hash)
|
2013-12-06 21:37:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Set default account to be marked in sync with the current
|
|
|
|
// blockstamp. This invalidates the iterator.
|
|
|
|
a.Wallet.SetSyncedWith(bs)
|
|
|
|
|
|
|
|
// Begin tracking wallets against this btcd instance.
|
2014-01-30 16:14:02 +01:00
|
|
|
AcctMgr.Track()
|
Another day, another tx store implementation.
The last transaction store was a great example of how not to write
scalable software. For a variety of reasons, it was very slow at
processing transaction inserts. Among them:
1) Every single transaction record being saved in a linked list
(container/list), and inserting into this list would be an O(n)
operation so that records could be ordered by receive date.
2) Every single transaction in the above mentioned list was iterated
over in order to find double spends which must be removed. It is
silly to do this check for mined transactions, which already have
been checked for this by btcd. Worse yet, if double spends were
found, the list would be iterated a second (or third, or fourth)
time for each removed transaction.
3) All spend tracking for signed-by-wallet transactions was found on
each transaction insert, even if the now spent previous transaction
outputs were known by the caller.
This list could keep going on, but you get the idea. It was bad.
To resolve these issues a new transaction store had to be implemented.
The new implementation:
1) Tracks mined and unmined transactions in different data structures.
Mined transactions are cheap to track because the required double
spend checks have already been performed by the chain server, and
double spend checks are only required to be performed on
newly-inserted mined transactions which may conflict with previous
unmined transactions.
2) Saves mined transactions grouped by block first, and then by their
transaction index. Lookup keys for mined transactions are simply
the block height (in the best chain, that's all we save) and index
of the transaction in the block. This makes looking up any
arbitrary transaction almost an O(1) operation (almost, because
block height and block indexes are mapped to their slice indexes
with a Go map).
3) Saves records in each transaction for whether the outputs are
wallet credits (spendable by wallet) and for whether inputs debit
from previous credits. Both structures point back to the source
or spender (credits point to the transaction that spends them, or
nil for unspent credits, and debits include keys to lookup the
transaction credits they spent. While complicated to keep track
of, this greatly simplifies the spent tracking for transactions
across rollbacks and transaction removals.
4) Implements double spend checking as an almost O(1) operation. A
Go map is used to map each previous outpoint for all unconfirmed
transactions to the unconfirmed tx record itself. Checking for
double spends on confirmed transaction inserts only involves
looking up each previous outpoint of the inserted tx in this map.
If a double spend is found, removal is simplified by only
removing the transaction and its spend chain from store maps,
rather than iterating a linked list several times over to remove
each dead transaction in the spend chain.
5) Allows the caller to specify the previous credits which are spent
by a debiting transaction. When a transaction is created by
wallet, the previous outputs are already known, and by passing
their record types to the AddDebits method, lookups for each
previously unspent credit are omitted.
6) Bookkeeps all blocks with transactions with unspent credits, and
bookkeeps the transaction indexes of all transactions with unspent
outputs for a single block. For the case where the caller adding a
debit record does not know what credits a transaction debits from,
these bookkeeping structures allow the store to only consider known
unspent transactions, rather than searching through both spent and
unspents.
7) Saves amount deltas for the entire balance as a result of each
block, due to transactions within that block. This improves the
performance of calculating the full balance by not needing to
iterate over every transaction, and then every credit, to determine
if a credit is spent or unspent. When transactions are moved from
unconfirmed to a block structure, the amount deltas are incremented
by the amount of all transaction credits (both spent and unspent)
and debited by the total amount the transaction spends from
previous wallet credits. For the common case of calculating a
balance with just one confirmation, the only involves iterating
over each block structure and adding the (possibly negative)
amount delta. Coinbase rewards are saved similarly, but with a
different amount variable so they can be seperatly included or
excluded.
Due to all of the changes in how the store internally works, the
serialization format has changed. To simplify the serialization
logic, support for reading the last store file version has been
removed. Past this change, a rescan (run automatically) will be
required to rebuild the transaction history.
2014-05-05 23:12:05 +02:00
|
|
|
if err := AcctMgr.RescanActiveAddresses(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// TODO: Only begin tracking new unspent outputs as a result
|
|
|
|
// of the rescan. This is also pretty racy, as a new block
|
|
|
|
// could arrive between rescan and by the time the new outpoint
|
|
|
|
// is added to btcd's websocket's unspent output set.
|
|
|
|
AcctMgr.Track()
|
2013-12-06 21:37:07 +01:00
|
|
|
|
|
|
|
// (Re)send any unmined transactions to btcd in case of a btcd restart.
|
2014-02-24 20:35:30 +01:00
|
|
|
AcctMgr.ResendUnminedTxs()
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
// Get current blockchain height and best block hash.
|
|
|
|
return nil
|
Implement address rescanning.
When a wallet is opened, a rescan request will be sent to btcd with
all active addresses from the wallet, to rescan from the last synced
block (now saved to the wallet file) and the current best block.
As multi-account support is further explored, rescan requests should
be batched together to send a single request for all addresses from
all wallets.
This change introduces several changes to the wallet, tx, and utxo
files. Wallet files are still compatible, however, a rescan will try
to start at the genesis block since no correct "last synced to" or
"created at block X" was saved. The tx and utxo files, however, are
not compatible and should be deleted (or an error will occur on read).
If any errors occur opening the utxo file, a rescan will start
beginning at the creation block saved in the wallet.
2013-10-30 02:22:14 +01:00
|
|
|
}
|
2013-11-21 21:01:23 +01:00
|
|
|
|
2013-12-06 21:37:07 +01:00
|
|
|
log.Warnf("None of the previous saved blocks in btcd chain. Must perform full rescan.")
|
|
|
|
|
|
|
|
// Iterator was invalid (wallet has never been synced) or there was a
|
|
|
|
// huge chain fork + reorg (more than 20 blocks). Since we don't know
|
|
|
|
// what block (if any) this wallet is synced to, roll back everything
|
|
|
|
// and start a new rescan since the earliest block wallet must know
|
|
|
|
// about.
|
|
|
|
a.fullRescan = true
|
2014-01-30 16:14:02 +01:00
|
|
|
AcctMgr.Track()
|
|
|
|
AcctMgr.RescanActiveAddresses()
|
Another day, another tx store implementation.
The last transaction store was a great example of how not to write
scalable software. For a variety of reasons, it was very slow at
processing transaction inserts. Among them:
1) Every single transaction record being saved in a linked list
(container/list), and inserting into this list would be an O(n)
operation so that records could be ordered by receive date.
2) Every single transaction in the above mentioned list was iterated
over in order to find double spends which must be removed. It is
silly to do this check for mined transactions, which already have
been checked for this by btcd. Worse yet, if double spends were
found, the list would be iterated a second (or third, or fourth)
time for each removed transaction.
3) All spend tracking for signed-by-wallet transactions was found on
each transaction insert, even if the now spent previous transaction
outputs were known by the caller.
This list could keep going on, but you get the idea. It was bad.
To resolve these issues a new transaction store had to be implemented.
The new implementation:
1) Tracks mined and unmined transactions in different data structures.
Mined transactions are cheap to track because the required double
spend checks have already been performed by the chain server, and
double spend checks are only required to be performed on
newly-inserted mined transactions which may conflict with previous
unmined transactions.
2) Saves mined transactions grouped by block first, and then by their
transaction index. Lookup keys for mined transactions are simply
the block height (in the best chain, that's all we save) and index
of the transaction in the block. This makes looking up any
arbitrary transaction almost an O(1) operation (almost, because
block height and block indexes are mapped to their slice indexes
with a Go map).
3) Saves records in each transaction for whether the outputs are
wallet credits (spendable by wallet) and for whether inputs debit
from previous credits. Both structures point back to the source
or spender (credits point to the transaction that spends them, or
nil for unspent credits, and debits include keys to lookup the
transaction credits they spent. While complicated to keep track
of, this greatly simplifies the spent tracking for transactions
across rollbacks and transaction removals.
4) Implements double spend checking as an almost O(1) operation. A
Go map is used to map each previous outpoint for all unconfirmed
transactions to the unconfirmed tx record itself. Checking for
double spends on confirmed transaction inserts only involves
looking up each previous outpoint of the inserted tx in this map.
If a double spend is found, removal is simplified by only
removing the transaction and its spend chain from store maps,
rather than iterating a linked list several times over to remove
each dead transaction in the spend chain.
5) Allows the caller to specify the previous credits which are spent
by a debiting transaction. When a transaction is created by
wallet, the previous outputs are already known, and by passing
their record types to the AddDebits method, lookups for each
previously unspent credit are omitted.
6) Bookkeeps all blocks with transactions with unspent credits, and
bookkeeps the transaction indexes of all transactions with unspent
outputs for a single block. For the case where the caller adding a
debit record does not know what credits a transaction debits from,
these bookkeeping structures allow the store to only consider known
unspent transactions, rather than searching through both spent and
unspents.
7) Saves amount deltas for the entire balance as a result of each
block, due to transactions within that block. This improves the
performance of calculating the full balance by not needing to
iterate over every transaction, and then every credit, to determine
if a credit is spent or unspent. When transactions are moved from
unconfirmed to a block structure, the amount deltas are incremented
by the amount of all transaction credits (both spent and unspent)
and debited by the total amount the transaction spends from
previous wallet credits. For the common case of calculating a
balance with just one confirmation, the only involves iterating
over each block structure and adding the (possibly negative)
amount delta. Coinbase rewards are saved similarly, but with a
different amount variable so they can be seperatly included or
excluded.
Due to all of the changes in how the store internally works, the
serialization format has changed. To simplify the serialization
logic, support for reading the last store file version has been
removed. Past this change, a rescan (run automatically) will be
required to rebuild the transaction history.
2014-05-05 23:12:05 +02:00
|
|
|
// TODO: only begin tracking new unspent outputs as a result of the
|
|
|
|
// rescan. This is also racy (see comment for second Track above).
|
|
|
|
AcctMgr.Track()
|
2014-02-24 20:35:30 +01:00
|
|
|
AcctMgr.ResendUnminedTxs()
|
2013-11-21 21:01:23 +01:00
|
|
|
return nil
|
2013-08-21 16:37:30 +02:00
|
|
|
}
|