Compare commits

..

6 commits

Author SHA1 Message Date
Jonathan Moody
d2193e980a
blockchain.transaction.broadcast implementation (#80)
* Generate secondary hashXNotification(s) on every headerNotification.

* Attempt LBCD connection with rpc.Client.

* Optional --daemon-url.

* Correct HashXStatusKey field. Should be HASHX_LEN.

* Connect to lbcd using lbcd/rpcclient library.

* Handle deprecation of node.js 12 actions.

* Add --daemon-ca-path argument and establish HTTPS connection if specified.

* Remove dead code. Tighten definition of TransactionBroadcastReq.

* Correct default daemon URL.
2022-12-16 13:54:19 -06:00
Jeffrey Picard
711c4b4b7b
WIP: Json rpc federation, search/getclaimbyid, and shutdown (#76)
* Cleanup shutdown and peers subscribe

this has errors currently, need to figure out data race

* fixed data race finally....

* getclaimbyid and search

* hook jsonrpc peers subscribe into current federation

* cleanup some peers stuff

* remove commented code

* tie into session manager in jsonrpc peers subscribe

* use int for port args

* cleanup and fix a bunch of compiler warnings

* use logrus everywhere

* cleanup test
2022-12-07 11:01:36 -05:00
Jonathan Moody
317cdf7129
WIP: blockchain.transaction.yyy JSON RPC implementations (#78)
* Partial blockchain.transaction.yyy RPC implementations.

* Register RPC service object.

* Move session manager start/stop to a better place.

* Attempt to fill in the details of transaction.get_batch,
including merkle path.

* Correct interpretation of DBStateValue Genesis hash.

* Convert Args.Port to int and validate. Run UDP ping server on JSONRPCPort too.

* Add BlockHeader to HeightHash notification.

* Limit session-based JSON RPC service to IPv4.
Client not ready for IPv6.

* Adapt to new HeightHash struct.

* Fine tune JSON RPC handlers and types to match lbry-sdk expectations.
Implement UnmarshalJSON()/MarshalJSON() for several types.

* Add more special handling of DBStateValue.Genesis hash.

* Set IncludeStop=false generally to avoid returning extra rows.
Other misc fixes.
2022-12-06 16:14:28 -05:00
Jonathan Moody
e070e8a51e
JSON RPC compatibility workarounds to support lbry-sdk (#75)
* Fix log spam (Already shutting down...)

* Workaround to allow lbry-sdk to call server.version and server.features.
Incoming/outgoing JSON is patched using yet another codec (jsonPatchingCodec).
Add more logging of raw/patched JSON.

* Elaborate comment on jsonPatchingCodec.
2022-10-29 18:42:24 +03:00
Jeffrey Picard
cd7f20a461
Server endpoints goroutine refactor (#69)
* server.xxx endpoints

Additional server endpoints in jsonrpc and also some refactoring

* server.banner

* more endpoints

* use lbry.go stop pattern

* set genesis hash properly

* updates and refactors

* remove shutdowncalled and itmut

* remove OpenIterators

* remove shutdown and done channels from db and use stop group

* currently broken, incorporated stop groups into the session manager

* set the rest of the default args for tests

* add default json rpc http port and cleanup

* tests for new jsonrpc endpoints

* cleanup and add manage goroutine to stopper pattern

* cleanup

* NewDebug

* asdf

* fix
2022-10-25 08:48:13 +03:00
Jeffrey Picard
b85556499f v0.2022.10.05.1 2022-10-05 06:25:42 +03:00
33 changed files with 1682 additions and 569 deletions

View file

@ -7,6 +7,6 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v3
- name: Build and Test - name: Build and Test
uses: ./ uses: ./

View file

@ -8,7 +8,6 @@ import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"os" "os"
"sync"
"time" "time"
"github.com/lbryio/herald.go/db/prefixes" "github.com/lbryio/herald.go/db/prefixes"
@ -16,6 +15,7 @@ import (
"github.com/lbryio/herald.go/internal" "github.com/lbryio/herald.go/internal"
"github.com/lbryio/herald.go/internal/metrics" "github.com/lbryio/herald.go/internal/metrics"
pb "github.com/lbryio/herald.go/protobuf/go" pb "github.com/lbryio/herald.go/protobuf/go"
"github.com/lbryio/lbry.go/v3/extras/stop"
"github.com/linxGnu/grocksdb" "github.com/linxGnu/grocksdb"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -59,11 +59,7 @@ type ReadOnlyDBColumnFamily struct {
BlockedChannels map[string][]byte BlockedChannels map[string][]byte
FilteredStreams map[string][]byte FilteredStreams map[string][]byte
FilteredChannels map[string][]byte FilteredChannels map[string][]byte
OpenIterators map[string][]chan struct{} Grp *stop.Group
ItMut sync.RWMutex
ShutdownChan chan struct{}
DoneChan chan struct{}
ShutdownCalled bool
Cleanup func() Cleanup func()
} }
@ -339,20 +335,11 @@ func interruptRequested(interrupted <-chan struct{}) bool {
func IterCF(db *grocksdb.DB, opts *IterOptions) <-chan *prefixes.PrefixRowKV { func IterCF(db *grocksdb.DB, opts *IterOptions) <-chan *prefixes.PrefixRowKV {
ch := make(chan *prefixes.PrefixRowKV) ch := make(chan *prefixes.PrefixRowKV)
iterKey := fmt.Sprintf("%p", opts) // Check if we've been told to shutdown in between getting created and getting here
if opts.DB != nil { if opts.Grp != nil && interruptRequested(opts.Grp.Ch()) {
opts.DB.ItMut.Lock() opts.Grp.Done()
// There is a tiny chance that we were wating on the above lock while shutdown was
// being called and by the time we get it the db has already notified all active
// iterators to shutdown. In this case we go to the else branch.
if !opts.DB.ShutdownCalled {
opts.DB.OpenIterators[iterKey] = []chan struct{}{opts.DoneChan, opts.ShutdownChan}
opts.DB.ItMut.Unlock()
} else {
opts.DB.ItMut.Unlock()
return ch return ch
} }
}
ro := grocksdb.NewDefaultReadOptions() ro := grocksdb.NewDefaultReadOptions()
ro.SetFillCache(opts.FillCache) ro.SetFillCache(opts.FillCache)
@ -369,11 +356,9 @@ func IterCF(db *grocksdb.DB, opts *IterOptions) <-chan *prefixes.PrefixRowKV {
it.Close() it.Close()
close(ch) close(ch)
ro.Destroy() ro.Destroy()
if opts.DB != nil { if opts.Grp != nil {
opts.DoneChan <- struct{}{} // opts.Grp.DoneNamed(iterKey)
opts.DB.ItMut.Lock() opts.Grp.Done()
delete(opts.DB.OpenIterators, iterKey)
opts.DB.ItMut.Unlock()
} }
}() }()
@ -394,7 +379,7 @@ func IterCF(db *grocksdb.DB, opts *IterOptions) <-chan *prefixes.PrefixRowKV {
if kv = opts.ReadRow(&prevKey); kv != nil { if kv = opts.ReadRow(&prevKey); kv != nil {
ch <- kv ch <- kv
} }
if interruptRequested(opts.ShutdownChan) { if opts.Grp != nil && interruptRequested(opts.Grp.Ch()) {
return return
} }
} }
@ -456,8 +441,8 @@ func (db *ReadOnlyDBColumnFamily) selectFrom(prefix []byte, startKey, stopKey pr
// Prefix and handle // Prefix and handle
options := NewIterateOptions().WithDB(db).WithPrefix(prefix).WithCfHandle(handle) options := NewIterateOptions().WithDB(db).WithPrefix(prefix).WithCfHandle(handle)
// Start and stop bounds // Start and stop bounds
options = options.WithStart(startKey.PackKey()).WithStop(stopKey.PackKey()).WithIncludeStop(true) options = options.WithStart(startKey.PackKey()).WithStop(stopKey.PackKey()).WithIncludeStop(false)
// Don't include the key // Include the key and value
options = options.WithIncludeKey(true).WithIncludeValue(true) options = options.WithIncludeKey(true).WithIncludeValue(true)
return []*IterOptions{options}, nil return []*IterOptions{options}, nil
} }
@ -470,7 +455,7 @@ func iterate(db *grocksdb.DB, opts []*IterOptions) <-chan []*prefixes.PrefixRowK
for kv := range IterCF(db, o) { for kv := range IterCF(db, o) {
row := make([]*prefixes.PrefixRowKV, 0, 1) row := make([]*prefixes.PrefixRowKV, 0, 1)
row = append(row, kv) row = append(row, kv)
log.Debugf("iterate[%v][%v] %#v", i, j, kv) log.Debugf("iterate[%v][%v] %#v -> %#v", i, j, kv.Key, kv.Value)
out <- row out <- row
j++ j++
} }
@ -496,7 +481,7 @@ func innerJoin(db *grocksdb.DB, in <-chan []*prefixes.PrefixRowKV, selectFn func
row = append(row, kvs...) row = append(row, kvs...)
row = append(row, kv...) row = append(row, kv...)
for i, kv := range row { for i, kv := range row {
log.Debugf("row[%v] %#v", i, kv) log.Debugf("row[%v] %#v -> %#v", i, kv.Key, kv.Value)
} }
out <- row out <- row
} }
@ -546,7 +531,7 @@ func GetWriteDBCF(name string) (*grocksdb.DB, []*grocksdb.ColumnFamilyHandle, er
} }
// GetProdDB returns a db that is used for production. // GetProdDB returns a db that is used for production.
func GetProdDB(name string, secondaryPath string) (*ReadOnlyDBColumnFamily, func(), error) { func GetProdDB(name string, secondaryPath string, grp *stop.Group) (*ReadOnlyDBColumnFamily, error) {
prefixNames := prefixes.GetPrefixes() prefixNames := prefixes.GetPrefixes()
// additional prefixes that aren't in the code explicitly // additional prefixes that aren't in the code explicitly
cfNames := []string{"default", "e", "d", "c"} cfNames := []string{"default", "e", "d", "c"}
@ -555,7 +540,7 @@ func GetProdDB(name string, secondaryPath string) (*ReadOnlyDBColumnFamily, func
cfNames = append(cfNames, cfName) cfNames = append(cfNames, cfName)
} }
db, err := GetDBColumnFamilies(name, secondaryPath, cfNames) db, err := GetDBColumnFamilies(name, secondaryPath, cfNames, grp)
cleanupFiles := func() { cleanupFiles := func() {
err = os.RemoveAll(secondaryPath) err = os.RemoveAll(secondaryPath)
@ -565,7 +550,8 @@ func GetProdDB(name string, secondaryPath string) (*ReadOnlyDBColumnFamily, func
} }
if err != nil { if err != nil {
return nil, cleanupFiles, err cleanupFiles()
return nil, err
} }
cleanupDB := func() { cleanupDB := func() {
@ -574,11 +560,11 @@ func GetProdDB(name string, secondaryPath string) (*ReadOnlyDBColumnFamily, func
} }
db.Cleanup = cleanupDB db.Cleanup = cleanupDB
return db, cleanupDB, nil return db, nil
} }
// GetDBColumnFamilies gets a db with the specified column families and secondary path. // GetDBColumnFamilies gets a db with the specified column families and secondary path.
func GetDBColumnFamilies(name string, secondayPath string, cfNames []string) (*ReadOnlyDBColumnFamily, error) { func GetDBColumnFamilies(name string, secondayPath string, cfNames []string, grp *stop.Group) (*ReadOnlyDBColumnFamily, error) {
opts := grocksdb.NewDefaultOptions() opts := grocksdb.NewDefaultOptions()
roOpts := grocksdb.NewDefaultReadOptions() roOpts := grocksdb.NewDefaultReadOptions()
cfOpt := grocksdb.NewDefaultOptions() cfOpt := grocksdb.NewDefaultOptions()
@ -593,6 +579,7 @@ func GetDBColumnFamilies(name string, secondayPath string, cfNames []string) (*R
// db, handles, err := grocksdb.OpenDbColumnFamilies(opts, name, cfNames, cfOpts) // db, handles, err := grocksdb.OpenDbColumnFamilies(opts, name, cfNames, cfOpts)
if err != nil { if err != nil {
log.Errorf("open db as secondary failed: %v", err)
return nil, err return nil, err
} }
@ -614,11 +601,7 @@ func GetDBColumnFamilies(name string, secondayPath string, cfNames []string) (*R
LastState: nil, LastState: nil,
Height: 0, Height: 0,
Headers: nil, Headers: nil,
OpenIterators: make(map[string][]chan struct{}), Grp: grp,
ItMut: sync.RWMutex{},
ShutdownChan: make(chan struct{}, 1),
ShutdownCalled: false,
DoneChan: make(chan struct{}, 1),
} }
err = myDB.ReadDBState() //TODO: Figure out right place for this err = myDB.ReadDBState() //TODO: Figure out right place for this
@ -687,24 +670,6 @@ func (db *ReadOnlyDBColumnFamily) Unwind() {
// Shutdown shuts down the db. // Shutdown shuts down the db.
func (db *ReadOnlyDBColumnFamily) Shutdown() { func (db *ReadOnlyDBColumnFamily) Shutdown() {
log.Println("Sending message to ShutdownChan...")
db.ShutdownChan <- struct{}{}
log.Println("Locking iterator mutex...")
db.ItMut.Lock()
log.Println("Setting ShutdownCalled to true...")
db.ShutdownCalled = true
log.Println("Notifying iterators to shutdown...")
for _, it := range db.OpenIterators {
it[1] <- struct{}{}
}
log.Println("Waiting for iterators to shutdown...")
for _, it := range db.OpenIterators {
<-it[0]
}
log.Println("Unlocking iterator mutex...")
db.ItMut.Unlock()
log.Println("Sending message to DoneChan...")
<-db.DoneChan
log.Println("Calling cleanup...") log.Println("Calling cleanup...")
db.Cleanup() db.Cleanup()
log.Println("Leaving Shutdown...") log.Println("Leaving Shutdown...")
@ -714,12 +679,13 @@ func (db *ReadOnlyDBColumnFamily) Shutdown() {
// to keep the db readonly view up to date and handle reorgs on the // to keep the db readonly view up to date and handle reorgs on the
// blockchain. // blockchain.
func (db *ReadOnlyDBColumnFamily) RunDetectChanges(notifCh chan<- interface{}) { func (db *ReadOnlyDBColumnFamily) RunDetectChanges(notifCh chan<- interface{}) {
db.Grp.Add(1)
go func() { go func() {
lastPrint := time.Now() lastPrint := time.Now()
for { for {
// FIXME: Figure out best sleep interval // FIXME: Figure out best sleep interval
if time.Since(lastPrint) > time.Second { if time.Since(lastPrint) > time.Second {
log.Debug("DetectChanges:", db.LastState) log.Debugf("DetectChanges: %#v", db.LastState)
lastPrint = time.Now() lastPrint = time.Now()
} }
err := db.detectChanges(notifCh) err := db.detectChanges(notifCh)
@ -727,8 +693,8 @@ func (db *ReadOnlyDBColumnFamily) RunDetectChanges(notifCh chan<- interface{}) {
log.Infof("Error detecting changes: %#v", err) log.Infof("Error detecting changes: %#v", err)
} }
select { select {
case <-db.ShutdownChan: case <-db.Grp.Ch():
db.DoneChan <- struct{}{} db.Grp.Done()
return return
case <-time.After(time.Millisecond * 10): case <-time.After(time.Millisecond * 10):
} }
@ -809,7 +775,12 @@ func (db *ReadOnlyDBColumnFamily) detectChanges(notifCh chan<- interface{}) erro
log.Info("error getting block hash: ", err) log.Info("error getting block hash: ", err)
return err return err
} }
notifCh <- &internal.HeightHash{Height: uint64(height), BlockHash: hash} header, err := db.GetHeader(height)
if err != nil {
log.Info("error getting block header: ", err)
return err
}
notifCh <- &internal.HeightHash{Height: uint64(height), BlockHash: hash, BlockHeader: header}
} }
//TODO: ClearCache //TODO: ClearCache
log.Warn("implement cache clearing") log.Warn("implement cache clearing")

View file

@ -3,16 +3,19 @@ package db
// db_get.go contains the basic access functions to the database. // db_get.go contains the basic access functions to the database.
import ( import (
"bytes"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"log"
"math" "math"
"github.com/lbryio/herald.go/db/prefixes" "github.com/lbryio/herald.go/db/prefixes"
"github.com/lbryio/herald.go/db/stack" "github.com/lbryio/herald.go/db/stack"
"github.com/lbryio/lbcd/chaincfg/chainhash" "github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/wire"
"github.com/linxGnu/grocksdb" "github.com/linxGnu/grocksdb"
log "github.com/sirupsen/logrus"
) )
// GetExpirationHeight returns the expiration height for the given height. Uses // GetExpirationHeight returns the expiration height for the given height. Uses
@ -65,6 +68,57 @@ func (db *ReadOnlyDBColumnFamily) GetBlockHash(height uint32) ([]byte, error) {
return rawValue, nil return rawValue, nil
} }
func (db *ReadOnlyDBColumnFamily) GetBlockTXs(height uint32) ([]*chainhash.Hash, error) {
handle, err := db.EnsureHandle(prefixes.BlockTXs)
if err != nil {
return nil, err
}
key := prefixes.BlockTxsKey{
Prefix: []byte{prefixes.BlockTXs},
Height: height,
}
slice, err := db.DB.GetCF(db.Opts, handle, key.PackKey())
defer slice.Free()
if err != nil {
return nil, err
}
if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.BlockTxsValueUnpack(rawValue)
return value.TxHashes, nil
}
func (db *ReadOnlyDBColumnFamily) GetTouchedHashXs(height uint32) ([][]byte, error) {
handle, err := db.EnsureHandle(prefixes.TouchedHashX)
if err != nil {
return nil, err
}
key := prefixes.TouchedHashXKey{
Prefix: []byte{prefixes.TouchedHashX},
Height: height,
}
slice, err := db.DB.GetCF(db.Opts, handle, key.PackKey())
defer slice.Free()
if err != nil {
return nil, err
}
if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.TouchedHashXValue{}
value.UnpackValue(rawValue)
return value.TouchedHashXs, nil
}
func (db *ReadOnlyDBColumnFamily) GetHeader(height uint32) ([]byte, error) { func (db *ReadOnlyDBColumnFamily) GetHeader(height uint32) ([]byte, error) {
handle, err := db.EnsureHandle(prefixes.Header) handle, err := db.EnsureHandle(prefixes.Header)
if err != nil { if err != nil {
@ -271,6 +325,7 @@ func (db *ReadOnlyDBColumnFamily) GetStatus(hashX []byte) ([]byte, error) {
// Lookup in HashXMempoolStatus first. // Lookup in HashXMempoolStatus first.
status, err := db.getMempoolStatus(hashX) status, err := db.getMempoolStatus(hashX)
if err == nil && status != nil { if err == nil && status != nil {
log.Debugf("(mempool) status(%#v) -> %#v", hashX, status)
return status, err return status, err
} }
@ -291,6 +346,7 @@ func (db *ReadOnlyDBColumnFamily) GetStatus(hashX []byte) ([]byte, error) {
copy(rawValue, slice.Data()) copy(rawValue, slice.Data())
value := prefixes.HashXStatusValue{} value := prefixes.HashXStatusValue{}
value.UnpackValue(rawValue) value.UnpackValue(rawValue)
log.Debugf("status(%#v) -> %#v", hashX, value.Status)
return value.Status, nil return value.Status, nil
} }
@ -299,6 +355,11 @@ func (db *ReadOnlyDBColumnFamily) GetStatus(hashX []byte) ([]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(txs) == 0 {
return []byte{}, err
}
hash := sha256.New() hash := sha256.New()
for _, tx := range txs { for _, tx := range txs {
hash.Write([]byte(fmt.Sprintf("%s:%d:", tx.TxHash.String(), tx.Height))) hash.Write([]byte(fmt.Sprintf("%s:%d:", tx.TxHash.String(), tx.Height)))
@ -731,6 +792,70 @@ func (db *ReadOnlyDBColumnFamily) FsGetClaimByHash(claimHash []byte) (*ResolveRe
) )
} }
func (db *ReadOnlyDBColumnFamily) GetTx(txhash *chainhash.Hash) ([]byte, *wire.MsgTx, error) {
// Lookup in MempoolTx first.
raw, tx, err := db.getMempoolTx(txhash)
if err == nil && raw != nil && tx != nil {
return raw, tx, err
}
handle, err := db.EnsureHandle(prefixes.Tx)
if err != nil {
return nil, nil, err
}
key := prefixes.TxKey{Prefix: []byte{prefixes.Tx}, TxHash: txhash}
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
defer slice.Free()
if err != nil {
return nil, nil, err
}
if slice.Size() == 0 {
return nil, nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.TxValue{}
value.UnpackValue(rawValue)
var msgTx wire.MsgTx
err = msgTx.Deserialize(bytes.NewReader(value.RawTx))
if err != nil {
return nil, nil, err
}
return value.RawTx, &msgTx, nil
}
func (db *ReadOnlyDBColumnFamily) getMempoolTx(txhash *chainhash.Hash) ([]byte, *wire.MsgTx, error) {
handle, err := db.EnsureHandle(prefixes.MempoolTx)
if err != nil {
return nil, nil, err
}
key := prefixes.MempoolTxKey{Prefix: []byte{prefixes.Tx}, TxHash: txhash}
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
defer slice.Free()
if err != nil {
return nil, nil, err
}
if slice.Size() == 0 {
return nil, nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.MempoolTxValue{}
value.UnpackValue(rawValue)
var msgTx wire.MsgTx
err = msgTx.Deserialize(bytes.NewReader(value.RawTx))
if err != nil {
return nil, nil, err
}
return value.RawTx, &msgTx, nil
}
func (db *ReadOnlyDBColumnFamily) GetTxCount(height uint32) (*prefixes.TxCountValue, error) { func (db *ReadOnlyDBColumnFamily) GetTxCount(height uint32) (*prefixes.TxCountValue, error) {
handle, err := db.EnsureHandle(prefixes.TxCount) handle, err := db.EnsureHandle(prefixes.TxCount)
if err != nil { if err != nil {
@ -754,6 +879,162 @@ func (db *ReadOnlyDBColumnFamily) GetTxCount(height uint32) (*prefixes.TxCountVa
return value, nil return value, nil
} }
func (db *ReadOnlyDBColumnFamily) GetTxHeight(txhash *chainhash.Hash) (uint32, error) {
handle, err := db.EnsureHandle(prefixes.TxNum)
if err != nil {
return 0, err
}
key := prefixes.TxNumKey{Prefix: []byte{prefixes.TxNum}, TxHash: txhash}
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
defer slice.Free()
if err != nil {
return 0, err
}
if slice.Size() == 0 {
return 0, nil
}
// No slice copy needed. Value will be abandoned.
value := prefixes.TxNumValueUnpack(slice.Data())
height := stack.BisectRight(db.TxCounts, []uint32{value.TxNum})[0]
return height, nil
}
type TxMerkle struct {
TxHash *chainhash.Hash
RawTx []byte
Height int
Pos uint32
Merkle []*chainhash.Hash
}
// merklePath selects specific transactions by position within blockTxs.
// The resulting merkle path (aka merkle branch, or merkle) is a list of TX hashes
// which are in sibling relationship with TX nodes on the path to the root.
func merklePath(pos uint32, blockTxs, partial []*chainhash.Hash) []*chainhash.Hash {
parent := func(p uint32) uint32 {
return p >> 1
}
sibling := func(p uint32) uint32 {
if p%2 == 0 {
return p + 1
} else {
return p - 1
}
}
p := parent(pos)
if p == 0 {
// No parent, path is complete.
return partial
}
// Add sibling to partial path and proceed to parent TX.
return merklePath(p, blockTxs, append(partial, blockTxs[sibling(pos)]))
}
func (db *ReadOnlyDBColumnFamily) GetTxMerkle(tx_hashes []chainhash.Hash) ([]TxMerkle, error) {
selectedTxNum := make([]*IterOptions, 0, len(tx_hashes))
for _, txhash := range tx_hashes {
key := prefixes.TxNumKey{Prefix: []byte{prefixes.TxNum}, TxHash: &txhash}
log.Debugf("%v", key)
opt, err := db.selectFrom(key.Prefix, &key, &key)
if err != nil {
return nil, err
}
selectedTxNum = append(selectedTxNum, opt...)
}
selectTxByTxNum := func(in []*prefixes.PrefixRowKV) ([]*IterOptions, error) {
txNumKey := in[0].Key.(*prefixes.TxNumKey)
log.Debugf("%v", txNumKey.TxHash.String())
out := make([]*IterOptions, 0, 100)
startKey := &prefixes.TxKey{
Prefix: []byte{prefixes.Tx},
TxHash: txNumKey.TxHash,
}
endKey := &prefixes.TxKey{
Prefix: []byte{prefixes.Tx},
TxHash: txNumKey.TxHash,
}
selectedTx, err := db.selectFrom([]byte{prefixes.Tx}, startKey, endKey)
if err != nil {
return nil, err
}
out = append(out, selectedTx...)
return out, nil
}
blockTxsCache := make(map[uint32][]*chainhash.Hash)
results := make([]TxMerkle, 0, 500)
for kvs := range innerJoin(db.DB, iterate(db.DB, selectedTxNum), selectTxByTxNum) {
if err := checkForError(kvs); err != nil {
return results, err
}
txNumKey, txNumVal := kvs[0].Key.(*prefixes.TxNumKey), kvs[0].Value.(*prefixes.TxNumValue)
_, txVal := kvs[1].Key.(*prefixes.TxKey), kvs[1].Value.(*prefixes.TxValue)
txHeight := stack.BisectRight(db.TxCounts, []uint32{txNumVal.TxNum})[0]
txPos := txNumVal.TxNum - db.TxCounts.Get(txHeight-1)
// We need all the TX hashes in order to select out the relevant ones.
if _, ok := blockTxsCache[txHeight]; !ok {
txs, err := db.GetBlockTXs(txHeight)
if err != nil {
return results, err
}
blockTxsCache[txHeight] = txs
}
blockTxs := blockTxsCache[txHeight]
results = append(results, TxMerkle{
TxHash: txNumKey.TxHash,
RawTx: txVal.RawTx,
Height: int(txHeight),
Pos: txPos,
Merkle: merklePath(txPos, blockTxs, []*chainhash.Hash{}),
})
}
return results, nil
}
func (db *ReadOnlyDBColumnFamily) GetClaimByID(claimID string) ([]*ExpandedResolveResult, []*ExpandedResolveResult, error) {
rows := make([]*ExpandedResolveResult, 0)
extras := make([]*ExpandedResolveResult, 0)
claimHash, err := hex.DecodeString(claimID)
if err != nil {
return nil, nil, err
}
stream, err := db.FsGetClaimByHash(claimHash)
if err != nil {
return nil, nil, err
}
var res = NewExpandedResolveResult()
res.Stream = &optionalResolveResultOrError{res: stream}
rows = append(rows, res)
if stream != nil && stream.ChannelHash != nil {
channel, err := db.FsGetClaimByHash(stream.ChannelHash)
if err != nil {
return nil, nil, err
}
var res = NewExpandedResolveResult()
res.Channel = &optionalResolveResultOrError{res: channel}
extras = append(extras, res)
}
if stream != nil && stream.RepostedClaimHash != nil {
repost, err := db.FsGetClaimByHash(stream.RepostedClaimHash)
if err != nil {
return nil, nil, err
}
var res = NewExpandedResolveResult()
res.Repost = &optionalResolveResultOrError{res: repost}
extras = append(extras, res)
}
return rows, extras, nil
}
func (db *ReadOnlyDBColumnFamily) GetDBState() (*prefixes.DBStateValue, error) { func (db *ReadOnlyDBColumnFamily) GetDBState() (*prefixes.DBStateValue, error) {
handle, err := db.EnsureHandle(prefixes.DBState) handle, err := db.EnsureHandle(prefixes.DBState)
if err != nil { if err != nil {

View file

@ -4,16 +4,16 @@ import (
"bytes" "bytes"
"encoding/csv" "encoding/csv"
"encoding/hex" "encoding/hex"
"log"
"os" "os"
"strings" "strings"
"sync"
"testing" "testing"
dbpkg "github.com/lbryio/herald.go/db" dbpkg "github.com/lbryio/herald.go/db"
"github.com/lbryio/herald.go/db/prefixes" "github.com/lbryio/herald.go/db/prefixes"
"github.com/lbryio/herald.go/internal" "github.com/lbryio/herald.go/internal"
"github.com/lbryio/lbry.go/v3/extras/stop"
"github.com/linxGnu/grocksdb" "github.com/linxGnu/grocksdb"
log "github.com/sirupsen/logrus"
) )
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -21,7 +21,7 @@ import (
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// OpenAndFillTmpDBColumnFamlies opens a db and fills it with data from a csv file using the given column family names // OpenAndFillTmpDBColumnFamlies opens a db and fills it with data from a csv file using the given column family names
func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFamily, [][]string, func(), error) { func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFamily, [][]string, error) {
log.Println(filePath) log.Println(filePath)
file, err := os.Open(filePath) file, err := os.Open(filePath)
@ -31,7 +31,7 @@ func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFami
reader := csv.NewReader(file) reader := csv.NewReader(file)
records, err := reader.ReadAll() records, err := reader.ReadAll()
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
wOpts := grocksdb.NewDefaultWriteOptions() wOpts := grocksdb.NewDefaultWriteOptions()
@ -39,7 +39,7 @@ func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFami
opts.SetCreateIfMissing(true) opts.SetCreateIfMissing(true)
db, err := grocksdb.OpenDb(opts, "tmp") db, err := grocksdb.OpenDb(opts, "tmp")
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
var handleMap map[string]*grocksdb.ColumnFamilyHandle = make(map[string]*grocksdb.ColumnFamilyHandle) var handleMap map[string]*grocksdb.ColumnFamilyHandle = make(map[string]*grocksdb.ColumnFamilyHandle)
@ -54,7 +54,7 @@ func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFami
log.Println(cfName) log.Println(cfName)
handle, err := db.CreateColumnFamily(opts, cfName) handle, err := db.CreateColumnFamily(opts, cfName)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
handleMap[cfName] = handle handleMap[cfName] = handle
} }
@ -68,16 +68,16 @@ func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFami
for _, record := range records[1:] { for _, record := range records[1:] {
cf := record[0] cf := record[0]
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
handle := handleMap[string(cf)] handle := handleMap[string(cf)]
key, err := hex.DecodeString(record[1]) key, err := hex.DecodeString(record[1])
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
val, err := hex.DecodeString(record[2]) val, err := hex.DecodeString(record[2])
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
db.PutCF(wOpts, handle, key, val) db.PutCF(wOpts, handle, key, val)
} }
@ -94,11 +94,8 @@ func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFami
LastState: nil, LastState: nil,
Height: 0, Height: 0,
Headers: nil, Headers: nil,
OpenIterators: make(map[string][]chan struct{}), Grp: stop.New(),
ItMut: sync.RWMutex{}, Cleanup: toDefer,
ShutdownChan: make(chan struct{}, 1),
DoneChan: make(chan struct{}, 1),
ShutdownCalled: false,
} }
// err = dbpkg.ReadDBState(myDB) //TODO: Figure out right place for this // err = dbpkg.ReadDBState(myDB) //TODO: Figure out right place for this
@ -108,7 +105,7 @@ func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFami
err = myDB.InitTxCounts() err = myDB.InitTxCounts()
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
// err = dbpkg.InitHeaders(myDB) // err = dbpkg.InitHeaders(myDB)
@ -116,7 +113,7 @@ func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFami
// return nil, nil, nil, err // return nil, nil, nil, err
// } // }
return myDB, records, toDefer, nil return myDB, records, nil
} }
// OpenAndFillTmpDBCF opens a db and fills it with data from a csv file // OpenAndFillTmpDBCF opens a db and fills it with data from a csv file
@ -247,17 +244,18 @@ func CatCSV(filePath string) {
func TestCatFullDB(t *testing.T) { func TestCatFullDB(t *testing.T) {
t.Skip("Skipping full db test") t.Skip("Skipping full db test")
grp := stop.New()
// url := "lbry://@lothrop#2/lothrop-livestream-games-and-code#c" // url := "lbry://@lothrop#2/lothrop-livestream-games-and-code#c"
// "lbry://@lbry", "lbry://@lbry#3", "lbry://@lbry3f", "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a", "lbry://@lbry:1", "lbry://@lbry$1" // "lbry://@lbry", "lbry://@lbry#3", "lbry://@lbry3f", "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a", "lbry://@lbry:1", "lbry://@lbry$1"
// url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9" // url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9"
// url := "lbry://@lbry" // url := "lbry://@lbry"
// url := "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a" // url := "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a"
dbPath := "/mnt/sda/wallet_server/_data/lbry-rocksdb/" dbPath := "/mnt/sda1/wallet_server/_data/lbry-rocksdb/"
// dbPath := "/mnt/d/data/snapshot_1072108/lbry-rocksdb/" // dbPath := "/mnt/d/data/snapshot_1072108/lbry-rocksdb/"
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := dbpkg.GetProdDB(dbPath, secondaryPath) db, err := dbpkg.GetProdDB(dbPath, secondaryPath, grp)
defer db.Shutdown()
defer toDefer()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -277,6 +275,7 @@ func TestCatFullDB(t *testing.T) {
// TestOpenFullDB Tests running a resolve on a full db. // TestOpenFullDB Tests running a resolve on a full db.
func TestOpenFullDB(t *testing.T) { func TestOpenFullDB(t *testing.T) {
t.Skip("Skipping full db test") t.Skip("Skipping full db test")
grp := stop.New()
// url := "lbry://@lothrop#2/lothrop-livestream-games-and-code#c" // url := "lbry://@lothrop#2/lothrop-livestream-games-and-code#c"
// "lbry://@lbry", "lbry://@lbry#3", "lbry://@lbry3f", "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a", "lbry://@lbry:1", "lbry://@lbry$1" // "lbry://@lbry", "lbry://@lbry#3", "lbry://@lbry3f", "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a", "lbry://@lbry:1", "lbry://@lbry$1"
// url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9" // url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9"
@ -284,11 +283,11 @@ func TestOpenFullDB(t *testing.T) {
// url := "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a" // url := "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a"
// url := "lbry://@lbry$1" // url := "lbry://@lbry$1"
url := "https://lbry.tv/@lothrop:2/lothrop-livestream-games-and-code:c" url := "https://lbry.tv/@lothrop:2/lothrop-livestream-games-and-code:c"
dbPath := "/mnt/sda/wallet_server/_data/lbry-rocksdb/" dbPath := "/mnt/sda1/wallet_server/_data/lbry-rocksdb/"
// dbPath := "/mnt/d/data/snapshot_1072108/lbry-rocksdb/" // dbPath := "/mnt/d/data/snapshot_1072108/lbry-rocksdb/"
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := dbpkg.GetProdDB(dbPath, secondaryPath) db, err := dbpkg.GetProdDB(dbPath, secondaryPath, grp)
defer toDefer() defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -302,12 +301,13 @@ func TestOpenFullDB(t *testing.T) {
func TestResolve(t *testing.T) { func TestResolve(t *testing.T) {
url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9" url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9"
filePath := "../testdata/FULL_resolve.csv" filePath := "../testdata/FULL_resolve.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
defer toDefer()
expandedResolveResult := db.Resolve(url) expandedResolveResult := db.Resolve(url)
log.Printf("%#v\n", expandedResolveResult) log.Printf("%#v\n", expandedResolveResult)
if expandedResolveResult != nil && expandedResolveResult.Channel != nil { if expandedResolveResult != nil && expandedResolveResult.Channel != nil {
@ -321,11 +321,11 @@ func TestResolve(t *testing.T) {
func TestGetDBState(t *testing.T) { func TestGetDBState(t *testing.T) {
filePath := "../testdata/s_resolve.csv" filePath := "../testdata/s_resolve.csv"
want := uint32(1072108) want := uint32(1072108)
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
state, err := db.GetDBState() state, err := db.GetDBState()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -343,11 +343,11 @@ func TestGetRepostedClaim(t *testing.T) {
// Should be non-existent // Should be non-existent
channelHash2, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bf") channelHash2, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bf")
filePath := "../testdata/W_resolve.csv" filePath := "../testdata/W_resolve.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
count, err := db.GetRepostedCount(channelHash) count, err := db.GetRepostedCount(channelHash)
if err != nil { if err != nil {
@ -376,11 +376,11 @@ func TestGetRepostedCount(t *testing.T) {
// Should be non-existent // Should be non-existent
channelHash2, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bf") channelHash2, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bf")
filePath := "../testdata/j_resolve.csv" filePath := "../testdata/j_resolve.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
count, err := db.GetRepostedCount(channelHash) count, err := db.GetRepostedCount(channelHash)
if err != nil { if err != nil {
@ -413,11 +413,11 @@ func TestGetRepost(t *testing.T) {
channelHash2, _ := hex.DecodeString("000009ca6e0caaaef16872b4bd4f6f1b8c2363e2") channelHash2, _ := hex.DecodeString("000009ca6e0caaaef16872b4bd4f6f1b8c2363e2")
filePath := "../testdata/V_resolve.csv" filePath := "../testdata/V_resolve.csv"
// want := uint32(3670) // want := uint32(3670)
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
res, err := db.GetRepost(channelHash) res, err := db.GetRepost(channelHash)
if err != nil { if err != nil {
@ -447,11 +447,12 @@ func TestGetClaimsInChannelCount(t *testing.T) {
channelHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd") channelHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")
filePath := "../testdata/Z_resolve.csv" filePath := "../testdata/Z_resolve.csv"
want := uint32(3670) want := uint32(3670)
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
count, err := db.GetClaimsInChannelCount(channelHash) count, err := db.GetClaimsInChannelCount(channelHash)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -476,11 +477,12 @@ func TestGetShortClaimIdUrl(t *testing.T) {
var position uint16 = 0 var position uint16 = 0
filePath := "../testdata/F_resolve.csv" filePath := "../testdata/F_resolve.csv"
log.Println(filePath) log.Println(filePath)
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
shortUrl, err := db.GetShortClaimIdUrl(name, normalName, claimHash, rootTxNum, position) shortUrl, err := db.GetShortClaimIdUrl(name, normalName, claimHash, rootTxNum, position)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -494,11 +496,11 @@ func TestClaimShortIdIter(t *testing.T) {
filePath := "../testdata/F_cat.csv" filePath := "../testdata/F_cat.csv"
normalName := "cat" normalName := "cat"
claimId := "0" claimId := "0"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
ch := db.ClaimShortIdIter(normalName, claimId) ch := db.ClaimShortIdIter(normalName, claimId)
@ -524,11 +526,12 @@ func TestGetTXOToClaim(t *testing.T) {
var txNum uint32 = 1456296 var txNum uint32 = 1456296
var position uint16 = 0 var position uint16 = 0
filePath := "../testdata/G_2.csv" filePath := "../testdata/G_2.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
val, err := db.GetCachedClaimHash(txNum, position) val, err := db.GetCachedClaimHash(txNum, position)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -552,11 +555,11 @@ func TestGetClaimToChannel(t *testing.T) {
var val []byte = nil var val []byte = nil
filePath := "../testdata/I_resolve.csv" filePath := "../testdata/I_resolve.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
val, err = db.GetChannelForClaim(claimHash, txNum, position) val, err = db.GetChannelForClaim(claimHash, txNum, position)
if err != nil { if err != nil {
@ -581,11 +584,11 @@ func TestGetEffectiveAmountSupportOnly(t *testing.T) {
want := uint64(20000006) want := uint64(20000006)
claimHashStr := "00000324e40fcb63a0b517a3660645e9bd99244a" claimHashStr := "00000324e40fcb63a0b517a3660645e9bd99244a"
claimHash, _ := hex.DecodeString(claimHashStr) claimHash, _ := hex.DecodeString(claimHashStr)
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
db.Height = 999999999 db.Height = 999999999
amount, err := db.GetEffectiveAmount(claimHash, true) amount, err := db.GetEffectiveAmount(claimHash, true)
@ -611,11 +614,12 @@ func TestGetEffectiveAmount(t *testing.T) {
want := uint64(21000006) want := uint64(21000006)
claimHashStr := "00000324e40fcb63a0b517a3660645e9bd99244a" claimHashStr := "00000324e40fcb63a0b517a3660645e9bd99244a"
claimHash, _ := hex.DecodeString(claimHashStr) claimHash, _ := hex.DecodeString(claimHashStr)
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
db.Height = 999999999 db.Height = 999999999
amount, err := db.GetEffectiveAmount(claimHash, false) amount, err := db.GetEffectiveAmount(claimHash, false)
@ -648,11 +652,12 @@ func TestGetSupportAmount(t *testing.T) {
t.Error(err) t.Error(err)
} }
filePath := "../testdata/a_resolve.csv" filePath := "../testdata/a_resolve.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
res, err := db.GetSupportAmount(claimHash) res, err := db.GetSupportAmount(claimHash)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -668,11 +673,12 @@ func TestGetTxHash(t *testing.T) {
want := "54e14ff0c404c29b3d39ae4d249435f167d5cd4ce5a428ecb745b3df1c8e3dde" want := "54e14ff0c404c29b3d39ae4d249435f167d5cd4ce5a428ecb745b3df1c8e3dde"
filePath := "../testdata/X_resolve.csv" filePath := "../testdata/X_resolve.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
resHash, err := db.GetTxHash(txNum) resHash, err := db.GetTxHash(txNum)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -710,11 +716,12 @@ func TestGetActivation(t *testing.T) {
txNum := uint32(0x6284e3) txNum := uint32(0x6284e3)
position := uint16(0x0) position := uint16(0x0)
want := uint32(0xa6b65) want := uint32(0xa6b65)
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
defer toDefer()
activation, err := db.GetActivation(txNum, position) activation, err := db.GetActivation(txNum, position)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -741,12 +748,13 @@ func TestGetClaimToTXO(t *testing.T) {
return return
} }
filePath := "../testdata/E_resolve.csv" filePath := "../testdata/E_resolve.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
defer toDefer()
res, err := db.GetCachedClaimTxo(claimHash, true) res, err := db.GetCachedClaimTxo(claimHash, true)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -770,12 +778,13 @@ func TestGetControllingClaim(t *testing.T) {
claimName := internal.NormalizeName("@Styxhexenhammer666") claimName := internal.NormalizeName("@Styxhexenhammer666")
claimHash := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd" claimHash := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd"
filePath := "../testdata/P_resolve.csv" filePath := "../testdata/P_resolve.csv"
db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath) db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
defer toDefer()
res, err := db.GetControllingClaim(claimName) res, err := db.GetControllingClaim(claimName)
if err != nil { if err != nil {
t.Error(err) t.Error(err)

View file

@ -6,6 +6,7 @@ import (
"bytes" "bytes"
"github.com/lbryio/herald.go/db/prefixes" "github.com/lbryio/herald.go/db/prefixes"
"github.com/lbryio/lbry.go/v3/extras/stop"
"github.com/linxGnu/grocksdb" "github.com/linxGnu/grocksdb"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -22,9 +23,8 @@ type IterOptions struct {
IncludeValue bool IncludeValue bool
RawKey bool RawKey bool
RawValue bool RawValue bool
ShutdownChan chan struct{} Grp *stop.Group
DoneChan chan struct{} // DB *ReadOnlyDBColumnFamily
DB *ReadOnlyDBColumnFamily
CfHandle *grocksdb.ColumnFamilyHandle CfHandle *grocksdb.ColumnFamilyHandle
It *grocksdb.Iterator It *grocksdb.Iterator
Serializer *prefixes.SerializationAPI Serializer *prefixes.SerializationAPI
@ -43,9 +43,8 @@ func NewIterateOptions() *IterOptions {
IncludeValue: false, IncludeValue: false,
RawKey: false, RawKey: false,
RawValue: false, RawValue: false,
ShutdownChan: make(chan struct{}, 1), Grp: nil,
DoneChan: make(chan struct{}, 1), // DB: nil,
DB: nil,
CfHandle: nil, CfHandle: nil,
It: nil, It: nil,
Serializer: prefixes.ProductionAPI, Serializer: prefixes.ProductionAPI,
@ -108,7 +107,9 @@ func (o *IterOptions) WithRawValue(rawValue bool) *IterOptions {
} }
func (o *IterOptions) WithDB(db *ReadOnlyDBColumnFamily) *IterOptions { func (o *IterOptions) WithDB(db *ReadOnlyDBColumnFamily) *IterOptions {
o.DB = db // o.Grp.AddNamed(1, iterKey)
o.Grp = stop.New(db.Grp)
o.Grp.Add(1)
return o return o
} }

View file

@ -7,6 +7,7 @@ import (
"strings" "strings"
"github.com/go-restruct/restruct" "github.com/go-restruct/restruct"
"github.com/lbryio/herald.go/internal"
"github.com/lbryio/lbcd/chaincfg/chainhash" "github.com/lbryio/lbcd/chaincfg/chainhash"
) )
@ -59,6 +60,34 @@ func (kv *BlockTxsValue) Unpack(buf []byte, order binary.ByteOrder) ([]byte, err
return buf[offset:], nil return buf[offset:], nil
} }
// Struct BigEndianChainHash is a chainhash.Hash stored in external
// byte-order (opposite of other 32 byte chainhash.Hash values). In order
// to reuse chainhash.Hash we need to correct the byte-order.
// Currently this type is used for field Genesis of DBStateValue.
func (kv *BigEndianChainHash) SizeOf() int {
return chainhash.HashSize
}
func (kv *BigEndianChainHash) Pack(buf []byte, order binary.ByteOrder) ([]byte, error) {
offset := 0
hash := kv.CloneBytes()
// HACK: Instances of chainhash.Hash use the internal byte-order.
// Python scribe writes bytes of genesis hash in external byte-order.
internal.ReverseBytesInPlace(hash)
offset += copy(buf[offset:chainhash.HashSize], hash[:])
return buf[offset:], nil
}
func (kv *BigEndianChainHash) Unpack(buf []byte, order binary.ByteOrder) ([]byte, error) {
offset := 0
offset += copy(kv.Hash[:], buf[offset:32])
// HACK: Instances of chainhash.Hash use the internal byte-order.
// Python scribe writes bytes of genesis hash in external byte-order.
internal.ReverseBytesInPlace(kv.Hash[:])
return buf[offset:], nil
}
func genericNew(prefix []byte, key bool) (interface{}, error) { func genericNew(prefix []byte, key bool) (interface{}, error) {
t, ok := prefixRegistry[prefix[0]] t, ok := prefixRegistry[prefix[0]]
if !ok { if !ok {

View file

@ -12,13 +12,13 @@ import (
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"log"
"reflect" "reflect"
"sort" "sort"
"strings" "strings"
"github.com/lbryio/herald.go/internal" "github.com/lbryio/herald.go/internal"
"github.com/lbryio/lbcd/chaincfg/chainhash" "github.com/lbryio/lbcd/chaincfg/chainhash"
log "github.com/sirupsen/logrus"
) )
const ( const (
@ -182,12 +182,25 @@ func NewLengthEncodedPartialClaimId(s string) LengthEncodedPartialClaimId {
} }
} }
type BigEndianChainHash struct {
chainhash.Hash
}
func NewBigEndianChainHash(hash *chainhash.Hash) BigEndianChainHash {
if hash != nil {
return BigEndianChainHash{
*hash,
}
}
return BigEndianChainHash{}
}
type DBStateKey struct { type DBStateKey struct {
Prefix []byte `struct:"[1]byte" json:"prefix"` Prefix []byte `struct:"[1]byte" json:"prefix"`
} }
type DBStateValue struct { type DBStateValue struct {
Genesis *chainhash.Hash Genesis BigEndianChainHash
Height uint32 Height uint32
TxCount uint32 TxCount uint32
Tip *chainhash.Hash Tip *chainhash.Hash
@ -203,7 +216,7 @@ type DBStateValue struct {
func NewDBStateValue() *DBStateValue { func NewDBStateValue() *DBStateValue {
return &DBStateValue{ return &DBStateValue{
Genesis: new(chainhash.Hash), Genesis: NewBigEndianChainHash(nil),
Height: 0, Height: 0,
TxCount: 0, TxCount: 0,
Tip: new(chainhash.Hash), Tip: new(chainhash.Hash),
@ -237,7 +250,11 @@ func (v *DBStateValue) PackValue() []byte {
// b'>32sLL32sLLBBlllL' // b'>32sLL32sLLBBlllL'
n := 32 + 4 + 4 + 32 + 4 + 4 + 1 + 1 + 4 + 4 + 4 + 4 n := 32 + 4 + 4 + 32 + 4 + 4 + 1 + 1 + 4 + 4 + 4 + 4
value := make([]byte, n) value := make([]byte, n)
copy(value, v.Genesis[:32]) genesis := v.Genesis.CloneBytes()
// HACK: Instances of chainhash.Hash use the internal byte-order.
// Python scribe writes bytes of genesis hash in external byte-order.
internal.ReverseBytesInPlace(genesis)
copy(value, genesis[:32])
binary.BigEndian.PutUint32(value[32:], v.Height) binary.BigEndian.PutUint32(value[32:], v.Height)
binary.BigEndian.PutUint32(value[32+4:], v.TxCount) binary.BigEndian.PutUint32(value[32+4:], v.TxCount)
copy(value[32+4+4:], v.Tip[:32]) copy(value[32+4+4:], v.Tip[:32])
@ -282,8 +299,11 @@ func DBStateKeyUnpack(key []byte) *DBStateKey {
func DBStateValueUnpack(value []byte) *DBStateValue { func DBStateValueUnpack(value []byte) *DBStateValue {
genesis := (*chainhash.Hash)(value[:32]) genesis := (*chainhash.Hash)(value[:32])
tip := (*chainhash.Hash)(value[32+4+4 : 32+4+4+32]) tip := (*chainhash.Hash)(value[32+4+4 : 32+4+4+32])
// HACK: Python scribe writes bytes of genesis hash in external byte-order.
// Instances of chainhash.Hash should use the internal byte-order.
internal.ReverseBytesInPlace(genesis[:])
x := &DBStateValue{ x := &DBStateValue{
Genesis: genesis, Genesis: NewBigEndianChainHash(genesis),
Height: binary.BigEndian.Uint32(value[32:]), Height: binary.BigEndian.Uint32(value[32:]),
TxCount: binary.BigEndian.Uint32(value[32+4:]), TxCount: binary.BigEndian.Uint32(value[32+4:]),
Tip: tip, Tip: tip,
@ -708,7 +728,7 @@ type BlockTxsKey struct {
} }
type BlockTxsValue struct { type BlockTxsValue struct {
TxHashes []*chainhash.Hash `struct-while:"!_eof" json:"tx_hashes"` TxHashes []*chainhash.Hash `struct:"*[32]byte" struct-while:"!_eof" json:"tx_hashes"`
} }
func (k *BlockTxsKey) NewBlockTxsKey(height uint32) *BlockTxsKey { func (k *BlockTxsKey) NewBlockTxsKey(height uint32) *BlockTxsKey {
@ -1050,84 +1070,6 @@ func TxNumValueUnpack(value []byte) *TxNumValue {
} }
} }
type TxKey struct {
Prefix []byte `struct:"[1]byte" json:"prefix"`
TxHash *chainhash.Hash `struct:"*[32]byte" json:"tx_hash"`
}
type TxValue struct {
RawTx []byte `struct-while:"!_eof" json:"raw_tx"`
}
func (k *TxKey) PackKey() []byte {
prefixLen := 1
// b'>L'
n := prefixLen + 32
key := make([]byte, n)
copy(key, k.Prefix)
copy(key[prefixLen:], k.TxHash[:32])
return key
}
func (v *TxValue) PackValue() []byte {
value := make([]byte, len(v.RawTx))
copy(value, v.RawTx)
return value
}
func (kv *TxKey) NumFields() int {
return 1
}
func (k *TxKey) PartialPack(fields int) []byte {
// Limit fields between 0 and number of fields, we always at least need
// the prefix, and we never need to iterate past the number of fields.
if fields > 1 {
fields = 1
}
if fields < 0 {
fields = 0
}
prefixLen := 1
var n = prefixLen
for i := 0; i <= fields; i++ {
switch i {
case 1:
n += 32
}
}
key := make([]byte, n)
for i := 0; i <= fields; i++ {
switch i {
case 0:
copy(key, k.Prefix)
case 1:
copy(key[prefixLen:], k.TxHash[:32])
}
}
return key
}
func TxKeyUnpack(key []byte) *TxKey {
prefixLen := 1
return &TxKey{
Prefix: key[:prefixLen],
TxHash: (*chainhash.Hash)(key[prefixLen : prefixLen+32]),
}
}
func TxValueUnpack(value []byte) *TxValue {
return &TxValue{
RawTx: value,
}
}
type BlockHeaderKey struct { type BlockHeaderKey struct {
Prefix []byte `struct:"[1]byte" json:"prefix"` Prefix []byte `struct:"[1]byte" json:"prefix"`
Height uint32 `json:"height"` Height uint32 `json:"height"`
@ -3351,9 +3293,12 @@ func (kv *TrendingNotificationValue) UnpackValue(buf []byte) {
offset += 8 offset += 8
} }
type TxKey = MempoolTxKey
type TxValue = MempoolTxValue
type MempoolTxKey struct { type MempoolTxKey struct {
Prefix []byte `struct:"[1]byte" json:"prefix"` Prefix []byte `struct:"[1]byte" json:"prefix"`
TxHash []byte `struct:"[32]byte" json:"tx_hash"` TxHash *chainhash.Hash `struct:"*[32]byte" json:"tx_hash"`
} }
type MempoolTxValue struct { type MempoolTxValue struct {
@ -3386,7 +3331,7 @@ func (kv *MempoolTxKey) UnpackKey(buf []byte) {
offset := 0 offset := 0
kv.Prefix = buf[offset : offset+1] kv.Prefix = buf[offset : offset+1]
offset += 1 offset += 1
kv.TxHash = buf[offset : offset+32] kv.TxHash = (*chainhash.Hash)(buf[offset : offset+32])
offset += 32 offset += 32
} }
@ -3468,7 +3413,7 @@ func (kv *TouchedHashXValue) UnpackValue(buf []byte) {
type HashXStatusKey struct { type HashXStatusKey struct {
Prefix []byte `struct:"[1]byte" json:"prefix"` Prefix []byte `struct:"[1]byte" json:"prefix"`
HashX []byte `struct:"[20]byte" json:"hashX"` HashX []byte `struct:"[11]byte" json:"hashX"`
} }
type HashXStatusValue struct { type HashXStatusValue struct {
@ -3480,15 +3425,15 @@ func (kv *HashXStatusKey) NumFields() int {
} }
func (kv *HashXStatusKey) PartialPack(fields int) []byte { func (kv *HashXStatusKey) PartialPack(fields int) []byte {
// b'>20s' // b'>20s' (really HASHX_LEN 11 bytes)
n := len(kv.Prefix) + 20 n := len(kv.Prefix) + 11
buf := make([]byte, n) buf := make([]byte, n)
offset := 0 offset := 0
offset += copy(buf[offset:], kv.Prefix[:1]) offset += copy(buf[offset:], kv.Prefix[:1])
if fields <= 0 { if fields <= 0 {
return buf[:offset] return buf[:offset]
} }
offset += copy(buf[offset:], kv.HashX[:20]) offset += copy(buf[offset:], kv.HashX[:11])
return buf[:offset] return buf[:offset]
} }
@ -3497,12 +3442,12 @@ func (kv *HashXStatusKey) PackKey() []byte {
} }
func (kv *HashXStatusKey) UnpackKey(buf []byte) { func (kv *HashXStatusKey) UnpackKey(buf []byte) {
// b'>20s' // b'>20s' (really HASHX_LEN 11 bytes)
offset := 0 offset := 0
kv.Prefix = buf[offset : offset+1] kv.Prefix = buf[offset : offset+1]
offset += 1 offset += 1
kv.HashX = buf[offset : offset+20] kv.HashX = buf[offset : offset+11]
offset += 20 offset += 11
} }
func (kv *HashXStatusValue) PackValue() []byte { func (kv *HashXStatusValue) PackValue() []byte {
@ -3925,12 +3870,6 @@ var prefixRegistry = map[byte]prefixMeta{
newValue: func() interface{} { newValue: func() interface{} {
return &TxValue{} return &TxValue{}
}, },
newKeyUnpack: func(buf []byte) interface{} {
return TxKeyUnpack(buf)
},
newValueUnpack: func(buf []byte) interface{} {
return TxValueUnpack(buf)
},
}, },
BlockHash: { BlockHash: {
newKey: func() interface{} { newKey: func() interface{} {

View file

@ -6,7 +6,6 @@ import (
"encoding/csv" "encoding/csv"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"log"
"math" "math"
"math/big" "math/big"
"os" "os"
@ -16,6 +15,7 @@ import (
dbpkg "github.com/lbryio/herald.go/db" dbpkg "github.com/lbryio/herald.go/db"
prefixes "github.com/lbryio/herald.go/db/prefixes" prefixes "github.com/lbryio/herald.go/db/prefixes"
"github.com/linxGnu/grocksdb" "github.com/linxGnu/grocksdb"
log "github.com/sirupsen/logrus"
) )
func TestPrefixRegistry(t *testing.T) { func TestPrefixRegistry(t *testing.T) {

2
go.mod
View file

@ -27,6 +27,8 @@ require (
require ( require (
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect github.com/golang/protobuf v1.5.2 // indirect

2
go.sum
View file

@ -68,11 +68,13 @@ github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufo
github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=

View file

@ -6,4 +6,5 @@ package internal
type HeightHash struct { type HeightHash struct {
Height uint64 Height uint64
BlockHash []byte BlockHash []byte
BlockHeader []byte
} }

30
main.go
View file

@ -10,8 +10,10 @@ import (
"github.com/lbryio/herald.go/internal" "github.com/lbryio/herald.go/internal"
pb "github.com/lbryio/herald.go/protobuf/go" pb "github.com/lbryio/herald.go/protobuf/go"
"github.com/lbryio/herald.go/server" "github.com/lbryio/herald.go/server"
"github.com/lbryio/lbry.go/v3/extras/stop"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
) )
func main() { func main() {
@ -27,40 +29,22 @@ func main() {
if args.CmdType == server.ServeCmd { if args.CmdType == server.ServeCmd {
// This will cancel goroutines with the server finishes. // This will cancel goroutines with the server finishes.
ctxWCancel, cancel := context.WithCancel(ctx) stopGroup := stop.New()
defer cancel()
initsignals() initsignals()
interrupt := interruptListener() interrupt := interruptListener()
s := server.MakeHubServer(ctxWCancel, args) s := server.MakeHubServer(stopGroup, args)
go s.Run() go s.Run()
defer func() { defer s.Stop()
log.Println("Shutting down server...")
if s.EsClient != nil {
log.Println("Stopping es client...")
s.EsClient.Stop()
}
if s.GrpcServer != nil {
log.Println("Stopping grpc server...")
s.GrpcServer.GracefulStop()
}
if s.DB != nil {
log.Println("Stopping database connection...")
s.DB.Shutdown()
}
log.Println("Returning from main...")
}()
<-interrupt <-interrupt
return return
} }
conn, err := grpc.Dial("localhost:"+args.Port, conn, err := grpc.Dial("localhost:"+fmt.Sprintf("%d", args.Port),
grpc.WithInsecure(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(), grpc.WithBlock(),
) )
if err != nil { if err != nil {

View file

@ -88,7 +88,7 @@ test_command_with_want
### blockchain.block.get_chunk ### blockchain.block.get_chunk
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.block.get_chunk", "params": [0]}' --data '{"id": 1, "method": "blockchain.block.get_chunk", "params": [0]}'
| jq .result | sed 's/"//g' | head -c 100 | jq .result | sed 's/"//g' | head -c 100
EOM EOM
@ -97,7 +97,7 @@ test_command_with_want
### blockchain.block.get_header ### blockchain.block.get_header
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.block.get_header", "params": []}' --data '{"id": 1, "method": "blockchain.block.get_header", "params": []}'
| jq .result.timestamp | jq .result.timestamp
EOM EOM
@ -106,7 +106,7 @@ test_command_with_want
### blockchain.block.headers ### blockchain.block.headers
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.block.headers", "params": []}' --data '{"id": 1, "method": "blockchain.block.headers", "params": []}'
| jq .result.count | jq .result.count
EOM EOM
@ -116,7 +116,7 @@ test_command_with_want
## blockchain.claimtrie ## blockchain.claimtrie
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.claimtrie.resolve", "params":[{"Data": ["@Styxhexenhammer666:2"]}]}' --data '{"id": 1, "method": "blockchain.claimtrie.resolve", "params":[{"Data": ["@Styxhexenhammer666:2"]}]}'
| jq .result.txos[0].tx_hash | sed 's/"//g' | jq .result.txos[0].tx_hash | sed 's/"//g'
EOM EOM
@ -128,7 +128,7 @@ test_command_with_want
### blockchain.address.get_balance ### blockchain.address.get_balance
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.address.get_balance", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}' --data '{"id": 1, "method": "blockchain.address.get_balance", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq .result.confirmed | jq .result.confirmed
EOM EOM
@ -138,7 +138,7 @@ test_command_with_want
## blockchain.address.get_history ## blockchain.address.get_history
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.address.get_history", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}' --data '{"id": 1, "method": "blockchain.address.get_history", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq '.result.confirmed | length' | jq '.result.confirmed | length'
EOM EOM
@ -148,7 +148,7 @@ test_command_with_want
## blockchain.address.listunspent ## blockchain.address.listunspent
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.address.listunspent", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}' --data '{"id": 1, "method": "blockchain.address.listunspent", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq '.result | length' | jq '.result | length'
EOM EOM
@ -160,7 +160,7 @@ test_command_with_want
## blockchain.scripthash.get_mempool ## blockchain.scripthash.get_mempool
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.scripthash.get_mempool", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}' --data '{"id": 1, "method": "blockchain.scripthash.get_mempool", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq .error | sed 's/"//g' | jq .error | sed 's/"//g'
EOM EOM
@ -170,7 +170,7 @@ test_command_with_want
## blockchain.scripthash.get_history ## blockchain.scripthash.get_history
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.scripthash.get_history", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}' --data '{"id": 1, "method": "blockchain.scripthash.get_history", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq .error | sed 's/"//g' | jq .error | sed 's/"//g'
EOM EOM
@ -180,13 +180,42 @@ test_command_with_want
## blockchain.scripthash.listunspent ## blockchain.scripthash.listunspent
read -r -d '' CMD <<- EOM read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json" curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.scripthash.listunspent", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}' --data '{"id": 1, "method": "blockchain.scripthash.listunspent", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq .error | sed 's/"//g' | jq .error | sed 's/"//g'
EOM EOM
WANT="encoding/hex: invalid byte: U+0047 'G'" WANT="encoding/hex: invalid byte: U+0047 'G'"
test_command_with_want test_command_with_want
## server.banner
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "server.banner", "params":[]}'
| jq .result | sed 's/"//g'
EOM
WANT="You are connected to an 0.107.0 server."
test_command_with_want
## server.version
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "server.version", "params":[]}'
| jq .result | sed 's/"//g'
EOM
WANT="0.107.0"
test_command_with_want
## server.features
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "server.features", "params":[]}'
EOM
WANT='{"result":{"hosts":{},"pruning":"","server_version":"0.107.0","protocol_min":"0.54.0","protocol_max":"0.199.0","genesis_hash":"9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463","description":"Herald","payment_address":"","donation_address":"","daily_fee":"1.0","hash_function":"sha256","trending_algorithm":"fast_ar"},"error":null,"id":1}'
test_command_with_want
# metrics endpoint testing # metrics endpoint testing
WANT=0 WANT=0

View file

@ -3,6 +3,7 @@ package server
import ( import (
"fmt" "fmt"
"log" "log"
"net/url"
"os" "os"
"strconv" "strconv"
"strings" "strings"
@ -22,13 +23,15 @@ const (
type Args struct { type Args struct {
CmdType int CmdType int
Host string Host string
Port string Port int
DBPath string DBPath string
Chain *string Chain *string
DaemonURL *url.URL
DaemonCAPath string
EsHost string EsHost string
EsPort string EsPort int
PrometheusPort string PrometheusPort int
NotifierPort string NotifierPort int
JSONRPCPort int JSONRPCPort int
JSONRPCHTTPPort int JSONRPCHTTPPort int
MaxSessions int MaxSessions int
@ -67,14 +70,15 @@ type Args struct {
const ( const (
DefaultHost = "0.0.0.0" DefaultHost = "0.0.0.0"
DefaultPort = "50051" DefaultPort = 50051
DefaultDBPath = "/mnt/d/data/snapshot_1072108/lbry-rocksdb/" // FIXME DefaultDBPath = "/mnt/d/data/snapshot_1072108/lbry-rocksdb/" // FIXME
DefaultEsHost = "http://localhost" DefaultEsHost = "http://localhost"
DefaultEsIndex = "claims" DefaultEsIndex = "claims"
DefaultEsPort = "9200" DefaultEsPort = 9200
DefaultPrometheusPort = "2112" DefaultPrometheusPort = 2112
DefaultNotifierPort = "18080" DefaultNotifierPort = 18080
DefaultJSONRPCPort = 50001 DefaultJSONRPCPort = 50001
DefaultJSONRPCHTTPPort = 50002
DefaultMaxSessions = 10000 DefaultMaxSessions = 10000
DefaultSessionTimeout = 300 DefaultSessionTimeout = 300
DefaultRefreshDelta = 5 DefaultRefreshDelta = 5
@ -83,7 +87,6 @@ const (
DefaultBannerFile = "" DefaultBannerFile = ""
DefaultCountry = "US" DefaultCountry = "US"
GENESIS_HASH = "9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463"
HUB_PROTOCOL_VERSION = "0.107.0" HUB_PROTOCOL_VERSION = "0.107.0"
PROTOCOL_MIN = "0.54.0" PROTOCOL_MIN = "0.54.0"
PROTOCOL_MAX = "0.199.0" PROTOCOL_MAX = "0.199.0"
@ -141,6 +144,9 @@ func MakeDefaultTestArgs() *Args {
PrometheusPort: DefaultPrometheusPort, PrometheusPort: DefaultPrometheusPort,
NotifierPort: DefaultNotifierPort, NotifierPort: DefaultNotifierPort,
JSONRPCPort: DefaultJSONRPCPort, JSONRPCPort: DefaultJSONRPCPort,
JSONRPCHTTPPort: DefaultJSONRPCHTTPPort,
MaxSessions: DefaultMaxSessions,
SessionTimeout: DefaultSessionTimeout,
EsIndex: DefaultEsIndex, EsIndex: DefaultEsIndex,
RefreshDelta: DefaultRefreshDelta, RefreshDelta: DefaultRefreshDelta,
CacheTTL: DefaultCacheTTL, CacheTTL: DefaultCacheTTL,
@ -148,7 +154,7 @@ func MakeDefaultTestArgs() *Args {
Banner: nil, Banner: nil,
Country: DefaultCountry, Country: DefaultCountry,
GenesisHash: GENESIS_HASH, GenesisHash: chaincfg.TestNet3Params.GenesisHash.String(),
ServerVersion: HUB_PROTOCOL_VERSION, ServerVersion: HUB_PROTOCOL_VERSION,
ProtocolMin: PROTOCOL_MIN, ProtocolMin: PROTOCOL_MIN,
ProtocolMax: PROTOCOL_MAX, ProtocolMax: PROTOCOL_MAX,
@ -200,10 +206,19 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
environment := GetEnvironmentStandard() environment := GetEnvironmentStandard()
parser := argparse.NewParser("herald", "herald server and client") parser := argparse.NewParser("herald", "herald server and client")
serveCmd := parser.NewCommand("serve", "start the hub server") serveCmd := parser.NewCommand("serve", "start the herald server")
searchCmd := parser.NewCommand("search", "claim search") searchCmd := parser.NewCommand("search", "claim search")
dbCmd := parser.NewCommand("db", "db testing") dbCmd := parser.NewCommand("db", "db testing")
defaultDaemonURL := "http://localhost:9245"
if url, ok := environment["DAEMON_URL"]; ok {
defaultDaemonURL = url
}
validateURL := func(arg []string) error {
_, err := url.Parse(arg[0])
return err
}
validatePort := func(arg []string) error { validatePort := func(arg []string) error {
_, err := strconv.ParseUint(arg[0], 10, 16) _, err := strconv.ParseUint(arg[0], 10, 16)
return err return err
@ -211,16 +226,18 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
// main server config arguments // main server config arguments
host := parser.String("", "rpchost", &argparse.Options{Required: false, Help: "RPC host", Default: DefaultHost}) host := parser.String("", "rpchost", &argparse.Options{Required: false, Help: "RPC host", Default: DefaultHost})
port := parser.String("", "rpcport", &argparse.Options{Required: false, Help: "RPC port", Default: DefaultPort}) port := parser.Int("", "rpcport", &argparse.Options{Required: false, Help: "RPC port", Validate: validatePort, Default: DefaultPort})
dbPath := parser.String("", "db-path", &argparse.Options{Required: false, Help: "RocksDB path", Default: DefaultDBPath}) dbPath := parser.String("", "db-path", &argparse.Options{Required: false, Help: "RocksDB path", Default: DefaultDBPath})
chain := parser.Selector("", "chain", []string{chaincfg.MainNetParams.Name, chaincfg.TestNet3Params.Name, chaincfg.RegressionNetParams.Name, "testnet"}, chain := parser.Selector("", "chain", []string{chaincfg.MainNetParams.Name, chaincfg.TestNet3Params.Name, chaincfg.RegressionNetParams.Name, "testnet"},
&argparse.Options{Required: false, Help: "Which chain to use, default is 'mainnet'. Values 'regtest' and 'testnet' are for testing", Default: chaincfg.MainNetParams.Name}) &argparse.Options{Required: false, Help: "Which chain to use, default is 'mainnet'. Values 'regtest' and 'testnet' are for testing", Default: chaincfg.MainNetParams.Name})
daemonURLStr := parser.String("", "daemon-url", &argparse.Options{Required: false, Help: "URL for rpc to lbrycrd or lbcd, <rpcuser>:<rpcpassword>@<lbcd rpc ip><lbrcd rpc port>.", Validate: validateURL, Default: defaultDaemonURL})
daemonCAPath := parser.String("", "daemon-ca-path", &argparse.Options{Required: false, Help: "Path to the lbcd CA file. Use SSL certificate to verify connection to lbcd."})
esHost := parser.String("", "eshost", &argparse.Options{Required: false, Help: "elasticsearch host", Default: DefaultEsHost}) esHost := parser.String("", "eshost", &argparse.Options{Required: false, Help: "elasticsearch host", Default: DefaultEsHost})
esPort := parser.String("", "esport", &argparse.Options{Required: false, Help: "elasticsearch port", Default: DefaultEsPort}) esPort := parser.Int("", "esport", &argparse.Options{Required: false, Help: "elasticsearch port", Default: DefaultEsPort})
prometheusPort := parser.String("", "prometheus-port", &argparse.Options{Required: false, Help: "prometheus port", Default: DefaultPrometheusPort}) prometheusPort := parser.Int("", "prometheus-port", &argparse.Options{Required: false, Help: "prometheus port", Default: DefaultPrometheusPort})
notifierPort := parser.String("", "notifier-port", &argparse.Options{Required: false, Help: "notifier port", Default: DefaultNotifierPort}) notifierPort := parser.Int("", "notifier-port", &argparse.Options{Required: false, Help: "notifier port", Default: DefaultNotifierPort})
jsonRPCPort := parser.Int("", "json-rpc-port", &argparse.Options{Required: false, Help: "JSON RPC port", Validate: validatePort}) jsonRPCPort := parser.Int("", "json-rpc-port", &argparse.Options{Required: false, Help: "JSON RPC port", Validate: validatePort, Default: DefaultJSONRPCPort})
jsonRPCHTTPPort := parser.Int("", "json-rpc-http-port", &argparse.Options{Required: false, Help: "JSON RPC over HTTP port", Validate: validatePort}) jsonRPCHTTPPort := parser.Int("", "json-rpc-http-port", &argparse.Options{Required: false, Help: "JSON RPC over HTTP port", Validate: validatePort, Default: DefaultJSONRPCHTTPPort})
maxSessions := parser.Int("", "max-sessions", &argparse.Options{Required: false, Help: "Maximum number of electrum clients that can be connected", Default: DefaultMaxSessions}) maxSessions := parser.Int("", "max-sessions", &argparse.Options{Required: false, Help: "Maximum number of electrum clients that can be connected", Default: DefaultMaxSessions})
sessionTimeout := parser.Int("", "session-timeout", &argparse.Options{Required: false, Help: "Session inactivity timeout (seconds)", Default: DefaultSessionTimeout}) sessionTimeout := parser.Int("", "session-timeout", &argparse.Options{Required: false, Help: "Session inactivity timeout (seconds)", Default: DefaultSessionTimeout})
esIndex := parser.String("", "esindex", &argparse.Options{Required: false, Help: "elasticsearch index name", Default: DefaultEsIndex}) esIndex := parser.String("", "esindex", &argparse.Options{Required: false, Help: "elasticsearch index name", Default: DefaultEsIndex})
@ -274,6 +291,11 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
*jsonRPCPort = DefaultJSONRPCPort *jsonRPCPort = DefaultJSONRPCPort
} }
daemonURL, err := url.Parse(*daemonURLStr)
if err != nil {
log.Fatalf("URL parse failed: %v", err)
}
banner := loadBanner(bannerFile, HUB_PROTOCOL_VERSION) banner := loadBanner(bannerFile, HUB_PROTOCOL_VERSION)
args := &Args{ args := &Args{
@ -282,6 +304,8 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
Port: *port, Port: *port,
DBPath: *dbPath, DBPath: *dbPath,
Chain: chain, Chain: chain,
DaemonURL: daemonURL,
DaemonCAPath: *daemonCAPath,
EsHost: *esHost, EsHost: *esHost,
EsPort: *esPort, EsPort: *esPort,
PrometheusPort: *prometheusPort, PrometheusPort: *prometheusPort,
@ -299,7 +323,7 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
BlockingChannelIds: *blockingChannelIds, BlockingChannelIds: *blockingChannelIds,
FilteringChannelIds: *filteringChannelIds, FilteringChannelIds: *filteringChannelIds,
GenesisHash: GENESIS_HASH, GenesisHash: "",
ServerVersion: HUB_PROTOCOL_VERSION, ServerVersion: HUB_PROTOCOL_VERSION,
ProtocolMin: PROTOCOL_MIN, ProtocolMin: PROTOCOL_MIN,
ProtocolMax: PROTOCOL_MAX, ProtocolMax: PROTOCOL_MAX,
@ -331,11 +355,17 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
} }
if esPort, ok := environment["ELASTIC_PORT"]; ok { if esPort, ok := environment["ELASTIC_PORT"]; ok {
args.EsPort = esPort args.EsPort, err = strconv.Atoi(esPort)
if err != nil {
log.Fatal(err)
}
} }
if prometheusPort, ok := environment["GOHUB_PROMETHEUS_PORT"]; ok { if prometheusPort, ok := environment["GOHUB_PROMETHEUS_PORT"]; ok {
args.PrometheusPort = prometheusPort args.PrometheusPort, err = strconv.Atoi(prometheusPort)
if err != nil {
log.Fatal(err)
}
} }
/* /*

View file

@ -3,17 +3,19 @@ package server
import ( import (
"bufio" "bufio"
"context" "context"
"log"
"math" "math"
"net" "net"
"os" "os"
"strconv"
"strings" "strings"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/lbryio/herald.go/internal/metrics" "github.com/lbryio/herald.go/internal/metrics"
pb "github.com/lbryio/herald.go/protobuf/go" pb "github.com/lbryio/herald.go/protobuf/go"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
) )
// Peer holds relevant information about peers that we know about. // Peer holds relevant information about peers that we know about.
@ -86,7 +88,7 @@ func (s *Server) getAndSetExternalIp(ip, port string) error {
// storing them as known peers. Returns a map of peerKey -> object // storing them as known peers. Returns a map of peerKey -> object
func (s *Server) loadPeers() error { func (s *Server) loadPeers() error {
peerFile := s.Args.PeerFile peerFile := s.Args.PeerFile
port := s.Args.Port port := strconv.Itoa(s.Args.Port)
// First we make sure our server has come up, so we can answer back to peers. // First we make sure our server has come up, so we can answer back to peers.
var failures = 0 var failures = 0
@ -98,7 +100,7 @@ retry:
time.Sleep(time.Second * time.Duration(math.Pow(float64(failures), 2))) time.Sleep(time.Second * time.Duration(math.Pow(float64(failures), 2)))
conn, err := grpc.DialContext(ctx, conn, err := grpc.DialContext(ctx,
"0.0.0.0:"+port, "0.0.0.0:"+port,
grpc.WithInsecure(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(), grpc.WithBlock(),
) )
@ -171,7 +173,7 @@ func (s *Server) subscribeToPeer(peer *Peer) error {
conn, err := grpc.DialContext(ctx, conn, err := grpc.DialContext(ctx,
peer.Address+":"+peer.Port, peer.Address+":"+peer.Port,
grpc.WithInsecure(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(), grpc.WithBlock(),
) )
if err != nil { if err != nil {
@ -181,12 +183,12 @@ func (s *Server) subscribeToPeer(peer *Peer) error {
msg := &pb.ServerMessage{ msg := &pb.ServerMessage{
Address: s.ExternalIP.String(), Address: s.ExternalIP.String(),
Port: s.Args.Port, Port: strconv.Itoa(s.Args.Port),
} }
c := pb.NewHubClient(conn) c := pb.NewHubClient(conn)
log.Printf("%s:%s subscribing to %+v\n", s.ExternalIP, s.Args.Port, peer) log.Printf("%s:%d subscribing to %+v\n", s.ExternalIP, s.Args.Port, peer)
_, err = c.PeerSubscribe(ctx, msg) _, err = c.PeerSubscribe(ctx, msg)
if err != nil { if err != nil {
return err return err
@ -207,7 +209,7 @@ func (s *Server) helloPeer(peer *Peer) (*pb.HelloMessage, error) {
conn, err := grpc.DialContext(ctx, conn, err := grpc.DialContext(ctx,
peer.Address+":"+peer.Port, peer.Address+":"+peer.Port,
grpc.WithInsecure(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(), grpc.WithBlock(),
) )
if err != nil { if err != nil {
@ -219,12 +221,12 @@ func (s *Server) helloPeer(peer *Peer) (*pb.HelloMessage, error) {
c := pb.NewHubClient(conn) c := pb.NewHubClient(conn)
msg := &pb.HelloMessage{ msg := &pb.HelloMessage{
Port: s.Args.Port, Port: strconv.Itoa(s.Args.Port),
Host: s.ExternalIP.String(), Host: s.ExternalIP.String(),
Servers: []*pb.ServerMessage{}, Servers: []*pb.ServerMessage{},
} }
log.Printf("%s:%s saying hello to %+v\n", s.ExternalIP, s.Args.Port, peer) log.Printf("%s:%d saying hello to %+v\n", s.ExternalIP, s.Args.Port, peer)
res, err := c.Hello(ctx, msg) res, err := c.Hello(ctx, msg)
if err != nil { if err != nil {
log.Println(err) log.Println(err)
@ -277,7 +279,7 @@ func (s *Server) notifyPeer(peerToNotify *Peer, newPeer *Peer) error {
conn, err := grpc.DialContext(ctx, conn, err := grpc.DialContext(ctx,
peerToNotify.Address+":"+peerToNotify.Port, peerToNotify.Address+":"+peerToNotify.Port,
grpc.WithInsecure(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(), grpc.WithBlock(),
) )
if err != nil { if err != nil {
@ -345,15 +347,15 @@ func (s *Server) addPeer(newPeer *Peer, ping bool, subscribe bool) error {
} }
} }
if s.Args.Port == newPeer.Port && if strconv.Itoa(s.Args.Port) == newPeer.Port &&
(localHosts[newPeer.Address] || newPeer.Address == s.ExternalIP.String()) { (localHosts[newPeer.Address] || newPeer.Address == s.ExternalIP.String()) {
log.Printf("%s:%s addPeer: Self peer, skipping...\n", s.ExternalIP, s.Args.Port) log.Printf("%s:%d addPeer: Self peer, skipping...\n", s.ExternalIP, s.Args.Port)
return nil return nil
} }
k := peerKey(newPeer) k := peerKey(newPeer)
log.Printf("%s:%s adding peer %+v\n", s.ExternalIP, s.Args.Port, newPeer) log.Printf("%s:%d adding peer %+v\n", s.ExternalIP, s.Args.Port, newPeer)
if oldServer, loaded := s.PeerServersLoadOrStore(newPeer); !loaded { if oldServer, loaded := s.PeerServersLoadOrStore(newPeer); !loaded {
if ping { if ping {
_, err := s.helloPeer(newPeer) _, err := s.helloPeer(newPeer)
@ -369,6 +371,10 @@ func (s *Server) addPeer(newPeer *Peer, ping bool, subscribe bool) error {
metrics.PeersKnown.Inc() metrics.PeersKnown.Inc()
s.writePeers() s.writePeers()
s.notifyPeerSubs(newPeer) s.notifyPeerSubs(newPeer)
// This is weird because we're doing grpc and jsonrpc here.
// Do we still want to custom grpc?
log.Warn("Sending peer to NotifierChan")
s.NotifierChan <- peerNotification{newPeer.Address, newPeer.Port}
// Subscribe to all our peers for now // Subscribe to all our peers for now
if subscribe { if subscribe {
@ -415,7 +421,7 @@ func (s *Server) makeHelloMessage() *pb.HelloMessage {
s.PeerServersMut.RUnlock() s.PeerServersMut.RUnlock()
return &pb.HelloMessage{ return &pb.HelloMessage{
Port: s.Args.Port, Port: strconv.Itoa(s.Args.Port),
Host: s.ExternalIP.String(), Host: s.ExternalIP.String(),
Servers: servers, Servers: servers,
} }

View file

@ -4,23 +4,31 @@ import (
"bufio" "bufio"
"context" "context"
"fmt" "fmt"
"log"
"net" "net"
"os" "os"
"strconv"
"strings" "strings"
"testing" "testing"
"github.com/lbryio/herald.go/internal/metrics" "github.com/lbryio/herald.go/internal/metrics"
pb "github.com/lbryio/herald.go/protobuf/go" pb "github.com/lbryio/herald.go/protobuf/go"
server "github.com/lbryio/herald.go/server" "github.com/lbryio/herald.go/server"
"github.com/lbryio/lbry.go/v3/extras/stop"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
) )
// lineCountFile takes a fileName and counts the number of lines in it. // lineCountFile takes a fileName and counts the number of lines in it.
func lineCountFile(fileName string) int { func lineCountFile(fileName string) int {
f, err := os.Open(fileName) f, err := os.Open(fileName)
defer f.Close() defer func() {
err := f.Close()
if err != nil {
log.Warn(err)
}
}()
if err != nil { if err != nil {
log.Println(err) log.Println(err)
return 0 return 0
@ -46,8 +54,10 @@ func removeFile(fileName string) {
// TestAddPeer tests the ability to add peers // TestAddPeer tests the ability to add peers
func TestAddPeer(t *testing.T) { func TestAddPeer(t *testing.T) {
ctx := context.Background() // ctx := context.Background()
ctx := stop.NewDebug()
args := server.MakeDefaultTestArgs() args := server.MakeDefaultTestArgs()
args.DisableStartNotifier = false
tests := []struct { tests := []struct {
name string name string
@ -97,6 +107,7 @@ func TestAddPeer(t *testing.T) {
if got != tt.want { if got != tt.want {
t.Errorf("len(server.PeerServers) = %d, want %d\n", got, tt.want) t.Errorf("len(server.PeerServers) = %d, want %d\n", got, tt.want)
} }
hubServer.Stop()
}) })
} }
@ -104,9 +115,10 @@ func TestAddPeer(t *testing.T) {
// TestPeerWriter tests that peers get written properly // TestPeerWriter tests that peers get written properly
func TestPeerWriter(t *testing.T) { func TestPeerWriter(t *testing.T) {
ctx := context.Background() ctx := stop.NewDebug()
args := server.MakeDefaultTestArgs() args := server.MakeDefaultTestArgs()
args.DisableWritePeers = false args.DisableWritePeers = false
args.DisableStartNotifier = false
tests := []struct { tests := []struct {
name string name string
@ -141,17 +153,16 @@ func TestPeerWriter(t *testing.T) {
Port: "50051", Port: "50051",
} }
} }
//log.Printf("Adding peer %+v\n", peer)
err := hubServer.AddPeerExported()(peer, false, false) err := hubServer.AddPeerExported()(peer, false, false)
if err != nil { if err != nil {
log.Println(err) log.Println(err)
} }
} }
//log.Println("Counting lines...")
got := lineCountFile(hubServer.Args.PeerFile) got := lineCountFile(hubServer.Args.PeerFile)
if got != tt.want { if got != tt.want {
t.Errorf("lineCountFile(peers.txt) = %d, want %d", got, tt.want) t.Errorf("lineCountFile(peers.txt) = %d, want %d", got, tt.want)
} }
hubServer.Stop()
}) })
} }
@ -160,10 +171,13 @@ func TestPeerWriter(t *testing.T) {
// TestAddPeerEndpoint tests the ability to add peers // TestAddPeerEndpoint tests the ability to add peers
func TestAddPeerEndpoint(t *testing.T) { func TestAddPeerEndpoint(t *testing.T) {
ctx := context.Background() ctx := stop.NewDebug()
args := server.MakeDefaultTestArgs() args := server.MakeDefaultTestArgs()
args.DisableStartNotifier = false
args2 := server.MakeDefaultTestArgs() args2 := server.MakeDefaultTestArgs()
args2.Port = "50052" args2.DisableStartNotifier = false
args2.Port = 50052
args2.NotifierPort = 18081
tests := []struct { tests := []struct {
name string name string
@ -193,9 +207,8 @@ func TestAddPeerEndpoint(t *testing.T) {
metrics.PeersKnown.Set(0) metrics.PeersKnown.Set(0)
go hubServer.Run() go hubServer.Run()
go hubServer2.Run() go hubServer2.Run()
//go hubServer.Run() conn, err := grpc.Dial("localhost:"+strconv.Itoa(args.Port),
conn, err := grpc.Dial("localhost:"+args.Port, grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithInsecure(),
grpc.WithBlock(), grpc.WithBlock(),
) )
if err != nil { if err != nil {
@ -214,8 +227,6 @@ func TestAddPeerEndpoint(t *testing.T) {
log.Println(err) log.Println(err)
} }
hubServer.GrpcServer.GracefulStop()
hubServer2.GrpcServer.GracefulStop()
got1 := hubServer.GetNumPeersExported()() got1 := hubServer.GetNumPeersExported()()
got2 := hubServer2.GetNumPeersExported()() got2 := hubServer2.GetNumPeersExported()()
if got1 != tt.wantServerOne { if got1 != tt.wantServerOne {
@ -224,6 +235,8 @@ func TestAddPeerEndpoint(t *testing.T) {
if got2 != tt.wantServerTwo { if got2 != tt.wantServerTwo {
t.Errorf("len(hubServer2.PeerServers) = %d, want %d\n", got2, tt.wantServerTwo) t.Errorf("len(hubServer2.PeerServers) = %d, want %d\n", got2, tt.wantServerTwo)
} }
hubServer.Stop()
hubServer2.Stop()
}) })
} }
@ -231,12 +244,17 @@ func TestAddPeerEndpoint(t *testing.T) {
// TestAddPeerEndpoint2 tests the ability to add peers // TestAddPeerEndpoint2 tests the ability to add peers
func TestAddPeerEndpoint2(t *testing.T) { func TestAddPeerEndpoint2(t *testing.T) {
ctx := context.Background() ctx := stop.NewDebug()
args := server.MakeDefaultTestArgs() args := server.MakeDefaultTestArgs()
args2 := server.MakeDefaultTestArgs() args2 := server.MakeDefaultTestArgs()
args3 := server.MakeDefaultTestArgs() args3 := server.MakeDefaultTestArgs()
args2.Port = "50052" args2.Port = 50052
args3.Port = "50053" args3.Port = 50053
args.DisableStartNotifier = false
args2.DisableStartNotifier = false
args3.DisableStartNotifier = false
args2.NotifierPort = 18081
args3.NotifierPort = 18082
tests := []struct { tests := []struct {
name string name string
@ -261,8 +279,8 @@ func TestAddPeerEndpoint2(t *testing.T) {
go hubServer.Run() go hubServer.Run()
go hubServer2.Run() go hubServer2.Run()
go hubServer3.Run() go hubServer3.Run()
conn, err := grpc.Dial("localhost:"+args.Port, conn, err := grpc.Dial("localhost:"+strconv.Itoa(args.Port),
grpc.WithInsecure(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(), grpc.WithBlock(),
) )
if err != nil { if err != nil {
@ -290,9 +308,6 @@ func TestAddPeerEndpoint2(t *testing.T) {
log.Println(err) log.Println(err)
} }
hubServer.GrpcServer.GracefulStop()
hubServer2.GrpcServer.GracefulStop()
hubServer3.GrpcServer.GracefulStop()
got1 := hubServer.GetNumPeersExported()() got1 := hubServer.GetNumPeersExported()()
got2 := hubServer2.GetNumPeersExported()() got2 := hubServer2.GetNumPeersExported()()
got3 := hubServer3.GetNumPeersExported()() got3 := hubServer3.GetNumPeersExported()()
@ -305,6 +320,9 @@ func TestAddPeerEndpoint2(t *testing.T) {
if got3 != tt.wantServerThree { if got3 != tt.wantServerThree {
t.Errorf("len(hubServer3.PeerServers) = %d, want %d\n", got3, tt.wantServerThree) t.Errorf("len(hubServer3.PeerServers) = %d, want %d\n", got3, tt.wantServerThree)
} }
hubServer.Stop()
hubServer2.Stop()
hubServer3.Stop()
}) })
} }
@ -312,12 +330,17 @@ func TestAddPeerEndpoint2(t *testing.T) {
// TestAddPeerEndpoint3 tests the ability to add peers // TestAddPeerEndpoint3 tests the ability to add peers
func TestAddPeerEndpoint3(t *testing.T) { func TestAddPeerEndpoint3(t *testing.T) {
ctx := context.Background() ctx := stop.NewDebug()
args := server.MakeDefaultTestArgs() args := server.MakeDefaultTestArgs()
args2 := server.MakeDefaultTestArgs() args2 := server.MakeDefaultTestArgs()
args3 := server.MakeDefaultTestArgs() args3 := server.MakeDefaultTestArgs()
args2.Port = "50052" args2.Port = 50052
args3.Port = "50053" args3.Port = 50053
args.DisableStartNotifier = false
args2.DisableStartNotifier = false
args3.DisableStartNotifier = false
args2.NotifierPort = 18081
args3.NotifierPort = 18082
tests := []struct { tests := []struct {
name string name string
@ -342,15 +365,15 @@ func TestAddPeerEndpoint3(t *testing.T) {
go hubServer.Run() go hubServer.Run()
go hubServer2.Run() go hubServer2.Run()
go hubServer3.Run() go hubServer3.Run()
conn, err := grpc.Dial("localhost:"+args.Port, conn, err := grpc.Dial("localhost:"+strconv.Itoa(args.Port),
grpc.WithInsecure(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(), grpc.WithBlock(),
) )
if err != nil { if err != nil {
log.Fatalf("did not connect: %v", err) log.Fatalf("did not connect: %v", err)
} }
conn2, err := grpc.Dial("localhost:50052", conn2, err := grpc.Dial("localhost:50052",
grpc.WithInsecure(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(), grpc.WithBlock(),
) )
if err != nil { if err != nil {
@ -379,9 +402,9 @@ func TestAddPeerEndpoint3(t *testing.T) {
log.Println(err) log.Println(err)
} }
hubServer.GrpcServer.GracefulStop() hubServer.Stop()
hubServer2.GrpcServer.GracefulStop() hubServer2.Stop()
hubServer3.GrpcServer.GracefulStop() hubServer3.Stop()
got1 := hubServer.GetNumPeersExported()() got1 := hubServer.GetNumPeersExported()()
got2 := hubServer2.GetNumPeersExported()() got2 := hubServer2.GetNumPeersExported()()
got3 := hubServer3.GetNumPeersExported()() got3 := hubServer3.GetNumPeersExported()()
@ -401,11 +424,11 @@ func TestAddPeerEndpoint3(t *testing.T) {
// TestAddPeer tests the ability to add peers // TestAddPeer tests the ability to add peers
func TestUDPServer(t *testing.T) { func TestUDPServer(t *testing.T) {
ctx := context.Background() ctx := stop.NewDebug()
args := server.MakeDefaultTestArgs() args := server.MakeDefaultTestArgs()
args.DisableStartUDP = false
args2 := server.MakeDefaultTestArgs() args2 := server.MakeDefaultTestArgs()
args2.Port = "50052" args2.Port = 50052
args.DisableStartUDP = false
args2.DisableStartUDP = false args2.DisableStartUDP = false
tests := []struct { tests := []struct {
@ -436,18 +459,18 @@ func TestUDPServer(t *testing.T) {
log.Println(err) log.Println(err)
} }
hubServer.GrpcServer.GracefulStop() hubServer.Stop()
hubServer2.GrpcServer.GracefulStop() hubServer2.Stop()
got1 := hubServer.ExternalIP.String() got1 := hubServer.ExternalIP.String()
if got1 != tt.want { if got1 != tt.want {
t.Errorf("hubServer.ExternalIP = %s, want %s\n", got1, tt.want) t.Errorf("hubServer.ExternalIP = %s, want %s\n", got1, tt.want)
t.Errorf("hubServer.Args.Port = %s\n", hubServer.Args.Port) t.Errorf("hubServer.Args.Port = %d\n", hubServer.Args.Port)
} }
got2 := hubServer2.ExternalIP.String() got2 := hubServer2.ExternalIP.String()
if got2 != tt.want { if got2 != tt.want {
t.Errorf("hubServer2.ExternalIP = %s, want %s\n", got2, tt.want) t.Errorf("hubServer2.ExternalIP = %s, want %s\n", got2, tt.want)
t.Errorf("hubServer2.Args.Port = %s\n", hubServer2.Args.Port) t.Errorf("hubServer2.Args.Port = %d\n", hubServer2.Args.Port)
} }
}) })
} }

View file

@ -7,6 +7,7 @@ import (
"encoding/base64" "encoding/base64"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -55,6 +56,14 @@ type BlockchainScripthashService struct {
session *session session *session
} }
// BlockchainTransactionService methods handle "blockchain.transaction.*" RPCs
type BlockchainTransactionService struct {
DB *db.ReadOnlyDBColumnFamily
Chain *chaincfg.Params
// needed for broadcast TX
sessionMgr *sessionManager
}
const CHUNK_SIZE = 96 const CHUNK_SIZE = 96
const MAX_CHUNK_SIZE = 40960 const MAX_CHUNK_SIZE = 40960
const HEADER_SIZE = wire.MaxBlockHeaderPayload const HEADER_SIZE = wire.MaxBlockHeaderPayload
@ -162,9 +171,42 @@ type BlockHeadersReq struct {
B64 bool `json:"b64"` B64 bool `json:"b64"`
} }
func (req *BlockHeadersReq) UnmarshalJSON(b []byte) error {
var params [4]interface{}
err := json.Unmarshal(b, &params)
if err != nil {
return err
}
switch params[0].(type) {
case float64:
req.StartHeight = uint32(params[0].(float64))
default:
return fmt.Errorf("expected numeric argument #0 (start_height)")
}
switch params[1].(type) {
case float64:
req.Count = uint32(params[1].(float64))
default:
return fmt.Errorf("expected numeric argument #1 (count)")
}
switch params[2].(type) {
case float64:
req.CpHeight = uint32(params[2].(float64))
default:
return fmt.Errorf("expected numeric argument #2 (cp_height)")
}
switch params[3].(type) {
case bool:
req.B64 = params[3].(bool)
default:
return fmt.Errorf("expected boolean argument #3 (b64)")
}
return nil
}
type BlockHeadersResp struct { type BlockHeadersResp struct {
Base64 string `json:"base64,omitempty"` Base64 string `json:"base64,omitempty"`
Hex string `json:"hex,omitempty"` Hex string `json:"hex"`
Count uint32 `json:"count"` Count uint32 `json:"count"`
Max uint32 `json:"max"` Max uint32 `json:"max"`
Branch string `json:"branch,omitempty"` Branch string `json:"branch,omitempty"`
@ -209,6 +251,21 @@ type HeadersSubscribeReq struct {
Raw bool `json:"raw"` Raw bool `json:"raw"`
} }
func (req *HeadersSubscribeReq) UnmarshalJSON(b []byte) error {
var params [1]interface{}
err := json.Unmarshal(b, &params)
if err != nil {
return err
}
switch params[0].(type) {
case bool:
req.Raw = params[0].(bool)
default:
return fmt.Errorf("expected bool argument #0 (raw)")
}
return nil
}
type HeadersSubscribeResp struct { type HeadersSubscribeResp struct {
BlockHeaderElectrum BlockHeaderElectrum
} }
@ -336,6 +393,23 @@ func (s *BlockchainScripthashService) Get_balance(req *scripthashGetBalanceReq,
type AddressGetHistoryReq struct { type AddressGetHistoryReq struct {
Address string `json:"address"` Address string `json:"address"`
} }
// UnmarshalJSON decodes the JSON-RPC positional parameters [address]
// into an AddressGetHistoryReq.
func (req *AddressGetHistoryReq) UnmarshalJSON(b []byte) error {
	var params [1]interface{}
	// The original called json.Unmarshal twice, discarding the first
	// call's error; a single checked call is sufficient.
	err := json.Unmarshal(b, &params)
	if err != nil {
		return err
	}
	switch addr := params[0].(type) {
	case string:
		req.Address = addr
	default:
		return fmt.Errorf("expected string argument #0 (address)")
	}
	return nil
}
type TxInfo struct { type TxInfo struct {
TxHash string `json:"tx_hash"` TxHash string `json:"tx_hash"`
Height uint32 `json:"height"` Height uint32 `json:"height"`
@ -344,10 +418,7 @@ type TxInfoFee struct {
TxInfo TxInfo
Fee uint64 `json:"fee"` Fee uint64 `json:"fee"`
} }
type AddressGetHistoryResp struct { type AddressGetHistoryResp []TxInfoFee
Confirmed []TxInfo `json:"confirmed"`
Unconfirmed []TxInfoFee `json:"unconfirmed"`
}
// 'blockchain.address.get_history' // 'blockchain.address.get_history'
func (s *BlockchainAddressService) Get_history(req *AddressGetHistoryReq, resp **AddressGetHistoryResp) error { func (s *BlockchainAddressService) Get_history(req *AddressGetHistoryReq, resp **AddressGetHistoryResp) error {
@ -375,11 +446,18 @@ func (s *BlockchainAddressService) Get_history(req *AddressGetHistoryReq, resp *
Height: tx.Height, Height: tx.Height,
}) })
} }
result := &AddressGetHistoryResp{ unconfirmed := []TxInfoFee{} // TODO
Confirmed: confirmed, result := make(AddressGetHistoryResp, len(confirmed)+len(unconfirmed))
Unconfirmed: []TxInfoFee{}, // TODO i := 0
for _, tx := range confirmed {
result[i].TxInfo = tx
i += 1
} }
*resp = result for _, tx := range unconfirmed {
result[i] = tx
i += 1
}
*resp = &result
return err return err
} }
@ -626,3 +704,221 @@ func (s *BlockchainScripthashService) Unsubscribe(req *ScripthashSubscribeReq, r
*resp = (*ScripthashSubscribeResp)(nil) *resp = (*ScripthashSubscribeResp)(nil)
return nil return nil
} }
// TransactionBroadcastReq is the positional parameter list for
// 'blockchain.transaction.broadcast': [raw_tx_hex].
type TransactionBroadcastReq [1]string

// TransactionBroadcastResp is the hex txid of the broadcast transaction.
type TransactionBroadcastResp string

// 'blockchain.transaction.broadcast'
// Broadcast submits a raw transaction (hex) to the daemon via the
// session manager and returns its txid.
func (s *BlockchainTransactionService) Broadcast(req *TransactionBroadcastReq, resp **TransactionBroadcastResp) error {
	if s.sessionMgr == nil {
		return errors.New("no session manager, rpc not supported")
	}
	// req[0] is already a string; decode the hex payload directly.
	rawTx, err := hex.DecodeString(req[0])
	if err != nil {
		return err
	}
	txhash, err := s.sessionMgr.broadcastTx(rawTx)
	if err != nil {
		return err
	}
	txid := TransactionBroadcastResp(txhash.String())
	*resp = &txid
	return nil
}
// TransactionGetReq is the positional txid parameter for
// 'blockchain.transaction.get'.
type TransactionGetReq string

// TXFullDetail describes a confirmed transaction: block height, position
// within the block, and merkle branch (hex hashes).
type TXFullDetail struct {
	Height int      `json:"block_height"`
	Pos    uint32   `json:"pos"`
	Merkle []string `json:"merkle"`
}

// TXDetail carries only the block height — presumably for mempool or
// otherwise unconfirmed transactions; TODO confirm against callers.
type TXDetail struct {
	Height int `json:"block_height"`
}

// TXGetItem pairs a txid and raw transaction (both hex-encoded) with its
// confirmation detail.
type TXGetItem struct {
	TxHash string
	TxRaw  string
	Detail interface{} // TXFullDetail or TXDetail struct
}

// TransactionGetResp is the result of 'blockchain.transaction.get'.
type TransactionGetResp TXGetItem
// 'blockchain.transaction.get'
// Get returns the raw transaction and confirmation detail for a single
// txid by delegating to Get_batch.
func (s *BlockchainTransactionService) Get(req *TransactionGetReq, resp **TransactionGetResp) error {
	txids := [1]string{string(*req)}
	request := TransactionGetBatchReq(txids[:])
	var response *TransactionGetBatchResp
	err := s.Get_batch(&request, &response)
	if err != nil {
		return err
	}
	if len(*response) < 1 {
		return errors.New("tx not found")
	}
	// Accept the detail stored either by value or by pointer. The
	// original `case TXFullDetail` alone never matched the *TXFullDetail
	// that Get_batch stored, so confirmed txs were always reported as
	// "tx not confirmed".
	switch (*response)[0].Detail.(type) {
	case TXFullDetail, *TXFullDetail:
		// Confirmed: fall through to return the item.
	case TXDetail, *TXDetail:
		// Unconfirmed detail is still returned, matching prior behavior.
	default:
		return errors.New("tx not confirmed")
	}
	*resp = (*TransactionGetResp)(&(*response)[0])
	return err
}
// TransactionGetBatchReq is the positional parameter list for
// 'blockchain.transaction.get_batch': up to 100 txid strings.
type TransactionGetBatchReq []string

// UnmarshalJSON decodes the positional parameters, rejecting non-string
// elements and requests with more than 100 txids.
func (req *TransactionGetBatchReq) UnmarshalJSON(b []byte) error {
	var params []interface{}
	// The original ignored this error, proceeding on garbage input.
	if err := json.Unmarshal(b, &params); err != nil {
		return err
	}
	if len(params) > 100 {
		return fmt.Errorf("too many tx hashes in request: %v", len(params))
	}
	for i, txhash := range params {
		// Check the CURRENT element. The original switched on params[0]
		// every iteration, so a non-string at index > 0 slipped past the
		// check and panicked on the .(string) assertion below.
		switch s := txhash.(type) {
		case string:
			*req = append(*req, s)
		default:
			return fmt.Errorf("expected string argument #%d (tx_hash)", i)
		}
	}
	return nil
}
// TransactionGetBatchResp is the result of 'blockchain.transaction.get_batch',
// marshalled as a JSON object mapping txid -> [raw_tx, detail].
type TransactionGetBatchResp []TXGetItem

// MarshalJSON encodes the key/value pairs as a variable-length JSON object.
func (resp *TransactionGetBatchResp) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteString("{")
	for i, r := range *resp {
		if i > 0 {
			buf.WriteString(",")
		}
		// json.Marshal is used instead of json.Encoder.Encode because
		// Encode appends a newline after every value, littering the
		// serialized object with embedded whitespace.
		key, err := json.Marshal(r.TxHash)
		if err != nil {
			return nil, err
		}
		buf.Write(key)
		buf.WriteString(":[")
		raw, err := json.Marshal(r.TxRaw)
		if err != nil {
			return nil, err
		}
		buf.Write(raw)
		buf.WriteString(",")
		detail, err := json.Marshal(r.Detail)
		if err != nil {
			return nil, err
		}
		buf.Write(detail)
		buf.WriteString("]")
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}
// 'blockchain.transaction.get_batch'
// Get_batch looks up raw transactions and merkle details for up to 100
// txids and fills resp with one TXGetItem per DB result.
func (s *BlockchainTransactionService) Get_batch(req *TransactionGetBatchReq, resp **TransactionGetBatchResp) error {
	if len(*req) > 100 {
		return fmt.Errorf("too many tx hashes in request: %v", len(*req))
	}
	// Decode the txid strings into binary hashes.
	tx_hashes := make([]chainhash.Hash, len(*req))
	for i, txid := range *req {
		if err := chainhash.Decode(&tx_hashes[i], txid); err != nil {
			return err
		}
	}
	dbResult, err := s.DB.GetTxMerkle(tx_hashes)
	if err != nil {
		return err
	}
	result := make([]TXGetItem, 0, len(dbResult))
	for _, r := range dbResult {
		merkles := make([]string, len(r.Merkle))
		for i, m := range r.Merkle {
			merkles[i] = m.String()
		}
		detail := TXFullDetail{
			Height: r.Height,
			Pos:    r.Pos,
			Merkle: merkles,
		}
		// Store the detail by VALUE: callers type-switch on TXFullDetail,
		// which a *TXFullDetail (as previously stored) never matched.
		result = append(result, TXGetItem{r.TxHash.String(), hex.EncodeToString(r.RawTx), detail})
	}
	*resp = (*TransactionGetBatchResp)(&result)
	return err
}
// TransactionGetMerkleReq is the request for 'blockchain.transaction.get_merkle'.
type TransactionGetMerkleReq struct {
	// TxHash is the txid whose merkle branch is requested.
	TxHash string `json:"tx_hash"`
	// Height is currently ignored by Get_merkle — TODO confirm whether it
	// should be validated against the DB result.
	Height uint32 `json:"height"`
}

// TransactionGetMerkleResp is the result of 'blockchain.transaction.get_merkle'.
type TransactionGetMerkleResp TXGetItem
// 'blockchain.transaction.get_merkle'
func (s *BlockchainTransactionService) Get_merkle(req *TransactionGetMerkleReq, resp **TransactionGetMerkleResp) error {
txids := [1]string{string(req.TxHash)}
request := TransactionGetBatchReq(txids[:])
var response *TransactionGetBatchResp
err := s.Get_batch(&request, &response)
if err != nil {
return err
}
if len(*response) < 1 {
return errors.New("tx not found")
}
switch (*response)[0].Detail.(type) {
case TXFullDetail:
break
case TXDetail:
default:
return errors.New("tx not confirmed")
}
*resp = (*TransactionGetMerkleResp)(&(*response)[0])
return err
}
// TransactionGetHeightReq is the txid parameter for
// 'blockchain.transaction.get_height'.
type TransactionGetHeightReq string

// TransactionGetHeightResp is the block height of the transaction.
type TransactionGetHeightResp uint32

// 'blockchain.transaction.get_height'
// Get_height returns the block height at which the given txid was mined.
func (s *BlockchainTransactionService) Get_height(req *TransactionGetHeightReq, resp **TransactionGetHeightResp) error {
	txid := string(*req)
	txhash, err := chainhash.NewHashFromStr(txid)
	if err != nil {
		return err
	}
	height, err := s.DB.GetTxHeight(txhash)
	if err != nil {
		// Leave *resp untouched on failure; the original published the
		// (meaningless) height alongside the non-nil error.
		return err
	}
	*resp = (*TransactionGetHeightResp)(&height)
	return nil
}
// TransactionInfoReq is the txid parameter for 'blockchain.transaction.info'.
type TransactionInfoReq string

// TransactionInfoResp is the result of 'blockchain.transaction.info'.
type TransactionInfoResp TXGetItem

// 'blockchain.transaction.info'
// Info returns raw transaction and detail for a txid, erroring only when
// the tx exists in neither the mempool nor the blockchain.
func (s *BlockchainTransactionService) Info(req *TransactionInfoReq, resp **TransactionInfoResp) error {
	txids := [1]string{string(*req)}
	request := TransactionGetBatchReq(txids[:])
	var response *TransactionGetBatchResp
	err := s.Get_batch(&request, &response)
	if err != nil {
		return err
	}
	if len(*response) < 1 {
		return errors.New("tx not found")
	}
	// Accept the detail stored either by value or by pointer (the
	// original value-only cases never matched the *TXFullDetail that
	// Get_batch stored, misrouting confirmed txs into the default arm).
	switch (*response)[0].Detail.(type) {
	case TXFullDetail, *TXFullDetail:
		// Confirmed: fall through to return the item.
	case TXDetail, *TXDetail:
		// Unconfirmed: still returned.
	default:
		// Unknown detail: only an empty TxHash means the tx is absent.
		if (*response)[0].TxHash == "" {
			return errors.New("no such mempool or blockchain transaction")
		}
	}
	*resp = (*TransactionInfoResp)(&(*response)[0])
	return err
}

View file

@ -13,6 +13,7 @@ import (
"github.com/lbryio/lbcd/chaincfg" "github.com/lbryio/lbcd/chaincfg"
"github.com/lbryio/lbcd/txscript" "github.com/lbryio/lbcd/txscript"
"github.com/lbryio/lbcutil" "github.com/lbryio/lbcutil"
"github.com/lbryio/lbry.go/v3/extras/stop"
) )
// Source: test_variety_of_transactions_and_longish_history (lbry-sdk/tests/integration/transactions) // Source: test_variety_of_transactions_and_longish_history (lbry-sdk/tests/integration/transactions)
@ -57,8 +58,9 @@ var regTestAddrs = [30]string{
func TestServerGetHeight(t *testing.T) { func TestServerGetHeight(t *testing.T) {
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) grp := stop.NewDebug()
defer toDefer() db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -87,8 +89,9 @@ func TestServerGetHeight(t *testing.T) {
func TestGetChunk(t *testing.T) { func TestGetChunk(t *testing.T) {
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) grp := stop.NewDebug()
defer toDefer() db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -130,8 +133,9 @@ func TestGetChunk(t *testing.T) {
func TestGetHeader(t *testing.T) { func TestGetHeader(t *testing.T) {
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) grp := stop.NewDebug()
defer toDefer() db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -159,8 +163,9 @@ func TestGetHeader(t *testing.T) {
func TestHeaders(t *testing.T) { func TestHeaders(t *testing.T) {
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) grp := stop.NewDebug()
defer toDefer() db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -180,6 +185,9 @@ func TestHeaders(t *testing.T) {
} }
var resp *BlockHeadersResp var resp *BlockHeadersResp
err := s.Headers(&req, &resp) err := s.Headers(&req, &resp)
if err != nil {
t.Errorf("Headers: %v", err)
}
marshalled, err := json.MarshalIndent(resp, "", " ") marshalled, err := json.MarshalIndent(resp, "", " ")
if err != nil { if err != nil {
t.Errorf("height: %v unmarshal err: %v", height, err) t.Errorf("height: %v unmarshal err: %v", height, err)
@ -189,15 +197,17 @@ func TestHeaders(t *testing.T) {
} }
func TestHeadersSubscribe(t *testing.T) { func TestHeadersSubscribe(t *testing.T) {
args := MakeDefaultTestArgs()
grp := stop.NewDebug()
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer toDefer() defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
sm := newSessionManager(db, &chaincfg.RegressionNetParams, DefaultMaxSessions, DefaultSessionTimeout) sm := newSessionManager(nil, db, args, grp, &chaincfg.RegressionNetParams, nil)
sm.start() sm.start()
defer sm.stop() defer sm.stop()
@ -209,13 +219,13 @@ func TestHeadersSubscribe(t *testing.T) {
// Set up logic to read a notification. // Set up logic to read a notification.
var received sync.WaitGroup var received sync.WaitGroup
recv := func(client net.Conn) { recv := func(client net.Conn) {
defer received.Done()
buf := make([]byte, 1024) buf := make([]byte, 1024)
len, err := client.Read(buf) len, err := client.Read(buf)
if err != nil { if err != nil {
t.Errorf("read err: %v", err) t.Errorf("read err: %v", err)
} }
t.Logf("len: %v notification: %v", len, string(buf)) t.Logf("len: %v notification: %v", len, string(buf))
received.Done()
} }
received.Add(2) received.Add(2)
go recv(client1) go recv(client1)
@ -266,12 +276,10 @@ func TestHeadersSubscribe(t *testing.T) {
t.Errorf("decode err: %v", err) t.Errorf("decode err: %v", err)
} }
note1 := headerNotification{ note1 := headerNotification{
HeightHash: internal.HeightHash{Height: 500}, HeightHash: internal.HeightHash{Height: 500, BlockHeader: header500},
blockHeader: [112]byte{},
blockHeaderElectrum: nil, blockHeaderElectrum: nil,
blockHeaderStr: "", blockHeaderStr: "",
} }
copy(note1.blockHeader[:], header500)
t.Logf("sending notification") t.Logf("sending notification")
sm.doNotify(note1) sm.doNotify(note1)
@ -281,8 +289,9 @@ func TestHeadersSubscribe(t *testing.T) {
func TestGetBalance(t *testing.T) { func TestGetBalance(t *testing.T) {
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) grp := stop.NewDebug()
defer toDefer() db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -310,8 +319,9 @@ func TestGetBalance(t *testing.T) {
func TestGetHistory(t *testing.T) { func TestGetHistory(t *testing.T) {
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) grp := stop.NewDebug()
defer toDefer() db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -339,8 +349,9 @@ func TestGetHistory(t *testing.T) {
func TestListUnspent(t *testing.T) { func TestListUnspent(t *testing.T) {
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) grp := stop.NewDebug()
defer toDefer() db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -367,15 +378,17 @@ func TestListUnspent(t *testing.T) {
} }
func TestAddressSubscribe(t *testing.T) { func TestAddressSubscribe(t *testing.T) {
args := MakeDefaultTestArgs()
grp := stop.NewDebug()
secondaryPath := "asdf" secondaryPath := "asdf"
db, toDefer, err := db.GetProdDB(regTestDBPath, secondaryPath) db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
defer toDefer() defer db.Shutdown()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
sm := newSessionManager(db, &chaincfg.RegressionNetParams, DefaultMaxSessions, DefaultSessionTimeout) sm := newSessionManager(nil, db, args, grp, &chaincfg.RegressionNetParams, nil)
sm.start() sm.start()
defer sm.stop() defer sm.stop()

View file

@ -1,13 +1,19 @@
package server package server
import ( import (
"context"
"fmt"
"github.com/lbryio/herald.go/db" "github.com/lbryio/herald.go/db"
"github.com/lbryio/herald.go/internal/metrics"
pb "github.com/lbryio/herald.go/protobuf/go" pb "github.com/lbryio/herald.go/protobuf/go"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
type ClaimtrieService struct { type ClaimtrieService struct {
DB *db.ReadOnlyDBColumnFamily DB *db.ReadOnlyDBColumnFamily
Server *Server
} }
type ResolveData struct { type ResolveData struct {
@ -18,6 +24,10 @@ type Result struct {
Data string `json:"data"` Data string `json:"data"`
} }
type GetClaimByIDData struct {
ClaimID string `json:"claim_id"`
}
// Resolve is the json rpc endpoint for 'blockchain.claimtrie.resolve'. // Resolve is the json rpc endpoint for 'blockchain.claimtrie.resolve'.
func (t *ClaimtrieService) Resolve(args *ResolveData, result **pb.Outputs) error { func (t *ClaimtrieService) Resolve(args *ResolveData, result **pb.Outputs) error {
log.Println("Resolve") log.Println("Resolve")
@ -25,3 +35,74 @@ func (t *ClaimtrieService) Resolve(args *ResolveData, result **pb.Outputs) error
*result = res *result = res
return err return err
} }
// Search is the json rpc endpoint for 'blockchain.claimtrie.search'.
// It forwards the request to the hub server's gRPC Search handler; a nil
// Server yields a nil result without error.
func (t *ClaimtrieService) Search(args *pb.SearchRequest, result **pb.Outputs) error {
	log.Println("Search")
	if t.Server == nil {
		log.Warnln("Server is nil in Search")
		*result = nil
		return nil
	}
	res, err := t.Server.Search(context.Background(), args)
	*result = res
	return err
}
// GetClaimByID is the json rpc endpoint for 'blockchain.claimtrie.getclaimbyid'.
// It resolves a 40-char hex claim_id to its txos plus any referenced
// extra txos, assembling them into a pb.Outputs result.
func (t *ClaimtrieService) GetClaimByID(args *GetClaimByIDData, result **pb.Outputs) error {
	log.Println("GetClaimByID")
	if len(args.ClaimID) != 40 {
		*result = nil
		return fmt.Errorf("len(claim_id) != 40")
	}

	rows, extras, err := t.DB.GetClaimByID(args.ClaimID)
	if err != nil {
		*result = nil
		return err
	}

	metrics.RequestsCount.With(prometheus.Labels{"method": "blockchain.claimtrie.getclaimbyid"}).Inc()

	// FIXME: this has txos and extras and so does GetClaimById?
	allTxos := make([]*pb.Output, 0)
	allExtraTxos := make([]*pb.Output, 0)

	for _, row := range rows {
		txos, extraTxos, err := row.ToOutputs()
		if err != nil {
			*result = nil
			return err
		}
		// TODO: there may be a more efficient way to do this.
		allTxos = append(allTxos, txos...)
		allExtraTxos = append(allExtraTxos, extraTxos...)
	}

	for _, extra := range extras {
		txos, extraTxos, err := extra.ToOutputs()
		if err != nil {
			*result = nil
			return err
		}
		// TODO: there may be a more efficient way to do this.
		allTxos = append(allTxos, txos...)
		allExtraTxos = append(allExtraTxos, extraTxos...)
	}

	res := &pb.Outputs{
		Txos:         allTxos,
		ExtraTxos:    allExtraTxos,
		Total:        uint32(len(allTxos) + len(allExtraTxos)),
		Offset:       0,   //TODO
		Blocked:      nil, //TODO
		BlockedTotal: 0,   //TODO
	}

	// Was log.Warn(res): dumping the full result at warning level on
	// every successful call is leftover development noise.
	log.Debugln(res)
	*result = res
	return nil
}

View file

@ -0,0 +1,39 @@
package server
import (
"errors"
log "github.com/sirupsen/logrus"
)
// PeersService methods handle "server.peers.*" RPCs.
type PeersService struct {
	// Server is the owning hub server (not read by Subscribe — presumably
	// kept for parity with the other service structs; verify).
	Server *Server
	// needed for subscribe/unsubscribe
	sessionMgr *sessionManager
	session    *session
}
// PeersSubscribeReq is the request for 'server.peers.subscribe'.
type PeersSubscribeReq struct {
	Ip      string   `json:"ip"`
	Host    string   `json:"host"`
	Details []string `json:"details"`
}

// PeersSubscribeResp is the subscribe result; Subscribe always sets it nil.
type PeersSubscribeResp string
// Subscribe is the json rpc endpoint for 'server.peers.subscribe'.
// It registers the calling session for peer notifications; the result is
// always nil. (The original comment misnamed this method "Features" and
// misspelled "subscribe".)
func (t *PeersService) Subscribe(req *PeersSubscribeReq, res **PeersSubscribeResp) error {
	log.Println("PeersSubscribe")
	// var port = "50001"
	// FIXME: Get the actual port from the request details
	if t.sessionMgr == nil || t.session == nil {
		*res = nil
		return errors.New("no session, rpc not supported")
	}
	t.sessionMgr.peersSubscribe(t.session, true /*subscribe*/)
	*res = nil
	return nil
}

View file

@ -4,6 +4,10 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
type ServerService struct {
Args *Args
}
type ServerFeatureService struct { type ServerFeatureService struct {
Args *Args Args *Args
} }
@ -26,7 +30,7 @@ type ServerFeaturesRes struct {
} }
// Features is the json rpc endpoint for 'server.features'. // Features is the json rpc endpoint for 'server.features'.
func (t *ServerFeatureService) Features(req *ServerFeaturesReq, res **ServerFeaturesRes) error { func (t *ServerService) Features(req *ServerFeaturesReq, res **ServerFeaturesRes) error {
log.Println("Features") log.Println("Features")
features := &ServerFeaturesRes{ features := &ServerFeaturesRes{
@ -35,7 +39,7 @@ func (t *ServerFeatureService) Features(req *ServerFeaturesReq, res **ServerFeat
ServerVersion: HUB_PROTOCOL_VERSION, ServerVersion: HUB_PROTOCOL_VERSION,
ProtocolMin: PROTOCOL_MIN, ProtocolMin: PROTOCOL_MIN,
ProtocolMax: PROTOCOL_MAX, ProtocolMax: PROTOCOL_MAX,
GenesisHash: GENESIS_HASH, GenesisHash: t.Args.GenesisHash,
Description: t.Args.ServerDescription, Description: t.Args.ServerDescription,
PaymentAddress: t.Args.PaymentAddress, PaymentAddress: t.Args.PaymentAddress,
DonationAddress: t.Args.DonationAddress, DonationAddress: t.Args.DonationAddress,
@ -57,7 +61,7 @@ type ServerBannerReq struct{}
type ServerBannerRes string type ServerBannerRes string
// Banner is the json rpc endpoint for 'server.banner'. // Banner is the json rpc endpoint for 'server.banner'.
func (t *ServerBannerService) Banner(req *ServerBannerReq, res **ServerBannerRes) error { func (t *ServerService) Banner(req *ServerBannerReq, res **ServerBannerRes) error {
log.Println("Banner") log.Println("Banner")
*res = (*ServerBannerRes)(t.Args.Banner) *res = (*ServerBannerRes)(t.Args.Banner)
@ -69,17 +73,16 @@ type ServerVersionService struct {
Args *Args Args *Args
} }
type ServerVersionReq struct{} type ServerVersionReq [2]string // [client_name, client_version]
type ServerVersionRes string type ServerVersionRes [2]string // [version, protocol_version]
// Banner is the json rpc endpoint for 'server.version'.
// FIXME: This should return a struct with the version and the protocol version.
// <<-- that comment was written by github, scary shit because it's true
func (t *ServerVersionService) Version(req *ServerVersionReq, res **ServerVersionRes) error {
log.Println("Version")
*res = (*ServerVersionRes)(&t.Args.ServerVersion)
// Version is the json rpc endpoint for 'server.version'.
func (t *ServerService) Version(req *ServerVersionReq, res **ServerVersionRes) error {
// FIXME: We may need to do the computation of a negotiated version here.
// Also return an error if client is not supported?
result := [2]string{t.Args.ServerVersion, t.Args.ServerVersion}
*res = (*ServerVersionRes)(&result)
log.Printf("Version(%v) -> %v", *req, **res)
return nil return nil
} }

View file

@ -52,24 +52,23 @@ func (cr *gorillaRpcCodecRequest) Method() (string, error) {
// StartJsonRPC starts the json rpc server and registers the endpoints. // StartJsonRPC starts the json rpc server and registers the endpoints.
func (s *Server) StartJsonRPC() error { func (s *Server) StartJsonRPC() error {
s.sessionManager.start()
defer s.sessionManager.stop()
// Set up the pure JSONRPC server with persistent connections/sessions. // Set up the pure JSONRPC server with persistent connections/sessions.
if s.Args.JSONRPCPort != 0 { if s.Args.JSONRPCPort != 0 {
port := ":" + strconv.FormatUint(uint64(s.Args.JSONRPCPort), 10) port := ":" + strconv.Itoa(s.Args.JSONRPCPort)
laddr, err := net.ResolveTCPAddr("tcp", port) laddr, err := net.ResolveTCPAddr("tcp4", port)
if err != nil { if err != nil {
log.Errorf("ResoveIPAddr: %v\n", err) log.Errorf("ResoveIPAddr: %v\n", err)
goto fail1 goto fail1
} }
listener, err := net.ListenTCP("tcp", laddr) listener, err := net.ListenTCP("tcp4", laddr)
if err != nil { if err != nil {
log.Errorf("ListenTCP: %v\n", err) log.Errorf("ListenTCP: %v\n", err)
goto fail1 goto fail1
} }
log.Infof("JSONRPC server listening on %s", listener.Addr().String()) log.Infof("JSONRPC server listening on %s", listener.Addr().String())
s.sessionManager.start()
acceptConnections := func(listener net.Listener) { acceptConnections := func(listener net.Listener) {
defer s.sessionManager.stop()
for { for {
conn, err := listener.Accept() conn, err := listener.Accept()
if err != nil { if err != nil {
@ -78,6 +77,7 @@ func (s *Server) StartJsonRPC() error {
} }
log.Infof("Accepted: %v", conn.RemoteAddr()) log.Infof("Accepted: %v", conn.RemoteAddr())
s.sessionManager.addSession(conn) s.sessionManager.addSession(conn)
} }
} }
go acceptConnections(netutil.LimitListener(listener, s.sessionManager.sessionsMax)) go acceptConnections(netutil.LimitListener(listener, s.sessionManager.sessionsMax))
@ -91,53 +91,52 @@ fail1:
s1.RegisterCodec(&gorillaRpcCodec{gorilla_json.NewCodec()}, "application/json") s1.RegisterCodec(&gorillaRpcCodec{gorilla_json.NewCodec()}, "application/json")
// Register "blockchain.claimtrie.*"" handlers. // Register "blockchain.claimtrie.*"" handlers.
claimtrieSvc := &ClaimtrieService{s.DB} claimtrieSvc := &ClaimtrieService{s.DB, s}
err := s1.RegisterTCPService(claimtrieSvc, "blockchain_claimtrie") err := s1.RegisterTCPService(claimtrieSvc, "blockchain_claimtrie")
if err != nil { if err != nil {
log.Errorf("RegisterTCPService: %v\n", err) log.Errorf("RegisterTCPService: %v\n", err)
goto fail2 goto fail2
} }
// Register other "blockchain.{block,address,scripthash}.*" handlers. // Register "blockchain.{block,address,scripthash,transaction}.*" handlers.
blockchainSvc := &BlockchainBlockService{s.DB, s.Chain} blockchainSvc := &BlockchainBlockService{s.DB, s.Chain}
err = s1.RegisterTCPService(blockchainSvc, "blockchain_block") err = s1.RegisterTCPService(blockchainSvc, "blockchain_block")
if err != nil { if err != nil {
log.Errorf("RegisterTCPService: %v\n", err) log.Errorf("RegisterTCPService: %v\n", err)
goto fail2 goto fail2
} }
err = s1.RegisterTCPService(&BlockchainHeadersService{s.DB, s.Chain, nil, nil}, "blockchain_headers") err = s1.RegisterTCPService(&BlockchainHeadersService{s.DB, s.Chain, s.sessionManager, nil}, "blockchain_headers")
if err != nil { if err != nil {
log.Errorf("RegisterTCPService: %v\n", err) log.Errorf("RegisterTCPService: %v\n", err)
goto fail2 goto fail2
} }
err = s1.RegisterTCPService(&BlockchainAddressService{s.DB, s.Chain, nil, nil}, "blockchain_address") err = s1.RegisterTCPService(&BlockchainAddressService{s.DB, s.Chain, s.sessionManager, nil}, "blockchain_address")
if err != nil { if err != nil {
log.Errorf("RegisterTCPService: %v\n", err) log.Errorf("RegisterTCPService: %v\n", err)
goto fail2 goto fail2
} }
err = s1.RegisterTCPService(&BlockchainScripthashService{s.DB, s.Chain, nil, nil}, "blockchain_scripthash") err = s1.RegisterTCPService(&BlockchainScripthashService{s.DB, s.Chain, s.sessionManager, nil}, "blockchain_scripthash")
if err != nil {
log.Errorf("RegisterTCPService: %v\n", err)
goto fail2
}
err = s1.RegisterTCPService(&BlockchainTransactionService{s.DB, s.Chain, s.sessionManager}, "blockchain_transaction")
if err != nil { if err != nil {
log.Errorf("RegisterTCPService: %v\n", err) log.Errorf("RegisterTCPService: %v\n", err)
goto fail2 goto fail2
} }
// Register "server.{features,banner,version}" handlers. // Register "server.{features,banner,version}" handlers.
serverFeatureSvc := &ServerFeatureService{s.Args} serverSvc := &ServerService{s.Args}
err = s1.RegisterTCPService(serverFeatureSvc, "server_features") err = s1.RegisterTCPService(serverSvc, "server")
if err != nil { if err != nil {
log.Errorf("RegisterTCPService: %v\n", err) log.Errorf("RegisterTCPService: %v\n", err)
goto fail2 goto fail2
} }
serverBannerSvc := &ServerBannerService{s.Args} // Register "server.peers" handlers.
err = s1.RegisterTCPService(serverBannerSvc, "server_banner") peersSvc := &PeersService{Server: s}
if err != nil { err = s1.RegisterTCPService(peersSvc, "server_peers")
log.Errorf("RegisterTCPService: %v\n", err)
goto fail2
}
serverVersionSvc := &ServerVersionService{s.Args}
err = s1.RegisterTCPService(serverVersionSvc, "server_version")
if err != nil { if err != nil {
log.Errorf("RegisterTCPService: %v\n", err) log.Errorf("RegisterTCPService: %v\n", err)
goto fail2 goto fail2

View file

@ -2,6 +2,7 @@ package server
import ( import (
"encoding/binary" "encoding/binary"
"fmt"
"net" "net"
"github.com/lbryio/herald.go/internal" "github.com/lbryio/herald.go/internal"
@ -53,10 +54,16 @@ func (s *Server) DoNotify(heightHash *internal.HeightHash) error {
// RunNotifier Runs the notfying action forever // RunNotifier Runs the notfying action forever
func (s *Server) RunNotifier() error { func (s *Server) RunNotifier() error {
for notification := range s.NotifierChan { for notification := range s.NotifierChan {
switch notification.(type) { switch note := notification.(type) {
case internal.HeightHash: case internal.HeightHash:
heightHash, _ := notification.(internal.HeightHash) heightHash := note
s.DoNotify(&heightHash) s.DoNotify(&heightHash)
// Do we need this?
// case peerNotification:
// peer, _ := notification.(peerNotification)
// s.notifyPeerSubs(&Peer{Address: peer.address, Port: peer.port})
default:
logrus.Warn("unknown notification type")
} }
s.sessionManager.doNotify(notification) s.sessionManager.doNotify(notification)
} }
@ -65,7 +72,8 @@ func (s *Server) RunNotifier() error {
// NotifierServer implements the TCP protocol for height/blockheader notifications // NotifierServer implements the TCP protocol for height/blockheader notifications
func (s *Server) NotifierServer() error { func (s *Server) NotifierServer() error {
address := ":" + s.Args.NotifierPort s.Grp.Add(1)
address := ":" + fmt.Sprintf("%d", s.Args.NotifierPort)
addr, err := net.ResolveTCPAddr("tcp", address) addr, err := net.ResolveTCPAddr("tcp", address)
if err != nil { if err != nil {
return err return err
@ -77,11 +85,27 @@ func (s *Server) NotifierServer() error {
} }
defer listen.Close() defer listen.Close()
rdyCh := make(chan bool)
for { for {
var conn net.Conn
var err error
logrus.Info("Waiting for connection") logrus.Info("Waiting for connection")
conn, err := listen.Accept()
go func() {
conn, err = listen.Accept()
rdyCh <- true
}()
select {
case <-s.Grp.Ch():
s.Grp.Done()
return nil
case <-rdyCh:
logrus.Info("Connection accepted")
}
if err != nil { if err != nil {
logrus.Warn(err) logrus.Warn(err)
continue continue

View file

@ -1,7 +1,6 @@
package server_test package server_test
import ( import (
"context"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"net" "net"
@ -10,11 +9,32 @@ import (
"github.com/lbryio/herald.go/internal" "github.com/lbryio/herald.go/internal"
"github.com/lbryio/herald.go/server" "github.com/lbryio/herald.go/server"
"github.com/lbryio/lbry.go/v3/extras/stop"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
const defaultBufferSize = 1024 const defaultBufferSize = 1024
// subReady polls the server until at least one height subscriber is
// registered, doubling the wait each round (100ms, 200ms, ...); it gives
// up with a timeout once the wait would exceed one second.
func subReady(s *server.Server) error {
	for wait := 100 * time.Millisecond; wait <= time.Second; wait *= 2 {
		s.HeightSubsMut.RLock()
		ready := len(s.HeightSubs) > 0
		s.HeightSubsMut.RUnlock()
		if ready {
			return nil
		}
		logrus.Warn("waiting for subscriber")
		time.Sleep(wait)
	}
	return fmt.Errorf("timeout")
}
func tcpConnReady(addr string) (net.Conn, error) { func tcpConnReady(addr string) (net.Conn, error) {
sleepTime := time.Millisecond * 100 sleepTime := time.Millisecond * 100
for { for {
@ -48,13 +68,13 @@ func tcpRead(conn net.Conn) ([]byte, error) {
func TestNotifierServer(t *testing.T) { func TestNotifierServer(t *testing.T) {
args := server.MakeDefaultTestArgs() args := server.MakeDefaultTestArgs()
ctx := context.Background() ctx := stop.NewDebug()
hub := server.MakeHubServer(ctx, args) hub := server.MakeHubServer(ctx, args)
go hub.NotifierServer() go hub.NotifierServer()
go hub.RunNotifier() go hub.RunNotifier()
addr := fmt.Sprintf(":%s", args.NotifierPort) addr := fmt.Sprintf(":%d", args.NotifierPort)
logrus.Info(addr) logrus.Info(addr)
conn, err := tcpConnReady(addr) conn, err := tcpConnReady(addr)
if err != nil { if err != nil {
@ -76,7 +96,10 @@ func TestNotifierServer(t *testing.T) {
// Hacky but needed because if the reader isn't ready // Hacky but needed because if the reader isn't ready
// before the writer sends it won't get the data // before the writer sends it won't get the data
time.Sleep(time.Second) err = subReady(hub)
if err != nil {
t.Fatal(err)
}
hash, _ := hex.DecodeString("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") hash, _ := hex.DecodeString("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
logrus.Warn("sending hash") logrus.Warn("sending hash")

View file

@ -11,6 +11,7 @@ import (
pb "github.com/lbryio/herald.go/protobuf/go" pb "github.com/lbryio/herald.go/protobuf/go"
server "github.com/lbryio/herald.go/server" server "github.com/lbryio/herald.go/server"
"github.com/lbryio/lbry.go/v3/extras/stop"
"github.com/olivere/elastic/v7" "github.com/olivere/elastic/v7"
) )
@ -55,13 +56,14 @@ func TestSearch(t *testing.T) {
w.Write([]byte(resp)) w.Write([]byte(resp))
} }
context := context.Background() ctx := context.Background()
stopGroup := stop.NewDebug()
args := server.MakeDefaultTestArgs() args := server.MakeDefaultTestArgs()
hubServer := server.MakeHubServer(context, args) hubServer := server.MakeHubServer(stopGroup, args)
req := &pb.SearchRequest{ req := &pb.SearchRequest{
Text: "asdf", Text: "asdf",
} }
out, err := hubServer.Search(context, req) out, err := hubServer.Search(ctx, req)
if err != nil { if err != nil {
log.Println(err) log.Println(err)
} }

View file

@ -8,11 +8,12 @@ import (
"fmt" "fmt"
"hash" "hash"
"io/ioutil" "io/ioutil"
"log" golog "log"
"net" "net"
"net/http" "net/http"
"os" "os"
"regexp" "regexp"
"strconv"
"sync" "sync"
"time" "time"
@ -22,10 +23,12 @@ import (
"github.com/lbryio/herald.go/meta" "github.com/lbryio/herald.go/meta"
pb "github.com/lbryio/herald.go/protobuf/go" pb "github.com/lbryio/herald.go/protobuf/go"
"github.com/lbryio/lbcd/chaincfg" "github.com/lbryio/lbcd/chaincfg"
lbcd "github.com/lbryio/lbcd/rpcclient"
"github.com/lbryio/lbry.go/v3/extras/stop"
"github.com/olivere/elastic/v7" "github.com/olivere/elastic/v7"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
logrus "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/reflection" "google.golang.org/grpc/reflection"
) )
@ -37,6 +40,7 @@ type Server struct {
WeirdCharsRe *regexp.Regexp WeirdCharsRe *regexp.Regexp
DB *db.ReadOnlyDBColumnFamily DB *db.ReadOnlyDBColumnFamily
Chain *chaincfg.Params Chain *chaincfg.Params
DaemonClient *lbcd.Client
EsClient *elastic.Client EsClient *elastic.Client
QueryCache *ttlcache.Cache QueryCache *ttlcache.Cache
S256 *hash.Hash S256 *hash.Hash
@ -53,6 +57,8 @@ type Server struct {
HeightSubs map[net.Addr]net.Conn HeightSubs map[net.Addr]net.Conn
HeightSubsMut sync.RWMutex HeightSubsMut sync.RWMutex
NotifierChan chan interface{} NotifierChan chan interface{}
Grp *stop.Group
notiferListener *net.TCPListener
sessionManager *sessionManager sessionManager *sessionManager
pb.UnimplementedHubServer pb.UnimplementedHubServer
} }
@ -134,7 +140,8 @@ func (s *Server) PeerServersLoadOrStore(peer *Peer) (actual *Peer, loaded bool)
// Run "main" function for starting the server. This blocks. // Run "main" function for starting the server. This blocks.
func (s *Server) Run() { func (s *Server) Run() {
l, err := net.Listen("tcp", ":"+s.Args.Port) address := ":" + strconv.Itoa(s.Args.Port)
l, err := net.Listen("tcp", address)
if err != nil { if err != nil {
log.Fatalf("failed to listen: %v", err) log.Fatalf("failed to listen: %v", err)
} }
@ -149,31 +156,50 @@ func (s *Server) Run() {
} }
} }
func LoadDatabase(args *Args) (*db.ReadOnlyDBColumnFamily, error) { func (s *Server) Stop() {
tmpName, err := ioutil.TempDir("", "go-lbry-hub") log.Println("Shutting down server...")
if s.EsClient != nil {
log.Println("Stopping es client...")
s.EsClient.Stop()
}
if s.GrpcServer != nil {
log.Println("Stopping grpc server...")
s.GrpcServer.GracefulStop()
}
log.Println("Stopping other server threads...")
s.Grp.StopAndWait()
if s.DB != nil {
log.Println("Stopping database connection...")
s.DB.Shutdown()
}
log.Println("Returning from Stop...")
}
func LoadDatabase(args *Args, grp *stop.Group) (*db.ReadOnlyDBColumnFamily, error) {
tmpName, err := os.MkdirTemp("", "go-lbry-hub")
if err != nil { if err != nil {
logrus.Info(err) log.Info(err)
log.Fatal(err) log.Fatal(err)
} }
logrus.Info("tmpName", tmpName) log.Info("tmpName", tmpName)
if err != nil { if err != nil {
logrus.Info(err) log.Info(err)
} }
myDB, _, err := db.GetProdDB(args.DBPath, tmpName) myDB, err := db.GetProdDB(args.DBPath, tmpName, grp)
// dbShutdown = func() {
// db.Shutdown(myDB)
// }
if err != nil { if err != nil {
// Can't load the db, fail loudly // Can't load the db, fail loudly
logrus.Info(err) log.Info(err)
log.Fatalln(err) log.Fatalln(err)
} }
if myDB.LastState != nil { if myDB.LastState != nil {
logrus.Infof("DB version: %v", myDB.LastState.DBVersion) log.Infof("DB version: %v", myDB.LastState.DBVersion)
logrus.Infof("height: %v", myDB.LastState.Height) log.Infof("height: %v", myDB.LastState.Height)
logrus.Infof("tip: %v", myDB.LastState.Tip.String()) log.Infof("genesis: %v", myDB.LastState.Genesis.String())
logrus.Infof("tx count: %v", myDB.LastState.TxCount) log.Infof("tip: %v", myDB.LastState.Tip.String())
log.Infof("tx count: %v", myDB.LastState.TxCount)
} }
blockingChannelHashes := make([][]byte, 0, 10) blockingChannelHashes := make([][]byte, 0, 10)
@ -184,7 +210,7 @@ func LoadDatabase(args *Args) (*db.ReadOnlyDBColumnFamily, error) {
for _, id := range args.BlockingChannelIds { for _, id := range args.BlockingChannelIds {
hash, err := hex.DecodeString(id) hash, err := hex.DecodeString(id)
if err != nil { if err != nil {
logrus.Warn("Invalid channel id: ", id) log.Warn("Invalid channel id: ", id)
continue continue
} }
blockingChannelHashes = append(blockingChannelHashes, hash) blockingChannelHashes = append(blockingChannelHashes, hash)
@ -194,7 +220,7 @@ func LoadDatabase(args *Args) (*db.ReadOnlyDBColumnFamily, error) {
for _, id := range args.FilteringChannelIds { for _, id := range args.FilteringChannelIds {
hash, err := hex.DecodeString(id) hash, err := hex.DecodeString(id)
if err != nil { if err != nil {
logrus.Warn("Invalid channel id: ", id) log.Warn("Invalid channel id: ", id)
continue continue
} }
filteringChannelHashes = append(filteringChannelHashes, hash) filteringChannelHashes = append(filteringChannelHashes, hash)
@ -205,10 +231,10 @@ func LoadDatabase(args *Args) (*db.ReadOnlyDBColumnFamily, error) {
myDB.FilteringChannelHashes = filteringChannelHashes myDB.FilteringChannelHashes = filteringChannelHashes
if len(filteringIds) > 0 { if len(filteringIds) > 0 {
logrus.Infof("filtering claims reposted by channels: %+s", filteringIds) log.Infof("filtering claims reposted by channels: %+s", filteringIds)
} }
if len(blockingIds) > 0 { if len(blockingIds) > 0 {
logrus.Infof("blocking claims reposted by channels: %+s", blockingIds) log.Infof("blocking claims reposted by channels: %+s", blockingIds)
} }
return myDB, nil return myDB, nil
@ -217,7 +243,7 @@ func LoadDatabase(args *Args) (*db.ReadOnlyDBColumnFamily, error) {
// MakeHubServer takes the arguments given to a hub when it's started and // MakeHubServer takes the arguments given to a hub when it's started and
// initializes everything. It loads information about previously known peers, // initializes everything. It loads information about previously known peers,
// creates needed internal data structures, and initializes goroutines. // creates needed internal data structures, and initializes goroutines.
func MakeHubServer(ctx context.Context, args *Args) *Server { func MakeHubServer(grp *stop.Group, args *Args) *Server {
grpcServer := grpc.NewServer(grpc.NumStreamWorkers(0)) grpcServer := grpc.NewServer(grpc.NumStreamWorkers(0))
multiSpaceRe, err := regexp.Compile(`\s{2,}`) multiSpaceRe, err := regexp.Compile(`\s{2,}`)
@ -230,9 +256,34 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
log.Fatal(err) log.Fatal(err)
} }
var client *elastic.Client = nil var lbcdClient *lbcd.Client = nil
if args.DaemonURL != nil && args.DaemonURL.Host != "" {
var rpcCertificate []byte
if args.DaemonCAPath != "" {
rpcCertificate, err = ioutil.ReadFile(args.DaemonCAPath)
if err != nil {
log.Fatalf("failed to read SSL certificate from path: %v", args.DaemonCAPath)
}
}
log.Warnf("connecting to lbcd daemon at %v...", args.DaemonURL.Host)
password, _ := args.DaemonURL.User.Password()
cfg := &lbcd.ConnConfig{
Host: args.DaemonURL.Host,
User: args.DaemonURL.User.Username(),
Pass: password,
HTTPPostMode: true,
DisableTLS: rpcCertificate == nil,
Certificates: rpcCertificate,
}
lbcdClient, err = lbcd.New(cfg, nil)
if err != nil {
log.Fatalf("lbcd daemon connection failed: %v", err)
}
}
var esClient *elastic.Client = nil
if !args.DisableEs { if !args.DisableEs {
esUrl := args.EsHost + ":" + args.EsPort esUrl := args.EsHost + ":" + fmt.Sprintf("%d", args.EsPort)
opts := []elastic.ClientOptionFunc{ opts := []elastic.ClientOptionFunc{
elastic.SetSniff(true), elastic.SetSniff(true),
elastic.SetSnifferTimeoutStartup(time.Second * 60), elastic.SetSnifferTimeoutStartup(time.Second * 60),
@ -240,9 +291,9 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
elastic.SetURL(esUrl), elastic.SetURL(esUrl),
} }
if args.Debug { if args.Debug {
opts = append(opts, elastic.SetTraceLog(log.New(os.Stderr, "[[ELASTIC]]", 0))) opts = append(opts, elastic.SetTraceLog(golog.New(os.Stderr, "[[ELASTIC]]", 0)))
} }
client, err = elastic.NewClient(opts...) esClient, err = elastic.NewClient(opts...)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -266,18 +317,18 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
//TODO: is this the right place to load the db? //TODO: is this the right place to load the db?
var myDB *db.ReadOnlyDBColumnFamily var myDB *db.ReadOnlyDBColumnFamily
// var dbShutdown = func() {}
if !args.DisableResolve { if !args.DisableResolve {
myDB, err = LoadDatabase(args) myDB, err = LoadDatabase(args, grp)
if err != nil { if err != nil {
logrus.Warning(err) log.Warning(err)
} }
} }
// Determine which chain to use based on db and cli values
dbChain := (*chaincfg.Params)(nil) dbChain := (*chaincfg.Params)(nil)
if myDB != nil && myDB.LastState != nil && myDB.LastState.Genesis != nil { if myDB != nil && myDB.LastState != nil {
// The chain params can be inferred from DBStateValue. // The chain params can be inferred from DBStateValue.
switch *myDB.LastState.Genesis { switch myDB.LastState.Genesis.Hash {
case *chaincfg.MainNetParams.GenesisHash: case *chaincfg.MainNetParams.GenesisHash:
dbChain = &chaincfg.MainNetParams dbChain = &chaincfg.MainNetParams
case *chaincfg.TestNet3Params.GenesisHash: case *chaincfg.TestNet3Params.GenesisHash:
@ -300,7 +351,7 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
chain := chaincfg.MainNetParams chain := chaincfg.MainNetParams
if dbChain != nil && cliChain != nil { if dbChain != nil && cliChain != nil {
if dbChain != cliChain { if dbChain != cliChain {
logrus.Warnf("network: %v (from db) conflicts with %v (from cli)", dbChain.Name, cliChain.Name) log.Warnf("network: %v (from db) conflicts with %v (from cli)", dbChain.Name, cliChain.Name)
} }
chain = *dbChain chain = *dbChain
} else if dbChain != nil { } else if dbChain != nil {
@ -308,7 +359,11 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
} else if cliChain != nil { } else if cliChain != nil {
chain = *cliChain chain = *cliChain
} }
logrus.Infof("network: %v", chain.Name) log.Infof("network: %v", chain.Name)
args.GenesisHash = chain.GenesisHash.String()
sessionGrp := stop.New(grp)
s := &Server{ s := &Server{
GrpcServer: grpcServer, GrpcServer: grpcServer,
@ -317,7 +372,8 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
WeirdCharsRe: weirdCharsRe, WeirdCharsRe: weirdCharsRe,
DB: myDB, DB: myDB,
Chain: &chain, Chain: &chain,
EsClient: client, DaemonClient: lbcdClient,
EsClient: esClient,
QueryCache: cache, QueryCache: cache,
S256: &s256, S256: &s256,
LastRefreshCheck: time.Now(), LastRefreshCheck: time.Now(),
@ -332,28 +388,39 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
ExternalIP: net.IPv4(127, 0, 0, 1), ExternalIP: net.IPv4(127, 0, 0, 1),
HeightSubs: make(map[net.Addr]net.Conn), HeightSubs: make(map[net.Addr]net.Conn),
HeightSubsMut: sync.RWMutex{}, HeightSubsMut: sync.RWMutex{},
NotifierChan: make(chan interface{}), NotifierChan: make(chan interface{}, 1),
sessionManager: newSessionManager(myDB, &chain, args.MaxSessions, args.SessionTimeout), Grp: grp,
sessionManager: nil,
} }
// FIXME: HACK
s.sessionManager = newSessionManager(s, myDB, args, sessionGrp, &chain, lbcdClient)
// Start up our background services // Start up our background services
if !args.DisableResolve && !args.DisableRocksDBRefresh { if !args.DisableResolve && !args.DisableRocksDBRefresh {
logrus.Info("Running detect changes") log.Info("Running detect changes")
myDB.RunDetectChanges(s.NotifierChan) myDB.RunDetectChanges(s.NotifierChan)
} }
if !args.DisableBlockingAndFiltering { if !args.DisableBlockingAndFiltering {
myDB.RunGetBlocksAndFilters() myDB.RunGetBlocksAndFilters()
} }
if !args.DisableStartPrometheus { if !args.DisableStartPrometheus {
go s.prometheusEndpoint(s.Args.PrometheusPort, "metrics") go s.prometheusEndpoint(fmt.Sprintf("%d", s.Args.PrometheusPort), "metrics")
} }
if !args.DisableStartUDP { if !args.DisableStartUDP {
go func() { go func() {
err := s.UDPServer() err := s.UDPServer(s.Args.Port)
if err != nil { if err != nil {
log.Println("UDP Server failed!", err) log.Errorf("UDP Server (%d) failed! %v", s.Args.Port, err)
} }
}() }()
if s.Args.JSONRPCPort != 0 {
go func() {
err := s.UDPServer(s.Args.JSONRPCPort)
if err != nil {
log.Errorf("UDP Server (%d) failed! %v", s.Args.JSONRPCPort, err)
}
}()
}
} }
if !args.DisableStartNotifier { if !args.DisableStartNotifier {
go func() { go func() {
@ -394,7 +461,7 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
// for this hub to allow for metric tracking. // for this hub to allow for metric tracking.
func (s *Server) prometheusEndpoint(port string, endpoint string) { func (s *Server) prometheusEndpoint(port string, endpoint string) {
http.Handle("/"+endpoint, promhttp.Handler()) http.Handle("/"+endpoint, promhttp.Handler())
log.Println(fmt.Sprintf("listening on :%s /%s", port, endpoint)) log.Printf("listening on :%s /%s\n", port, endpoint)
err := http.ListenAndServe(":"+port, nil) err := http.ListenAndServe(":"+port, nil)
log.Fatalln("Shouldn't happen??!?!", err) log.Fatalln("Shouldn't happen??!?!", err)
} }
@ -552,7 +619,7 @@ func InternalResolve(urls []string, DB *db.ReadOnlyDBColumnFamily) (*pb.Outputs,
BlockedTotal: 0, //TODO BlockedTotal: 0, //TODO
} }
logrus.Warn(res) log.Warn(res)
return res, nil return res, nil
} }

View file

@ -1,5 +1,11 @@
package server package server
import (
"github.com/lbryio/herald.go/db"
"github.com/lbryio/lbcd/chaincfg"
"github.com/lbryio/lbry.go/v3/extras/stop"
)
func (s *Server) AddPeerExported() func(*Peer, bool, bool) error { func (s *Server) AddPeerExported() func(*Peer, bool, bool) error {
return s.addPeer return s.addPeer
} }
@ -7,3 +13,7 @@ func (s *Server) AddPeerExported() func(*Peer, bool, bool) error {
func (s *Server) GetNumPeersExported() func() int64 { func (s *Server) GetNumPeersExported() func() int64 {
return s.getNumPeers return s.getNumPeers
} }
// NewSessionManagerExported exposes the internal newSessionManager
// constructor for use by external test packages. The lbcd client is
// always nil here, so daemon-backed functionality (e.g. transaction
// broadcast) is unavailable in sessions created through this path.
func NewSessionManagerExported(server *Server, db *db.ReadOnlyDBColumnFamily, args *Args, grp *stop.Group, chain *chaincfg.Params) *sessionManager {
	sm := newSessionManager(server, db, args, grp, chain, nil)
	return sm
}

View file

@ -1,7 +1,9 @@
package server package server
import ( import (
"bytes"
"encoding/hex" "encoding/hex"
"encoding/json"
"fmt" "fmt"
"net" "net"
"net/rpc" "net/rpc"
@ -14,12 +16,15 @@ import (
"github.com/lbryio/herald.go/db" "github.com/lbryio/herald.go/db"
"github.com/lbryio/herald.go/internal" "github.com/lbryio/herald.go/internal"
"github.com/lbryio/lbcd/chaincfg" "github.com/lbryio/lbcd/chaincfg"
"github.com/lbryio/lbcd/chaincfg/chainhash"
lbcd "github.com/lbryio/lbcd/rpcclient"
"github.com/lbryio/lbcd/wire"
"github.com/lbryio/lbry.go/v3/extras/stop"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
type headerNotification struct { type headerNotification struct {
internal.HeightHash internal.HeightHash
blockHeader [HEADER_SIZE]byte
blockHeaderElectrum *BlockHeaderElectrum blockHeaderElectrum *BlockHeaderElectrum
blockHeaderStr string blockHeaderStr string
} }
@ -30,6 +35,11 @@ type hashXNotification struct {
statusStr string statusStr string
} }
// peerNotification carries the address and port of a federation peer,
// delivered to sessions subscribed via "server.peers.subscribe"
// (see session.doNotify, which sends params [address, port]).
type peerNotification struct {
	address string
	port    string
}
type session struct { type session struct {
id uintptr id uintptr
addr net.Addr addr net.Addr
@ -38,6 +48,8 @@ type session struct {
hashXSubs map[[HASHX_LEN]byte]string hashXSubs map[[HASHX_LEN]byte]string
// headersSub indicates header subscription // headersSub indicates header subscription
headersSub bool headersSub bool
// peersSub indicates peer subscription
peersSub bool
// headersSubRaw indicates the header subscription mode // headersSubRaw indicates the header subscription mode
headersSubRaw bool headersSubRaw bool
// client provides the ability to send notifications // client provides the ability to send notifications
@ -52,18 +64,17 @@ type session struct {
func (s *session) doNotify(notification interface{}) { func (s *session) doNotify(notification interface{}) {
var method string var method string
var params interface{} var params interface{}
switch notification.(type) { switch note := notification.(type) {
case headerNotification: case headerNotification:
if !s.headersSub { if !s.headersSub {
return return
} }
note, _ := notification.(headerNotification)
heightHash := note.HeightHash heightHash := note.HeightHash
method = "blockchain.headers.subscribe" method = "blockchain.headers.subscribe"
if s.headersSubRaw { if s.headersSubRaw {
header := note.blockHeaderStr header := note.blockHeaderStr
if len(header) == 0 { if len(header) == 0 {
header = hex.EncodeToString(note.blockHeader[:]) header = hex.EncodeToString(note.BlockHeader[:])
} }
params = &HeadersSubscribeRawResp{ params = &HeadersSubscribeRawResp{
Hex: header, Hex: header,
@ -72,12 +83,11 @@ func (s *session) doNotify(notification interface{}) {
} else { } else {
header := note.blockHeaderElectrum header := note.blockHeaderElectrum
if header == nil { // not initialized if header == nil { // not initialized
header = newBlockHeaderElectrum(&note.blockHeader, uint32(heightHash.Height)) header = newBlockHeaderElectrum((*[HEADER_SIZE]byte)(note.BlockHeader), uint32(heightHash.Height))
} }
params = header params = header
} }
case hashXNotification: case hashXNotification:
note, _ := notification.(hashXNotification)
orig, ok := s.hashXSubs[note.hashX] orig, ok := s.hashXSubs[note.hashX]
if !ok { if !ok {
return return
@ -92,6 +102,13 @@ func (s *session) doNotify(notification interface{}) {
status = hex.EncodeToString(note.status) status = hex.EncodeToString(note.status)
} }
params = []string{orig, status} params = []string{orig, status}
case peerNotification:
if !s.peersSub {
return
}
method = "server.peers.subscribe"
params = []string{note.address, note.port}
default: default:
log.Warnf("unknown notification type: %v", notification) log.Warnf("unknown notification type: %v", notification)
return return
@ -116,32 +133,44 @@ type sessionManager struct {
// sessionsMut protects sessions, headerSubs, hashXSubs state // sessionsMut protects sessions, headerSubs, hashXSubs state
sessionsMut sync.RWMutex sessionsMut sync.RWMutex
sessions sessionMap sessions sessionMap
sessionsWait sync.WaitGroup // sessionsWait sync.WaitGroup
grp *stop.Group
sessionsMax int sessionsMax int
sessionTimeout time.Duration sessionTimeout time.Duration
manageTicker *time.Ticker manageTicker *time.Ticker
db *db.ReadOnlyDBColumnFamily db *db.ReadOnlyDBColumnFamily
args *Args
server *Server
chain *chaincfg.Params chain *chaincfg.Params
lbcd *lbcd.Client
// peerSubs are sessions subscribed via 'blockchain.peers.subscribe'
peerSubs sessionMap
// headerSubs are sessions subscribed via 'blockchain.headers.subscribe' // headerSubs are sessions subscribed via 'blockchain.headers.subscribe'
headerSubs sessionMap headerSubs sessionMap
// hashXSubs are sessions subscribed via 'blockchain.{address,scripthash}.subscribe' // hashXSubs are sessions subscribed via 'blockchain.{address,scripthash}.subscribe'
hashXSubs map[[HASHX_LEN]byte]sessionMap hashXSubs map[[HASHX_LEN]byte]sessionMap
} }
func newSessionManager(db *db.ReadOnlyDBColumnFamily, chain *chaincfg.Params, sessionsMax, sessionTimeout int) *sessionManager { func newSessionManager(server *Server, db *db.ReadOnlyDBColumnFamily, args *Args, grp *stop.Group, chain *chaincfg.Params, lbcd *lbcd.Client) *sessionManager {
return &sessionManager{ return &sessionManager{
sessions: make(sessionMap), sessions: make(sessionMap),
sessionsMax: sessionsMax, grp: grp,
sessionTimeout: time.Duration(sessionTimeout) * time.Second, sessionsMax: args.MaxSessions,
manageTicker: time.NewTicker(time.Duration(max(5, sessionTimeout/20)) * time.Second), sessionTimeout: time.Duration(args.SessionTimeout) * time.Second,
manageTicker: time.NewTicker(time.Duration(max(5, args.SessionTimeout/20)) * time.Second),
db: db, db: db,
args: args,
server: server,
chain: chain, chain: chain,
lbcd: lbcd,
peerSubs: make(sessionMap),
headerSubs: make(sessionMap), headerSubs: make(sessionMap),
hashXSubs: make(map[[HASHX_LEN]byte]sessionMap), hashXSubs: make(map[[HASHX_LEN]byte]sessionMap),
} }
} }
func (sm *sessionManager) start() { func (sm *sessionManager) start() {
sm.grp.Add(1)
go sm.manage() go sm.manage()
} }
@ -168,7 +197,13 @@ func (sm *sessionManager) manage() {
} }
sm.sessionsMut.Unlock() sm.sessionsMut.Unlock()
// Wait for next management clock tick. // Wait for next management clock tick.
<-sm.manageTicker.C select {
case <-sm.grp.Ch():
sm.grp.Done()
return
case <-sm.manageTicker.C:
continue
}
} }
} }
@ -190,14 +225,28 @@ func (sm *sessionManager) addSession(conn net.Conn) *session {
// each request and update subscriptions. // each request and update subscriptions.
s1 := rpc.NewServer() s1 := rpc.NewServer()
// Register "blockchain.claimtrie.*"" handlers. // Register "server.{features,banner,version}" handlers.
claimtrieSvc := &ClaimtrieService{sm.db} serverSvc := &ServerService{sm.args}
err := s1.RegisterName("blockchain.claimtrie", claimtrieSvc) err := s1.RegisterName("server", serverSvc)
if err != nil { if err != nil {
log.Errorf("RegisterService: %v\n", err) log.Errorf("RegisterName: %v\n", err)
} }
// Register other "blockchain.{block,address,scripthash}.*" handlers. // Register "server.peers" handlers.
peersSvc := &PeersService{Server: sm.server}
err = s1.RegisterName("server.peers", peersSvc)
if err != nil {
log.Errorf("RegisterName: %v\n", err)
}
// Register "blockchain.claimtrie.*"" handlers.
claimtrieSvc := &ClaimtrieService{sm.db, sm.server}
err = s1.RegisterName("blockchain.claimtrie", claimtrieSvc)
if err != nil {
log.Errorf("RegisterName: %v\n", err)
}
// Register "blockchain.{block,address,scripthash,transaction}.*" handlers.
blockchainSvc := &BlockchainBlockService{sm.db, sm.chain} blockchainSvc := &BlockchainBlockService{sm.db, sm.chain}
err = s1.RegisterName("blockchain.block", blockchainSvc) err = s1.RegisterName("blockchain.block", blockchainSvc)
if err != nil { if err != nil {
@ -219,12 +268,17 @@ func (sm *sessionManager) addSession(conn net.Conn) *session {
log.Errorf("RegisterName: %v\n", err) log.Errorf("RegisterName: %v\n", err)
goto fail goto fail
} }
err = s1.RegisterName("blockchain.transaction", &BlockchainTransactionService{sm.db, sm.chain, sm})
if err != nil {
log.Errorf("RegisterName: %v\n", err)
goto fail
}
sm.sessionsWait.Add(1) sm.grp.Add(1)
go func() { go func() {
s1.ServeCodec(&SessionServerCodec{jsonrpc.NewServerCodec(conn), sess}) s1.ServeCodec(&sessionServerCodec{jsonrpc.NewServerCodec(newJsonPatchingCodec(conn)), sess})
log.Infof("session %v goroutine exit", sess.addr.String()) log.Infof("session %v goroutine exit", sess.addr.String())
sm.sessionsWait.Done() sm.grp.Done()
}() }()
return sess return sess
@ -255,6 +309,27 @@ func (sm *sessionManager) removeSessionLocked(sess *session) {
sess.conn.Close() sess.conn.Close()
} }
// broadcastTx deserializes a raw transaction and submits it to the
// connected lbcd daemon via SendRawTransaction, returning the
// transaction hash on success.
//
// Returns an error if no daemon connection is configured (sm.lbcd is
// nil when the hub was started without --daemon-url; previously this
// caused a nil-pointer panic) or if rawTx does not deserialize as a
// wire.MsgTx.
func (sm *sessionManager) broadcastTx(rawTx []byte) (*chainhash.Hash, error) {
	if sm.lbcd == nil {
		return nil, fmt.Errorf("broadcast unavailable: no lbcd daemon connection configured")
	}
	var msgTx wire.MsgTx
	if err := msgTx.Deserialize(bytes.NewReader(rawTx)); err != nil {
		return nil, err
	}
	// allowHighFees=false: let the daemon reject abnormally high fees.
	return sm.lbcd.SendRawTransaction(&msgTx, false)
}
// peersSubscribe adds sess to (or removes it from) the set of sessions
// receiving "server.peers.subscribe" notifications, keeping the
// session's peersSub flag in sync. Guarded by sessionsMut.
func (sm *sessionManager) peersSubscribe(sess *session, subscribe bool) {
	sm.sessionsMut.Lock()
	defer sm.sessionsMut.Unlock()
	sess.peersSub = subscribe
	if subscribe {
		sm.peerSubs[sess.id] = sess
	} else {
		delete(sm.peerSubs, sess.id)
	}
}
func (sm *sessionManager) headersSubscribe(sess *session, raw bool, subscribe bool) { func (sm *sessionManager) headersSubscribe(sess *session, raw bool, subscribe bool) {
sm.sessionsMut.Lock() sm.sessionsMut.Lock()
defer sm.sessionsMut.Unlock() defer sm.sessionsMut.Unlock()
@ -294,18 +369,26 @@ func (sm *sessionManager) hashXSubscribe(sess *session, hashX []byte, original s
} }
func (sm *sessionManager) doNotify(notification interface{}) { func (sm *sessionManager) doNotify(notification interface{}) {
switch note := notification.(type) {
case internal.HeightHash:
// The HeightHash notification translates to headerNotification.
notification = &headerNotification{HeightHash: note}
}
sm.sessionsMut.RLock() sm.sessionsMut.RLock()
var subsCopy sessionMap var subsCopy sessionMap
switch notification.(type) { switch note := notification.(type) {
case headerNotification: case headerNotification:
note, _ := notification.(headerNotification) log.Infof("header notification @ %#v", note)
subsCopy = sm.headerSubs subsCopy = sm.headerSubs
if len(subsCopy) > 0 { if len(subsCopy) > 0 {
note.blockHeaderElectrum = newBlockHeaderElectrum(&note.blockHeader, uint32(note.Height)) hdr := [HEADER_SIZE]byte{}
note.blockHeaderStr = hex.EncodeToString(note.blockHeader[:]) copy(hdr[:], note.BlockHeader)
note.blockHeaderElectrum = newBlockHeaderElectrum(&hdr, uint32(note.Height))
note.blockHeaderStr = hex.EncodeToString(note.BlockHeader[:])
} }
case hashXNotification: case hashXNotification:
note, _ := notification.(hashXNotification) log.Infof("hashX notification @ %#v", note)
hashXSubs, ok := sm.hashXSubs[note.hashX] hashXSubs, ok := sm.hashXSubs[note.hashX]
if ok { if ok {
subsCopy = hashXSubs subsCopy = hashXSubs
@ -313,6 +396,8 @@ func (sm *sessionManager) doNotify(notification interface{}) {
if len(subsCopy) > 0 { if len(subsCopy) > 0 {
note.statusStr = hex.EncodeToString(note.status) note.statusStr = hex.EncodeToString(note.status)
} }
case peerNotification:
subsCopy = sm.peerSubs
default: default:
log.Warnf("unknown notification type: %v", notification) log.Warnf("unknown notification type: %v", notification)
} }
@ -322,26 +407,50 @@ func (sm *sessionManager) doNotify(notification interface{}) {
for _, sess := range subsCopy { for _, sess := range subsCopy {
sess.doNotify(notification) sess.doNotify(notification)
} }
// Produce secondary hashXNotification(s) corresponding to the headerNotification.
switch note := notification.(type) {
case headerNotification:
touched, err := sm.db.GetTouchedHashXs(uint32(note.Height))
if err != nil {
log.Errorf("failed to get touched hashXs at height %v, error: %v", note.Height, err)
break
}
for _, hashX := range touched {
hashXstatus, err := sm.db.GetStatus(hashX)
if err != nil {
log.Errorf("failed to get status of hashX %v, error: %v", hashX, err)
continue
}
note2 := hashXNotification{}
copy(note2.hashX[:], hashX)
note2.status = hashXstatus
sm.doNotify(note2)
}
}
} }
type SessionServerCodec struct { type sessionServerCodec struct {
rpc.ServerCodec rpc.ServerCodec
sess *session sess *session
} }
// ReadRequestHeader provides ability to rewrite the incoming // ReadRequestHeader provides ability to rewrite the incoming
// request "method" field. For example: // request "method" field. For example:
//
// blockchain.block.get_header -> blockchain.block.Get_header // blockchain.block.get_header -> blockchain.block.Get_header
// blockchain.address.listunspent -> blockchain.address.Listunspent // blockchain.address.listunspent -> blockchain.address.Listunspent
//
// This makes the "method" string compatible with rpc.Server // This makes the "method" string compatible with rpc.Server
// requirements. // requirements.
func (c *SessionServerCodec) ReadRequestHeader(req *rpc.Request) error { func (c *sessionServerCodec) ReadRequestHeader(req *rpc.Request) error {
log.Infof("receive header from %v", c.sess.addr.String()) log.Infof("from %v receive header", c.sess.addr.String())
err := c.ServerCodec.ReadRequestHeader(req) err := c.ServerCodec.ReadRequestHeader(req)
if err != nil { if err != nil {
log.Warnf("error: %v", err) log.Warnf("error: %v", err)
return err return err
} }
log.Infof("from %v receive header: %#v", c.sess.addr.String(), *req)
rawMethod := req.ServiceMethod rawMethod := req.ServiceMethod
parts := strings.Split(rawMethod, ".") parts := strings.Split(rawMethod, ".")
if len(parts) < 2 { if len(parts) < 2 {
@ -358,20 +467,21 @@ func (c *SessionServerCodec) ReadRequestHeader(req *rpc.Request) error {
} }
// ReadRequestBody wraps the regular implementation, but updates session stats too. // ReadRequestBody wraps the regular implementation, but updates session stats too.
func (c *SessionServerCodec) ReadRequestBody(params any) error { func (c *sessionServerCodec) ReadRequestBody(params any) error {
log.Infof("from %v receive body", c.sess.addr.String())
err := c.ServerCodec.ReadRequestBody(params) err := c.ServerCodec.ReadRequestBody(params)
if err != nil { if err != nil {
log.Warnf("error: %v", err) log.Warnf("error: %v", err)
return err return err
} }
log.Infof("receive body from %v", c.sess.addr.String()) log.Infof("from %v receive body: %#v", c.sess.addr.String(), params)
// Bump last receive time. // Bump last receive time.
c.sess.lastRecv = time.Now() c.sess.lastRecv = time.Now()
return err return err
} }
// WriteResponse wraps the regular implementation, but updates session stats too. // WriteResponse wraps the regular implementation, but updates session stats too.
func (c *SessionServerCodec) WriteResponse(resp *rpc.Response, reply any) error { func (c *sessionServerCodec) WriteResponse(resp *rpc.Response, reply any) error {
log.Infof("respond to %v", c.sess.addr.String()) log.Infof("respond to %v", c.sess.addr.String())
err := c.ServerCodec.WriteResponse(resp, reply) err := c.ServerCodec.WriteResponse(resp, reply)
if err != nil { if err != nil {
@ -381,3 +491,141 @@ func (c *SessionServerCodec) WriteResponse(resp *rpc.Response, reply any) error
c.sess.lastSend = time.Now() c.sess.lastSend = time.Now()
return err return err
} }
// serverRequest is a duplicate of serverRequest from
// net/rpc/jsonrpc/server.go with an added Version field which
// we can check (the stock codec has no way to see "jsonrpc").
type serverRequest struct {
	// Version holds the raw "jsonrpc" field, e.g. "1.0" or "2.0".
	Version string `json:"jsonrpc"`
	// Method is the dotted service method, e.g. "blockchain.headers.subscribe".
	Method string `json:"method"`
	// Params is kept raw so jsonPatchingCodec.Read can rewrite it in place.
	Params *json.RawMessage `json:"params"`
	// Id is kept raw and echoed back verbatim in the response.
	Id *json.RawMessage `json:"id"`
}
// serverResponse is a duplicate of serverResponse from
// net/rpc/jsonrpc/server.go with an added Version field which
// we can set at will (jsonPatchingCodec.Write fills in "2.0" when
// the field is empty).
type serverResponse struct {
	// Version is emitted as the "jsonrpc" field of the response.
	Version string `json:"jsonrpc"`
	// Id mirrors the request id.
	Id *json.RawMessage `json:"id"`
	// Result/Error are mutually exclusive; omitempty keeps the unused
	// one out of the encoded response.
	Result any `json:"result,omitempty"`
	Error  any `json:"error,omitempty"`
}
// jsonPatchingCodec is able to intercept the JSON requests/responses
// and tweak them. Currently, it appears we need to make several changes:
//  1) add "jsonrpc": "2.0" (or "jsonrpc": "1.0") in response
//  2) add newline to frame response
//  3) add "params": [] when "params" is missing
//  4) replace params ["arg1", "arg2", ...] with [["arg1", "arg2", ...]]
//
// It is layered under the net/rpc/jsonrpc ServerCodec: Read feeds the
// patched request bytes to the rpc server, Write patches the outgoing
// response before it reaches the connection.
type jsonPatchingCodec struct {
	conn      net.Conn      // underlying client connection
	inBuffer  *bytes.Buffer // raw bytes read from conn, pending decode
	dec       *json.Decoder // decodes requests out of inBuffer
	enc       *json.Encoder // encodes patched requests into outBuffer
	outBuffer *bytes.Buffer // patched request bytes served by Read
}
// newJsonPatchingCodec wraps conn in a jsonPatchingCodec that rewrites
// JSON-RPC traffic on the fly (see the jsonPatchingCodec type comment).
func newJsonPatchingCodec(conn net.Conn) *jsonPatchingCodec {
	in := bytes.NewBuffer(nil)
	out := bytes.NewBuffer(nil)
	c := &jsonPatchingCodec{
		conn:      conn,
		inBuffer:  in,
		dec:       json.NewDecoder(in),
		enc:       json.NewEncoder(out),
		outBuffer: out,
	}
	return c
}
// Read implements io.Reader for the jsonrpc ServerCodec layered above.
// It reads one newline-framed request from the connection, patches its
// "params" field (see jsonPatchingCodec type comment), re-encodes the
// request into outBuffer, and serves those bytes to the caller.
func (c *jsonPatchingCodec) Read(p []byte) (n int, err error) {
	if c.outBuffer.Len() > 0 {
		// Return remaining decoded bytes.
		return c.outBuffer.Read(p)
	}
	// Buffer contents consumed. Try to decode more JSON.

	// Read until framing newline. This allows us to print the raw request.
	// NOTE(review): if a client pipelines several requests, the decoder
	// below may buffer more than one internally while inBuffer drains;
	// this loop could then block waiting for another newline even though
	// a full request is already buffered — TODO confirm with a pipelining
	// client.
	for !bytes.ContainsAny(c.inBuffer.Bytes(), "\n") {
		var buf [1024]byte
		n, err = c.conn.Read(buf[:])
		if err != nil {
			return 0, err
		}
		c.inBuffer.Write(buf[:n])
	}
	log.Infof("raw request: %v", c.inBuffer.String())

	var req serverRequest
	err = c.dec.Decode(&req)
	if err != nil {
		return 0, err
	}

	if req.Params != nil {
		n := len(*req.Params) // shadows the result param; byte length of raw params
		if n < 2 || (*req.Params)[0] != '[' && (*req.Params)[n-1] != ']' {
			// This is an error, but we're not going to try to correct it.
			// (&& binds tighter than ||: malformed params fall through
			// to encode unchanged.)
			goto encode
		}
		// FIXME: The heuristics here don't cover all possibilities.
		// For example: [{obj1}, {obj2}] or ["foo,bar"] would not
		// be handled correctly.
		bracketed := (*req.Params)[1 : n-1]
		n = len(bracketed)
		if n > 1 && (bracketed[0] == '{' || bracketed[0] == '[') {
			// Probable single object or list argument.
			goto encode
		}
		// The params look like ["arg1", "arg2", "arg3", ...].
		// We're in trouble because our jsonrpc library does not
		// handle this. So pack these args in an inner list.
		// The handler method will receive ONE list argument.
		params := json.RawMessage(fmt.Sprintf("[[%s]]", bracketed))
		req.Params = &params
	} else {
		// Add empty argument list if params omitted.
		params := json.RawMessage("[]")
		req.Params = &params
	}

encode:
	// Encode the request. This allows us to print the patched request.
	buf, err := json.Marshal(req)
	if err != nil {
		return 0, err
	}
	log.Infof("patched request: %v", string(buf))
	err = c.enc.Encode(req)
	if err != nil {
		return 0, err
	}
	return c.outBuffer.Read(p)
}
// Write implements io.Writer for the codec. The RPC server's response
// arrives here; we log it, ensure the "jsonrpc" version field is
// populated, and forward it to the client with a trailing newline
// for framing.
func (c *jsonPatchingCodec) Write(p []byte) (n int, err error) {
	log.Infof("raw response: %v", string(p))

	var resp serverResponse
	if err = json.Unmarshal(p, &resp); err != nil {
		return 0, err
	}

	// Add "jsonrpc": "2.0" if missing.
	if len(resp.Version) == 0 {
		resp.Version = "2.0"
	}

	patched, err := json.Marshal(resp)
	if err != nil {
		return 0, err
	}
	log.Infof("patched response: %v", string(patched))

	// Add newline for framing.
	return c.conn.Write(append(patched, '\n'))
}
// Close implements io.Closer by shutting down the underlying client
// connection.
func (c *jsonPatchingCodec) Close() error {
	return c.conn.Close()
}

View file

@ -219,8 +219,9 @@ func UDPPing(ip, port string) (*SPVPong, error) {
// UDPServer is a goroutine that starts an udp server that implements the hubs // UDPServer is a goroutine that starts an udp server that implements the hubs
// Ping/Pong protocol to find out about each other without making full TCP // Ping/Pong protocol to find out about each other without making full TCP
// connections. // connections.
func (s *Server) UDPServer() error { func (s *Server) UDPServer(port int) error {
address := ":" + s.Args.Port address := ":" + strconv.Itoa(port)
tip := make([]byte, 32) tip := make([]byte, 32)
addr, err := net.ResolveUDPAddr("udp", address) addr, err := net.ResolveUDPAddr("udp", address)
if err != nil { if err != nil {

View file

@ -8,12 +8,13 @@ import (
"os" "os"
"os/signal" "os/signal"
"github.com/lbryio/lbry.go/v3/extras/stop"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
// shutdownRequestChannel is used to initiate shutdown from one of the // shutdownRequestChannel is used to initiate shutdown from one of the
// subsystems using the same code paths as when an interrupt signal is received. // subsystems using the same code paths as when an interrupt signal is received.
var shutdownRequestChannel = make(chan struct{}) var shutdownRequestChannel = make(stop.Chan)
// interruptSignals defines the default signals to catch in order to do a proper // interruptSignals defines the default signals to catch in order to do a proper
// shutdown. This may be modified during init depending on the platform. // shutdown. This may be modified during init depending on the platform.
@ -48,7 +49,6 @@ func interruptListener() <-chan struct{} {
case sig := <-interruptChannel: case sig := <-interruptChannel:
log.Infof("Received signal (%s). Already "+ log.Infof("Received signal (%s). Already "+
"shutting down...", sig) "shutting down...", sig)
case <-shutdownRequestChannel: case <-shutdownRequestChannel:
log.Info("Shutdown requested. Already " + log.Info("Shutdown requested. Already " +
"shutting down...") "shutting down...")

40
testdata/f.csv vendored
View file

@ -1,21 +1,21 @@
f, f,
660d649ba1defa4ab5ab71f8a977d7f7cedb11056e,919be5811844077f4660af66afa9a59a5ad17cf5c541524e780fe2137bfa250c 660d649ba1defa4ab5ab71f8,919be5811844077f4660af66afa9a59a5ad17cf5c541524e780fe2137bfa250c
6623c6895027f70a5330bbcb1153d635abcb4d5224,8dadcde1a6f676d4004eacd399f825006ddf136d1e92b1c92113377b3e1741b4 6623c6895027f70a5330bbcb,8dadcde1a6f676d4004eacd399f825006ddf136d1e92b1c92113377b3e1741b4
664f095b24484ebce8f31fbf008e63cc4aa163d401,c0c4a751f569c1f9c01531f57ba674b2ad2338d9c08f9e9fc85b0209d15466b2 664f095b24484ebce8f31fbf,c0c4a751f569c1f9c01531f57ba674b2ad2338d9c08f9e9fc85b0209d15466b2
665201a38de7d7243df717c9f9279cdd30105f0f77,d9293577cc0d51fe3a5bee78fea9b2b2222e6c2aa0d26a4ef4bfb7dd095587e8 665201a38de7d7243df717c9,d9293577cc0d51fe3a5bee78fea9b2b2222e6c2aa0d26a4ef4bfb7dd095587e8
665328b2449e537b0ca4733f87ac5ebcdf033c5ebd,624f80a361e47c7eb1b815e8714a40f67b4f642a5546547a3fcb5bf5593d8fab 665328b2449e537b0ca4733f,624f80a361e47c7eb1b815e8714a40f67b4f642a5546547a3fcb5bf5593d8fab
665ec882021f55b1fbaa5fad00df5c5d07633b7af3,1e917fbc04385290d654f711bdef12773dd54b6b5ea26fe2a9d58ed051f2cb7f 665ec882021f55b1fbaa5fad,1e917fbc04385290d654f711bdef12773dd54b6b5ea26fe2a9d58ed051f2cb7f
6671c131cd433750ba6d3908150ca4910841164b74,a2ebfbdf7a23024c340a45f201645aa46f48bc1fdd8d34ed83fcffbf1ee90523 6671c131cd433750ba6d3908,a2ebfbdf7a23024c340a45f201645aa46f48bc1fdd8d34ed83fcffbf1ee90523
667fb93d9ae877ba11f337f21422b0679852580802,4710649e06619e13250754937e9c17c20b07434751171aac2f2f78b184aa0146 667fb93d9ae877ba11f337f2,4710649e06619e13250754937e9c17c20b07434751171aac2f2f78b184aa0146
668ed5f39a5db059dc3261377f2a47728f7a357d33,8dd8ca749b87f43e290904749a546fe319c9d53e765f065bb8beb234a117655e 668ed5f39a5db059dc326137,8dd8ca749b87f43e290904749a546fe319c9d53e765f065bb8beb234a117655e
66951782f6ba94f2b71e46d0cc4a2411b14d81eb70,4f5c9434dd0886c57c2530991cebd973e1b50d5ba8fcfc019e54561217a49bbb 66951782f6ba94f2b71e46d0,4f5c9434dd0886c57c2530991cebd973e1b50d5ba8fcfc019e54561217a49bbb
66970565dfe2b01cad49b73a085a3c3f7a3be61c4c,f6ca0ae18c896d9bc97c5a9d0c3a06256485f59c77fb91780b213f933b80f48b 66970565dfe2b01cad49b73a,f6ca0ae18c896d9bc97c5a9d0c3a06256485f59c77fb91780b213f933b80f48b
669f6a30a6712062da0cc27181845c04d7430abf73,5c6604bfd63b871daceb7893dd618850458974fe4108871c1a1323fb8ae34e4e 669f6a30a6712062da0cc271,5c6604bfd63b871daceb7893dd618850458974fe4108871c1a1323fb8ae34e4e
66a9a7b89b78553592acf3dfc417c1d7654dab3273,0561f28c3a5ea0027ecb3c53fa068772a6b7cb73d23104a14f9aba8cd1f070a2 66a9a7b89b78553592acf3df,0561f28c3a5ea0027ecb3c53fa068772a6b7cb73d23104a14f9aba8cd1f070a2
66aba81567ba48f001f843f01354d575c2e2687847,b0f6ae2c1db8263f7e11fc79423109e718d1f3c30bd123c4243401b5e4f1fee6 66aba81567ba48f001f843f0,b0f6ae2c1db8263f7e11fc79423109e718d1f3c30bd123c4243401b5e4f1fee6
66b569cc3d28be4466fb28d147f66d6d8769598964,ecee392ad8217f325508ba38d280436fb0a520b79a9627e5e18197bf55540885 66b569cc3d28be4466fb28d1,ecee392ad8217f325508ba38d280436fb0a520b79a9627e5e18197bf55540885
66d4662cd100d66055917d6342d48f49d948fcc255,5762a8ac767fa30d2ca76db7081f8a2e4f5da4f0bf92d29e1322da9a154cc3d6 66d4662cd100d66055917d63,5762a8ac767fa30d2ca76db7081f8a2e4f5da4f0bf92d29e1322da9a154cc3d6
66d6fa6ac71d0255dd3f185de6480d5b4316b6b050,5fc193e5e51b3bd8e95f4eb9df63236da7abf678fc47c0b339ceb5c127d0f488 66d6fa6ac71d0255dd3f185d,5fc193e5e51b3bd8e95f4eb9df63236da7abf678fc47c0b339ceb5c127d0f488
66e5b6c7c231a02a32eedd8383a5750fd135244a03,58c70ffbfada12550f24bf7931cee06eb2e267dec3560e2e46843e383415f163 66e5b6c7c231a02a32eedd83,58c70ffbfada12550f24bf7931cee06eb2e267dec3560e2e46843e383415f163
66e673cce02c2163f756491ef05d7535ceb578e215,b8db43d1f6e62361e2e3b8fa765f79c08ddfb3035caa06f8250d6d1b063a7140 66e673cce02c2163f756491e,b8db43d1f6e62361e2e3b8fa765f79c08ddfb3035caa06f8250d6d1b063a7140
66fc4ad75184e6029c805d9494eed4e81be770c002,fc7ac5e785f73732d95183d6bdc3423d41a074fc3f04b1304bae1efa652edde1 66fc4ad75184e6029c805d94,fc7ac5e785f73732d95183d6bdc3423d41a074fc3f04b1304bae1efa652edde1

1 f
2 660d649ba1defa4ab5ab71f8a977d7f7cedb11056e 660d649ba1defa4ab5ab71f8 919be5811844077f4660af66afa9a59a5ad17cf5c541524e780fe2137bfa250c
3 6623c6895027f70a5330bbcb1153d635abcb4d5224 6623c6895027f70a5330bbcb 8dadcde1a6f676d4004eacd399f825006ddf136d1e92b1c92113377b3e1741b4
4 664f095b24484ebce8f31fbf008e63cc4aa163d401 664f095b24484ebce8f31fbf c0c4a751f569c1f9c01531f57ba674b2ad2338d9c08f9e9fc85b0209d15466b2
5 665201a38de7d7243df717c9f9279cdd30105f0f77 665201a38de7d7243df717c9 d9293577cc0d51fe3a5bee78fea9b2b2222e6c2aa0d26a4ef4bfb7dd095587e8
6 665328b2449e537b0ca4733f87ac5ebcdf033c5ebd 665328b2449e537b0ca4733f 624f80a361e47c7eb1b815e8714a40f67b4f642a5546547a3fcb5bf5593d8fab
7 665ec882021f55b1fbaa5fad00df5c5d07633b7af3 665ec882021f55b1fbaa5fad 1e917fbc04385290d654f711bdef12773dd54b6b5ea26fe2a9d58ed051f2cb7f
8 6671c131cd433750ba6d3908150ca4910841164b74 6671c131cd433750ba6d3908 a2ebfbdf7a23024c340a45f201645aa46f48bc1fdd8d34ed83fcffbf1ee90523
9 667fb93d9ae877ba11f337f21422b0679852580802 667fb93d9ae877ba11f337f2 4710649e06619e13250754937e9c17c20b07434751171aac2f2f78b184aa0146
10 668ed5f39a5db059dc3261377f2a47728f7a357d33 668ed5f39a5db059dc326137 8dd8ca749b87f43e290904749a546fe319c9d53e765f065bb8beb234a117655e
11 66951782f6ba94f2b71e46d0cc4a2411b14d81eb70 66951782f6ba94f2b71e46d0 4f5c9434dd0886c57c2530991cebd973e1b50d5ba8fcfc019e54561217a49bbb
12 66970565dfe2b01cad49b73a085a3c3f7a3be61c4c 66970565dfe2b01cad49b73a f6ca0ae18c896d9bc97c5a9d0c3a06256485f59c77fb91780b213f933b80f48b
13 669f6a30a6712062da0cc27181845c04d7430abf73 669f6a30a6712062da0cc271 5c6604bfd63b871daceb7893dd618850458974fe4108871c1a1323fb8ae34e4e
14 66a9a7b89b78553592acf3dfc417c1d7654dab3273 66a9a7b89b78553592acf3df 0561f28c3a5ea0027ecb3c53fa068772a6b7cb73d23104a14f9aba8cd1f070a2
15 66aba81567ba48f001f843f01354d575c2e2687847 66aba81567ba48f001f843f0 b0f6ae2c1db8263f7e11fc79423109e718d1f3c30bd123c4243401b5e4f1fee6
16 66b569cc3d28be4466fb28d147f66d6d8769598964 66b569cc3d28be4466fb28d1 ecee392ad8217f325508ba38d280436fb0a520b79a9627e5e18197bf55540885
17 66d4662cd100d66055917d6342d48f49d948fcc255 66d4662cd100d66055917d63 5762a8ac767fa30d2ca76db7081f8a2e4f5da4f0bf92d29e1322da9a154cc3d6
18 66d6fa6ac71d0255dd3f185de6480d5b4316b6b050 66d6fa6ac71d0255dd3f185d 5fc193e5e51b3bd8e95f4eb9df63236da7abf678fc47c0b339ceb5c127d0f488
19 66e5b6c7c231a02a32eedd8383a5750fd135244a03 66e5b6c7c231a02a32eedd83 58c70ffbfada12550f24bf7931cee06eb2e267dec3560e2e46843e383415f163
20 66e673cce02c2163f756491ef05d7535ceb578e215 66e673cce02c2163f756491e b8db43d1f6e62361e2e3b8fa765f79c08ddfb3035caa06f8250d6d1b063a7140
21 66fc4ad75184e6029c805d9494eed4e81be770c002 66fc4ad75184e6029c805d94 fc7ac5e785f73732d95183d6bdc3423d41a074fc3f04b1304bae1efa652edde1

40
testdata/g.csv vendored
View file

@ -1,21 +1,21 @@
g, g,
6702c124856d5168381a32971d8933440a1728fc41,575696fd653a4de2f9a8c1f580cf0c229631b0f5d95fceb354cda133e2eb2d34 6702c124856d5168381a3297,575696fd653a4de2f9a8c1f580cf0c229631b0f5d95fceb354cda133e2eb2d34
6707f1511e3a2cb28493f91b85e9e4a9d9d07c86a5,ba368e0f859ee36da8701df1c0b52cbf0c0f8a4b1a91f6d0db83a408f5a937d1 6707f1511e3a2cb28493f91b,ba368e0f859ee36da8701df1c0b52cbf0c0f8a4b1a91f6d0db83a408f5a937d1
6707fd4213cae8d5342a98ba49b255fa80b2a9a6e4,bd3a44d30f66444f8732119bc7e0cf0bb47f8f0ab2840987fc06b629f3e6d3f4 6707fd4213cae8d5342a98ba,bd3a44d30f66444f8732119bc7e0cf0bb47f8f0ab2840987fc06b629f3e6d3f4
6710294a5693224a6222404ba45fd38eb2e77979a4,de35a8ea0a26d17445e2f509db23188961b5cd1229b96d2411565adf63731b5c 6710294a5693224a6222404b,de35a8ea0a26d17445e2f509db23188961b5cd1229b96d2411565adf63731b5c
6716a9f84e02143b50d9034aec126b12d7f2708cc4,5823640ae4529f8df2dab20386c887d0a1ba1ffa4583b99dff761c01f670c2fa 6716a9f84e02143b50d9034a,5823640ae4529f8df2dab20386c887d0a1ba1ffa4583b99dff761c01f670c2fa
672e51bc65c9b97d482b0b720e6cb673c41fe7b5c5,0687df449bd8cb8d8f526f4189973d084d786ab0927d81c127f56b03c61aa955 672e51bc65c9b97d482b0b72,0687df449bd8cb8d8f526f4189973d084d786ab0927d81c127f56b03c61aa955
67682620db65932047689e5eaf392d6b85be801864,b262d40758edb28d1c04fa3a24d8268990516de6846ad94d002ce55640866239 67682620db65932047689e5e,b262d40758edb28d1c04fa3a24d8268990516de6846ad94d002ce55640866239
676e8c320dbbf5eebc2969a93fbc51dd7f6062a7d1,c9e2a8e7181a70e2a488b884c8baadb4043a075c6876cb012c67fbec5aa9f615 676e8c320dbbf5eebc2969a9,c9e2a8e7181a70e2a488b884c8baadb4043a075c6876cb012c67fbec5aa9f615
6772e2ac48891ee3c2c727835702a374ad0cb70fd6,985a9c9ee7a0626d78dab431e663289762ce6959be314f91f7b08b1466097fd6 6772e2ac48891ee3c2c72783,985a9c9ee7a0626d78dab431e663289762ce6959be314f91f7b08b1466097fd6
67847dd1dac117b85d1e20d93580cdf42f00001a77,62e6b1b8c2961703a90276dcde6dad182b2d14e23f27dccc927cca7770b9890e 67847dd1dac117b85d1e20d9,62e6b1b8c2961703a90276dcde6dad182b2d14e23f27dccc927cca7770b9890e
678f49948c72b7295f12092a24d300eeff894f1dd7,2e7c456dac5206c5627736924e96ac016a09a88ec5f4835fbe0cf9e294611c88 678f49948c72b7295f12092a,2e7c456dac5206c5627736924e96ac016a09a88ec5f4835fbe0cf9e294611c88
67948b9633ab2ec07d7525936254e66f8c957d026c,66b5c54b3a685de3ea18f9e69254eec065eb3207ac1f93494fdcd585e9a267a0 67948b9633ab2ec07d752593,66b5c54b3a685de3ea18f9e69254eec065eb3207ac1f93494fdcd585e9a267a0
679674c162db8d3bb57c434fe87825625c4d4daf63,05425880d80258f7441859b3494415a3fd7398c9e209a19674abd48372b283c6 679674c162db8d3bb57c434f,05425880d80258f7441859b3494415a3fd7398c9e209a19674abd48372b283c6
67a8d3f17df85502bd644a364721e6364d61635b73,1efce69a3a05c505e9f9cc5c2241d02099c043d934389b430fd8b185e6dfe6cb 67a8d3f17df85502bd644a36,1efce69a3a05c505e9f9cc5c2241d02099c043d934389b430fd8b185e6dfe6cb
67bad7f4fb3c6828b6fc4624d43786fc8f55d6eb0f,04a1c0a7ffe7acbf974ca18cf3debbd8e1be3d6703f842f57ef14af6d4c336d3 67bad7f4fb3c6828b6fc4624,04a1c0a7ffe7acbf974ca18cf3debbd8e1be3d6703f842f57ef14af6d4c336d3
67c13fb0c65acca5520bc2f59bd91ca3482dbec156,7fdc6989cd778baad45cd98358ea060237b169a4aeaeb14da6ac4686b7858c9f 67c13fb0c65acca5520bc2f5,7fdc6989cd778baad45cd98358ea060237b169a4aeaeb14da6ac4686b7858c9f
67d4314588b4424b0ee026536b9bd7857f11cab2ee,c63fd7a85a533b8591577bab805104708ba5458fab0e343d46b3e24a28b92cb5 67d4314588b4424b0ee02653,c63fd7a85a533b8591577bab805104708ba5458fab0e343d46b3e24a28b92cb5
67d734244f85f32a58e34e2d9cadf225a56973d32f,d19a6307c24470b3973973319770bdb896218bb58d1f2d07c7226266075057d0 67d734244f85f32a58e34e2d,d19a6307c24470b3973973319770bdb896218bb58d1f2d07c7226266075057d0
67d9c159c5d5e407e6b0a4cacf9d6fe62a55b0fedc,89cbdb903fdfe0b44e74b0a69eed3de7029f18c28f77e5509f8ace766ab86610 67d9c159c5d5e407e6b0a4ca,89cbdb903fdfe0b44e74b0a69eed3de7029f18c28f77e5509f8ace766ab86610
67fafc73d674250f11e559ab08b287f5714e531761,1752ffbf9807bb2e4e480bf045b4bacc472befe755287384b5a526065a58c065 67fafc73d674250f11e559ab,1752ffbf9807bb2e4e480bf045b4bacc472befe755287384b5a526065a58c065

1 g
2 6702c124856d5168381a32971d8933440a1728fc41 6702c124856d5168381a3297 575696fd653a4de2f9a8c1f580cf0c229631b0f5d95fceb354cda133e2eb2d34
3 6707f1511e3a2cb28493f91b85e9e4a9d9d07c86a5 6707f1511e3a2cb28493f91b ba368e0f859ee36da8701df1c0b52cbf0c0f8a4b1a91f6d0db83a408f5a937d1
4 6707fd4213cae8d5342a98ba49b255fa80b2a9a6e4 6707fd4213cae8d5342a98ba bd3a44d30f66444f8732119bc7e0cf0bb47f8f0ab2840987fc06b629f3e6d3f4
5 6710294a5693224a6222404ba45fd38eb2e77979a4 6710294a5693224a6222404b de35a8ea0a26d17445e2f509db23188961b5cd1229b96d2411565adf63731b5c
6 6716a9f84e02143b50d9034aec126b12d7f2708cc4 6716a9f84e02143b50d9034a 5823640ae4529f8df2dab20386c887d0a1ba1ffa4583b99dff761c01f670c2fa
7 672e51bc65c9b97d482b0b720e6cb673c41fe7b5c5 672e51bc65c9b97d482b0b72 0687df449bd8cb8d8f526f4189973d084d786ab0927d81c127f56b03c61aa955
8 67682620db65932047689e5eaf392d6b85be801864 67682620db65932047689e5e b262d40758edb28d1c04fa3a24d8268990516de6846ad94d002ce55640866239
9 676e8c320dbbf5eebc2969a93fbc51dd7f6062a7d1 676e8c320dbbf5eebc2969a9 c9e2a8e7181a70e2a488b884c8baadb4043a075c6876cb012c67fbec5aa9f615
10 6772e2ac48891ee3c2c727835702a374ad0cb70fd6 6772e2ac48891ee3c2c72783 985a9c9ee7a0626d78dab431e663289762ce6959be314f91f7b08b1466097fd6
11 67847dd1dac117b85d1e20d93580cdf42f00001a77 67847dd1dac117b85d1e20d9 62e6b1b8c2961703a90276dcde6dad182b2d14e23f27dccc927cca7770b9890e
12 678f49948c72b7295f12092a24d300eeff894f1dd7 678f49948c72b7295f12092a 2e7c456dac5206c5627736924e96ac016a09a88ec5f4835fbe0cf9e294611c88
13 67948b9633ab2ec07d7525936254e66f8c957d026c 67948b9633ab2ec07d752593 66b5c54b3a685de3ea18f9e69254eec065eb3207ac1f93494fdcd585e9a267a0
14 679674c162db8d3bb57c434fe87825625c4d4daf63 679674c162db8d3bb57c434f 05425880d80258f7441859b3494415a3fd7398c9e209a19674abd48372b283c6
15 67a8d3f17df85502bd644a364721e6364d61635b73 67a8d3f17df85502bd644a36 1efce69a3a05c505e9f9cc5c2241d02099c043d934389b430fd8b185e6dfe6cb
16 67bad7f4fb3c6828b6fc4624d43786fc8f55d6eb0f 67bad7f4fb3c6828b6fc4624 04a1c0a7ffe7acbf974ca18cf3debbd8e1be3d6703f842f57ef14af6d4c336d3
17 67c13fb0c65acca5520bc2f59bd91ca3482dbec156 67c13fb0c65acca5520bc2f5 7fdc6989cd778baad45cd98358ea060237b169a4aeaeb14da6ac4686b7858c9f
18 67d4314588b4424b0ee026536b9bd7857f11cab2ee 67d4314588b4424b0ee02653 c63fd7a85a533b8591577bab805104708ba5458fab0e343d46b3e24a28b92cb5
19 67d734244f85f32a58e34e2d9cadf225a56973d32f 67d734244f85f32a58e34e2d d19a6307c24470b3973973319770bdb896218bb58d1f2d07c7226266075057d0
20 67d9c159c5d5e407e6b0a4cacf9d6fe62a55b0fedc 67d9c159c5d5e407e6b0a4ca 89cbdb903fdfe0b44e74b0a69eed3de7029f18c28f77e5509f8ace766ab86610
21 67fafc73d674250f11e559ab08b287f5714e531761 67fafc73d674250f11e559ab 1752ffbf9807bb2e4e480bf045b4bacc472befe755287384b5a526065a58c065

View file

@ -1 +1 @@
v0.2022.10.04.1 v0.2022.10.05.1