integration testing scripts (#64)

* integration testing scripts

Some scripts for integration testing and a Dockerfile for a GitHub Action.
Still need to figure out how to properly run a more realistic version
in CI.

* update

* changes

* db shutdown race condition fix

* changes per pr

* changes per code review

* fix testing

* add shutdowncalled bool to db
Jeffrey Picard 2022-10-04 20:25:44 +03:00 committed by GitHub
parent 8fb3db8136
commit 537b8c7ddd
9 changed files with 341 additions and 14 deletions
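
At a glance, the race fix works like this: every iterator started through IterCF registers its done/shutdown channel pair with the DB under a mutex, and Shutdown sets a flag (so no new iterators can start), signals every registered iterator, and waits for each of them to finish before cleaning up. Below is a condensed sketch of that pattern; it is not the literal herald.go code (the helper names iterChans, register, and NewDB are illustrative only), and the real field names and call sites are in the diffs that follow.

package sketch

import "sync"

// iterChans pairs the channels each iterator registers: done reports that the
// iterator has cleaned up, shutdown tells it to stop early. This mirrors the
// []chan struct{} slices stored in OpenIterators in the diff below.
type iterChans struct{ done, shutdown chan struct{} }

type DB struct {
	itMut          sync.RWMutex
	shutdownCalled bool
	openIterators  map[string]iterChans
}

func NewDB() *DB {
	return &DB{openIterators: make(map[string]iterChans)}
}

// register is what IterCF effectively does before it starts iterating:
// once shutdown has begun, refuse to start a new iterator.
func (db *DB) register(key string, ch iterChans) bool {
	db.itMut.Lock()
	defer db.itMut.Unlock()
	if db.shutdownCalled {
		return false
	}
	db.openIterators[key] = ch
	return true
}

// Shutdown mirrors ReadOnlyDBColumnFamily.Shutdown: set the flag, signal
// every open iterator, then wait for each one to report that it is done.
func (db *DB) Shutdown() {
	db.itMut.Lock()
	db.shutdownCalled = true
	for _, it := range db.openIterators {
		it.shutdown <- struct{}{}
	}
	for _, it := range db.openIterators {
		<-it.done
	}
	db.itMut.Unlock()
}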


@@ -8,6 +8,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"os"
+	"sync"
 	"time"

 	"github.com/lbryio/herald.go/db/prefixes"
@@ -58,8 +59,11 @@ type ReadOnlyDBColumnFamily struct {
 	BlockedChannels  map[string][]byte
 	FilteredStreams  map[string][]byte
 	FilteredChannels map[string][]byte
+	OpenIterators    map[string][]chan struct{}
+	ItMut            sync.RWMutex
 	ShutdownChan     chan struct{}
 	DoneChan         chan struct{}
+	ShutdownCalled   bool
 	Cleanup          func()
 }
@@ -318,9 +322,38 @@ func intMin(a, b int) int {
 	return b
 }

+// FIXME: This was copied from the signal.go file, maybe move it to a more common place?
+// interruptRequested returns true when the channel returned by
+// interruptListener was closed. This simplifies early shutdown slightly since
+// the caller can just use an if statement instead of a select.
+func interruptRequested(interrupted <-chan struct{}) bool {
+	select {
+	case <-interrupted:
+		return true
+	default:
+	}
+
+	return false
+}
+
 func IterCF(db *grocksdb.DB, opts *IterOptions) <-chan *prefixes.PrefixRowKV {
 	ch := make(chan *prefixes.PrefixRowKV)

+	iterKey := fmt.Sprintf("%p", opts)
+	if opts.DB != nil {
+		opts.DB.ItMut.Lock()
+		// There is a tiny chance that we were waiting on the above lock while shutdown was
+		// being called and by the time we get it the db has already notified all active
+		// iterators to shut down. In this case we go to the else branch.
+		if !opts.DB.ShutdownCalled {
+			opts.DB.OpenIterators[iterKey] = []chan struct{}{opts.DoneChan, opts.ShutdownChan}
+			opts.DB.ItMut.Unlock()
+		} else {
+			opts.DB.ItMut.Unlock()
+			return ch
+		}
+	}
+
 	ro := grocksdb.NewDefaultReadOptions()
 	ro.SetFillCache(opts.FillCache)
 	it := db.NewIteratorCF(ro, opts.CfHandle)
@@ -336,6 +369,12 @@ func IterCF(db *grocksdb.DB, opts *IterOptions) <-chan *prefixes.PrefixRowKV {
 		it.Close()
 		close(ch)
 		ro.Destroy()
+		if opts.DB != nil {
+			opts.DoneChan <- struct{}{}
+			opts.DB.ItMut.Lock()
+			delete(opts.DB.OpenIterators, iterKey)
+			opts.DB.ItMut.Unlock()
+		}
 	}()

 	var prevKey []byte
@@ -355,6 +394,9 @@ func IterCF(db *grocksdb.DB, opts *IterOptions) <-chan *prefixes.PrefixRowKV {
 			if kv = opts.ReadRow(&prevKey); kv != nil {
 				ch <- kv
 			}
+			if interruptRequested(opts.ShutdownChan) {
+				return
+			}
 		}
 	}()
@@ -412,7 +454,7 @@ func (db *ReadOnlyDBColumnFamily) selectFrom(prefix []byte, startKey, stopKey pr
 		return nil, err
 	}
 	// Prefix and handle
-	options := NewIterateOptions().WithPrefix(prefix).WithCfHandle(handle)
+	options := NewIterateOptions().WithDB(db).WithPrefix(prefix).WithCfHandle(handle)
 	// Start and stop bounds
 	options = options.WithStart(startKey.PackKey()).WithStop(stopKey.PackKey()).WithIncludeStop(true)
 	// Don't include the key
@@ -514,6 +556,7 @@ func GetProdDB(name string, secondaryPath string) (*ReadOnlyDBColumnFamily, func
 	}
 	db, err := GetDBColumnFamilies(name, secondaryPath, cfNames)
+	db.OpenIterators = make(map[string][]chan struct{})

 	cleanupFiles := func() {
 		err = os.RemoveAll(secondaryPath)
@@ -572,8 +615,11 @@ func GetDBColumnFamilies(name string, secondayPath string, cfNames []string) (*R
 		LastState: nil,
 		Height:    0,
 		Headers:   nil,
-		ShutdownChan: make(chan struct{}),
-		DoneChan:     make(chan struct{}),
+		OpenIterators:  make(map[string][]chan struct{}),
+		ItMut:          sync.RWMutex{},
+		ShutdownChan:   make(chan struct{}, 1),
+		ShutdownCalled: false,
+		DoneChan:       make(chan struct{}, 1),
 	}

 	err = myDB.ReadDBState() //TODO: Figure out right place for this
@@ -643,6 +689,15 @@ func (db *ReadOnlyDBColumnFamily) Unwind() {
 // Shutdown shuts down the db.
 func (db *ReadOnlyDBColumnFamily) Shutdown() {
 	db.ShutdownChan <- struct{}{}
+	db.ItMut.Lock()
+	db.ShutdownCalled = true
+	for _, it := range db.OpenIterators {
+		it[1] <- struct{}{}
+	}
+	for _, it := range db.OpenIterators {
+		<-it[0]
+	}
+	db.ItMut.Unlock()
 	<-db.DoneChan
 	db.Cleanup()
 }
@@ -790,7 +845,7 @@ func (db *ReadOnlyDBColumnFamily) InitHeaders() error {
 	// endKey := prefixes.NewHeaderKey(db.LastState.Height)
 	startKeyRaw := startKey.PackKey()
 	// endKeyRaw := endKey.PackKey()
-	options := NewIterateOptions().WithPrefix([]byte{prefixes.Header}).WithCfHandle(handle)
+	options := NewIterateOptions().WithDB(db).WithPrefix([]byte{prefixes.Header}).WithCfHandle(handle)
 	options = options.WithIncludeKey(false).WithIncludeValue(true) //.WithIncludeStop(true)
 	options = options.WithStart(startKeyRaw) //.WithStop(endKeyRaw)
@@ -813,7 +868,7 @@ func (db *ReadOnlyDBColumnFamily) InitTxCounts() error {
 	db.TxCounts = stack.NewSliceBacked[uint32](InitialTxCountSize)
-	options := NewIterateOptions().WithPrefix([]byte{prefixes.TxCount}).WithCfHandle(handle)
+	options := NewIterateOptions().WithDB(db).WithPrefix([]byte{prefixes.TxCount}).WithCfHandle(handle)
 	options = options.WithIncludeKey(false).WithIncludeValue(true).WithIncludeStop(true)
 	ch := IterCF(db.DB, options)


@@ -94,7 +94,7 @@ func (db *ReadOnlyDBColumnFamily) GetHeaders(height uint32, count uint32) ([][11
 	startKeyRaw := prefixes.NewHeaderKey(height).PackKey()
 	endKeyRaw := prefixes.NewHeaderKey(height + count).PackKey()
-	options := NewIterateOptions().WithPrefix([]byte{prefixes.Header}).WithCfHandle(handle)
+	options := NewIterateOptions().WithDB(db).WithPrefix([]byte{prefixes.Header}).WithCfHandle(handle)
 	options = options.WithIncludeKey(false).WithIncludeValue(true) //.WithIncludeStop(true)
 	options = options.WithStart(startKeyRaw).WithStop(endKeyRaw)
@@ -130,7 +130,7 @@ func (db *ReadOnlyDBColumnFamily) GetBalance(hashX []byte) (uint64, uint64, erro
 	startKeyRaw := startKey.PackKey()
 	endKeyRaw := endKey.PackKey()
 	// Prefix and handle
-	options := NewIterateOptions().WithPrefix([]byte{prefixes.UTXO}).WithCfHandle(handle)
+	options := NewIterateOptions().WithDB(db).WithPrefix([]byte{prefixes.UTXO}).WithCfHandle(handle)
 	// Start and stop bounds
 	options = options.WithStart(startKeyRaw).WithStop(endKeyRaw).WithIncludeStop(true)
 	// Don't include the key
@@ -346,7 +346,7 @@ func (db *ReadOnlyDBColumnFamily) GetStreamsAndChannelRepostedByChannelHashes(re
 	for _, reposterChannelHash := range reposterChannelHashes {
 		key := prefixes.NewChannelToClaimKeyWHash(reposterChannelHash)
 		rawKeyPrefix := key.PartialPack(1)
-		options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
+		options := NewIterateOptions().WithDB(db).WithCfHandle(handle).WithPrefix(rawKeyPrefix)
 		options = options.WithIncludeKey(false).WithIncludeValue(true)
 		ch := IterCF(db.DB, options)
 		// for stream := range Iterate(db.DB, prefixes.ChannelToClaim, []byte{reposterChannelHash}, false) {
@@ -420,7 +420,7 @@ func (db *ReadOnlyDBColumnFamily) GetShortClaimIdUrl(name string, normalizedName
 	log.Printf("partialKey: %#v\n", partialKey)
 	keyPrefix := partialKey.PartialPack(2)
 	// Prefix and handle
-	options := NewIterateOptions().WithPrefix(prefix).WithCfHandle(handle)
+	options := NewIterateOptions().WithDB(db).WithPrefix(prefix).WithCfHandle(handle)
 	// Start and stop bounds
 	options = options.WithStart(keyPrefix).WithStop(keyPrefix)
 	// Don't include the key
@@ -518,7 +518,7 @@ func (db *ReadOnlyDBColumnFamily) GetActiveAmount(claimHash []byte, txoType uint
 	startKeyRaw := startKey.PartialPack(3)
 	endKeyRaw := endKey.PartialPack(3)
 	// Prefix and handle
-	options := NewIterateOptions().WithPrefix([]byte{prefixes.ActiveAmount}).WithCfHandle(handle)
+	options := NewIterateOptions().WithDB(db).WithPrefix([]byte{prefixes.ActiveAmount}).WithCfHandle(handle)
 	// Start and stop bounds
 	options = options.WithStart(startKeyRaw).WithStop(endKeyRaw)
 	// Don't include the key
@@ -674,7 +674,7 @@ func (db *ReadOnlyDBColumnFamily) ControllingClaimIter() <-chan *prefixes.Prefix
 	key := prefixes.NewClaimTakeoverKey("")
 	var rawKeyPrefix []byte = nil
 	rawKeyPrefix = key.PartialPack(0)
-	options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
+	options := NewIterateOptions().WithDB(db).WithCfHandle(handle).WithPrefix(rawKeyPrefix)
 	options = options.WithIncludeValue(true) //.WithIncludeStop(true)
 	ch := IterCF(db.DB, options)
 	return ch
@@ -785,7 +785,7 @@ func (db *ReadOnlyDBColumnFamily) BidOrderNameIter(normalizedName string) <-chan
 	key := prefixes.NewBidOrderKey(normalizedName)
 	var rawKeyPrefix []byte = nil
 	rawKeyPrefix = key.PartialPack(1)
-	options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
+	options := NewIterateOptions().WithDB(db).WithCfHandle(handle).WithPrefix(rawKeyPrefix)
 	options = options.WithIncludeValue(true) //.WithIncludeStop(true)
 	ch := IterCF(db.DB, options)
 	return ch
@@ -803,7 +803,7 @@ func (db *ReadOnlyDBColumnFamily) ClaimShortIdIter(normalizedName string, claimI
 	} else {
 		rawKeyPrefix = key.PartialPack(1)
 	}
-	options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
+	options := NewIterateOptions().WithDB(db).WithCfHandle(handle).WithPrefix(rawKeyPrefix)
 	options = options.WithIncludeValue(true) //.WithIncludeStop(true)
 	ch := IterCF(db.DB, options)
 	return ch


@@ -326,7 +326,7 @@ func (db *ReadOnlyDBColumnFamily) ResolveClaimInChannel(channelHash []byte, norm
 	key := prefixes.NewChannelToClaimKey(channelHash, normalizedName)
 	rawKeyPrefix := key.PartialPack(2)
-	options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
+	options := NewIterateOptions().WithDB(db).WithCfHandle(handle).WithPrefix(rawKeyPrefix)
 	options = options.WithIncludeValue(true) //.WithIncludeStop(true)
 	ch := IterCF(db.DB, options)
 	// TODO: what's a good default size for this?


@@ -7,6 +7,7 @@ import (
 	"log"
 	"os"
 	"strings"
+	"sync"
 	"testing"

 	dbpkg "github.com/lbryio/herald.go/db"
@@ -93,6 +94,11 @@ func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFami
 		LastState: nil,
 		Height:    0,
 		Headers:   nil,
+		OpenIterators:  make(map[string][]chan struct{}),
+		ItMut:          sync.RWMutex{},
+		ShutdownChan:   make(chan struct{}, 1),
+		DoneChan:       make(chan struct{}, 1),
+		ShutdownCalled: false,
 	}

 	// err = dbpkg.ReadDBState(myDB) //TODO: Figure out right place for this


@@ -22,6 +22,9 @@ type IterOptions struct {
 	IncludeValue bool
 	RawKey       bool
 	RawValue     bool
+	ShutdownChan chan struct{}
+	DoneChan     chan struct{}
+	DB           *ReadOnlyDBColumnFamily
 	CfHandle     *grocksdb.ColumnFamilyHandle
 	It           *grocksdb.Iterator
 	Serializer   *prefixes.SerializationAPI
@@ -40,6 +43,9 @@ func NewIterateOptions() *IterOptions {
 		IncludeValue: false,
 		RawKey:       false,
 		RawValue:     false,
+		ShutdownChan: make(chan struct{}),
+		DoneChan:     make(chan struct{}),
+		DB:           nil,
 		CfHandle:     nil,
 		It:           nil,
 		Serializer:   prefixes.ProductionAPI,
@@ -101,6 +107,11 @@ func (o *IterOptions) WithRawValue(rawValue bool) *IterOptions {
 	return o
 }

+func (o *IterOptions) WithDB(db *ReadOnlyDBColumnFamily) *IterOptions {
+	o.DB = db
+	return o
+}
+
 func (o *IterOptions) WithSerializer(serializer *prefixes.SerializationAPI) *IterOptions {
 	o.Serializer = serializer
 	return o
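
Putting the new option together with the IterCF changes above, a call site would presumably look like the sketch below. NewIterateOptions, WithDB, WithPrefix, WithCfHandle, IterCF, prefixes.Header, and the db.DB field all appear in this commit's diffs; the wrapper function itself and its arguments are illustrative only and are assumed to live in the same db package.

// iterateHeaders is an illustrative helper, not part of this commit.
func iterateHeaders(db *ReadOnlyDBColumnFamily, handle *grocksdb.ColumnFamilyHandle) {
	options := NewIterateOptions().
		WithDB(db). // register this iterator so Shutdown can signal it and wait for it
		WithPrefix([]byte{prefixes.Header}).
		WithCfHandle(handle).
		WithIncludeKey(false).
		WithIncludeValue(true)
	for kv := range IterCF(db.DB, options) {
		_ = kv // consume rows; IterCF closes the channel on completion or on shutdown
	}
}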


@@ -0,0 +1,13 @@
FROM jeffreypicard/hub-github-env:dev
COPY scripts/integration_tests.sh /integration_tests.sh
COPY scripts/cicd_integration_test_runner.sh /cicd_integration_test_runner.sh
COPY herald /herald
RUN apt install -y jq curl
ENV CGO_LDFLAGS "-L/usr/local/lib -lrocksdb -lstdc++ -lm -lz -lsnappy -llz4 -lzstd"
ENV CGO_CFLAGS "-I/usr/local/include/rocksdb"
ENV LD_LIBRARY_PATH /usr/local/lib
ENTRYPOINT ["/cicd_integration_test_runner.sh"]


@@ -0,0 +1,14 @@
#!/bin/bash
#
# cicd_integration_test_runner.sh
#
# simple script to kick off herald and call the integration testing
# script
#
# N.B. this currently only works locally until we figure out a way to have
# the data in the CI/CD environment.
#
./herald serve --db-path /mnt/sdb1/wallet_server/_data/lbry-rocksdb &
./integration_tests.sh

scripts/integration_tests.sh Executable file

@@ -0,0 +1,206 @@
#!/bin/bash
#
# integration_tests.sh
#
# GitHub Action CI/CD based integration tests for herald.go
# These are smoke / sanity tests for the server behaving correctly on a "live"
# system; they look for reasonable response codes, not specific correct
# behavior. That is covered in unit tests.
#
# N.B.
# For the curl based JSON tests the `id` field must be present.
#
# global variables
RES=(0)
FINALRES=0
# functions
function logical_or {
	for res in ${RES[@]}; do
		if [ $res -eq 1 -o $FINALRES -eq 1 ]; then
			FINALRES=1
			return
		fi
	done
}

function want_got {
	if [ "${WANT}" != "${GOT}" ]; then
		echo "WANT: ${WANT}"
		echo "GOT: ${GOT}"
		RES+=(1)
	else
		RES+=(0)
	fi
}

function want_greater {
	if [ ${WANT} -ge ${GOT} ]; then
		echo "WANT: ${WANT}"
		echo "GOT: ${GOT}"
		RES+=(1)
	else
		RES+=(0)
	fi
}

function test_command_with_want {
	echo $CMD
	GOT=`eval $CMD`
	want_got
}
# grpc endpoint testing
read -r -d '' CMD <<- EOM
grpcurl -plaintext -d '{"value": ["@Styxhexenhammer666:2"]}' 127.0.0.1:50051 pb.Hub.Resolve
| jq .txos[0].txHash | sed 's/"//g'
EOM
WANT="VOFP8MQEwps9Oa5NJJQ18WfVzUzlpCjst0Wz3xyOPd4="
test_command_with_want
# GOT=`eval $CMD`
#want_got
##
## N.B. This is a degenerate case that takes a long time to run.
## The runtime should be fixed, but in the meantime, we definitely should
## ensure this behaves as expected.
##
## TODO: Test runtime doesn't exceed worst case.
##
#WANT=806389
#read -r -d '' CMD <<- EOM
# grpcurl -plaintext -d '{"value": ["foo"]}' 127.0.0.1:50051 pb.Hub.Resolve | jq .txos[0].height
#EOM
# test_command_with_want
# json rpc endpoint testing
## blockchain.block
### blockchain.block.get_chunk
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.block.get_chunk", "params": [0]}'
| jq .result | sed 's/"//g' | head -c 100
EOM
WANT="010000000000000000000000000000000000000000000000000000000000000000000000cc59e59ff97ac092b55e423aa549"
test_command_with_want
### blockchain.block.get_header
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.block.get_header", "params": []}'
| jq .result.timestamp
EOM
WANT=1446058291
test_command_with_want
### blockchain.block.headers
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.block.headers", "params": []}'
| jq .result.count
EOM
WANT=0
test_command_with_want
## blockchain.claimtrie
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.claimtrie.resolve", "params":[{"Data": ["@Styxhexenhammer666:2"]}]}'
| jq .result.txos[0].tx_hash | sed 's/"//g'
EOM
WANT="VOFP8MQEwps9Oa5NJJQ18WfVzUzlpCjst0Wz3xyOPd4="
test_command_with_want
## blockchain.address
### blockchain.address.get_balance
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.address.get_balance", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq .result.confirmed
EOM
WANT=44415602186
test_command_with_want
## blockchain.address.get_history
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.address.get_history", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq '.result.confirmed | length'
EOM
WANT=82
test_command_with_want
## blockchain.address.listunspent
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.address.listunspent", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq '.result | length'
EOM
WANT=32
test_command_with_want
# blockchain.scripthash
## blockchain.scripthash.get_mempool
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.scripthash.get_mempool", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq .error | sed 's/"//g'
EOM
WANT="encoding/hex: invalid byte: U+0047 'G'"
test_command_with_want
## blockchain.scripthash.get_history
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.scripthash.get_history", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq .error | sed 's/"//g'
EOM
WANT="encoding/hex: invalid byte: U+0047 'G'"
test_command_with_want
## blockchain.scripthash.listunspent
read -r -d '' CMD <<- EOM
curl http://127.0.0.1:50001/rpc -s -H "Content-Type: application/json"
--data '{"id": 1, "method": "blockchain.scripthash.listunspent", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
| jq .error | sed 's/"//g'
EOM
WANT="encoding/hex: invalid byte: U+0047 'G'"
test_command_with_want
# metrics endpoint testing
WANT=0
GOT=$(curl http://127.0.0.1:2112/metrics -s | grep requests | grep resolve | awk '{print $NF}')
want_greater
# calculate return value
logical_or $RES

if [ $FINALRES -eq 1 ]; then
	echo "Failed!"
	exit 1
else
	echo "Passed!"
	exit 0
fi


@@ -18,6 +18,8 @@ import (
 	"github.com/lbryio/lbcd/wire"
 	"github.com/lbryio/lbcutil"
 	"golang.org/x/exp/constraints"
+
+	log "github.com/sirupsen/logrus"
 )

 // BlockchainBlockService methods handle "blockchain.block.*" RPCs
@@ -120,6 +122,7 @@ func (s *BlockchainBlockService) Get_chunk(req *BlockGetChunkReq, resp **BlockGe
 	index := uint32(*req)
 	db_headers, err := s.DB.GetHeaders(index*CHUNK_SIZE, CHUNK_SIZE)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	raw := make([]byte, 0, HEADER_SIZE*len(db_headers))
@@ -141,6 +144,7 @@ func (s *BlockchainBlockService) Get_header(req *BlockGetHeaderReq, resp **Block
 	height := uint32(*req)
 	headers, err := s.DB.GetHeaders(height, 1)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	if len(headers) < 1 {
@@ -171,6 +175,7 @@ func (s *BlockchainBlockService) Headers(req *BlockHeadersReq, resp **BlockHeade
 	count := min(req.Count, MAX_CHUNK_SIZE)
 	db_headers, err := s.DB.GetHeaders(req.StartHeight, count)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	count = uint32(len(db_headers))
@@ -283,18 +288,22 @@ type AddressGetBalanceResp struct {
 func (s *BlockchainAddressService) Get_balance(req *AddressGetBalanceReq, resp **AddressGetBalanceResp) error {
 	address, err := lbcutil.DecodeAddress(req.Address, s.Chain)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	script, err := txscript.PayToAddrScript(address)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	hashX := hashXScript(script, s.Chain)
 	confirmed, unconfirmed, err := s.DB.GetBalance(hashX)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	*resp = &AddressGetBalanceResp{confirmed, unconfirmed}
 	return err
 }
@@ -310,11 +319,13 @@ type ScripthashGetBalanceResp struct {
 func (s *BlockchainScripthashService) Get_balance(req *scripthashGetBalanceReq, resp **ScripthashGetBalanceResp) error {
 	scripthash, err := decodeScriptHash(req.ScriptHash)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	hashX := hashX(scripthash)
 	confirmed, unconfirmed, err := s.DB.GetBalance(hashX)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	*resp = &ScripthashGetBalanceResp{confirmed, unconfirmed}
@@ -341,15 +352,18 @@ type AddressGetHistoryResp struct {
 func (s *BlockchainAddressService) Get_history(req *AddressGetHistoryReq, resp **AddressGetHistoryResp) error {
 	address, err := lbcutil.DecodeAddress(req.Address, s.Chain)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	script, err := txscript.PayToAddrScript(address)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	hashX := hashXScript(script, s.Chain)
 	dbTXs, err := s.DB.GetHistory(hashX)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	confirmed := make([]TxInfo, 0, len(dbTXs))
@@ -380,11 +394,13 @@ type ScripthashGetHistoryResp struct {
 func (s *BlockchainScripthashService) Get_history(req *ScripthashGetHistoryReq, resp **ScripthashGetHistoryResp) error {
 	scripthash, err := decodeScriptHash(req.ScriptHash)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	hashX := hashX(scripthash)
 	dbTXs, err := s.DB.GetHistory(hashX)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	confirmed := make([]TxInfo, 0, len(dbTXs))
@@ -412,10 +428,12 @@ type AddressGetMempoolResp []TxInfoFee
 func (s *BlockchainAddressService) Get_mempool(req *AddressGetMempoolReq, resp **AddressGetMempoolResp) error {
 	address, err := lbcutil.DecodeAddress(req.Address, s.Chain)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	script, err := txscript.PayToAddrScript(address)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	hashX := hashXScript(script, s.Chain)
@@ -436,6 +454,7 @@ type ScripthashGetMempoolResp []TxInfoFee
 func (s *BlockchainScripthashService) Get_mempool(req *ScripthashGetMempoolReq, resp **ScripthashGetMempoolResp) error {
 	scripthash, err := decodeScriptHash(req.ScriptHash)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	hashX := hashX(scripthash)
@@ -462,10 +481,12 @@ type AddressListUnspentResp []TXOInfo
 func (s *BlockchainAddressService) Listunspent(req *AddressListUnspentReq, resp **AddressListUnspentResp) error {
 	address, err := lbcutil.DecodeAddress(req.Address, s.Chain)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	script, err := txscript.PayToAddrScript(address)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	hashX := hashXScript(script, s.Chain)
@@ -494,6 +515,7 @@ type ScripthashListUnspentResp []TXOInfo
 func (s *BlockchainScripthashService) Listunspent(req *ScripthashListUnspentReq, resp **ScripthashListUnspentResp) error {
 	scripthash, err := decodeScriptHash(req.ScriptHash)
 	if err != nil {
+		log.Warn(err)
 		return err
 	}
 	hashX := hashX(scripthash)