bfe2ba4191
This commit adds a `defer` statement at the top of `TestRpcServer` that attempts a `recover`, tearing down all active harnesses if one of the tests causes a panic in the main goroutine. Before this commit, if a buggy test panicked while the integration tests were running, any active harnesses would fail to be properly torn down. The running btcd processes would then be leaked, possibly interfering with future test runs until they were manually killed. This commit fixes that behavior. To aid in debugging, when a test panics, the test number is printed along with a full stack trace from the start of the test to the panic point.
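The fix relies on the standard Go defer/recover pattern: `recover` only intercepts a panic when invoked from a function that was deferred in the goroutine that panicked, and `debug.Stack()` captures that goroutine's stack at the point of recovery. A minimal, self-contained sketch of the pattern (the `teardown` and `runWithCleanup` names below are illustrative stand-ins, not part of the commit):

	package main

	import (
		"fmt"
		"runtime/debug"
	)

	// teardown stands in for rpctest.TearDownAll in this sketch.
	func teardown() {
		fmt.Println("tearing down all active harnesses")
	}

	// runWithCleanup invokes f and, if f panics, performs cleanup and
	// reports the panic together with a stack trace.
	func runWithCleanup(f func()) {
		defer func() {
			// recover only works inside a function deferred in the
			// goroutine that panicked.
			if r := recover(); r != nil {
				teardown()
				fmt.Printf("recovered from panic: %v\n%s", r, debug.Stack())
			}
		}()
		f()
	}

	func main() {
		runWithCleanup(func() { panic("buggy test") })
	}

The complete test file after the change is reproduced below.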
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"fmt"
	"os"
	"runtime/debug"
	"testing"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/rpctest"
)

func testGetBestBlock(r *rpctest.Harness, t *testing.T) {
	_, prevbestHeight, err := r.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("Call to `getbestblock` failed: %v", err)
	}

	// Create a new block connecting to the current tip.
	generatedBlockHashes, err := r.Node.Generate(1)
	if err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	bestHash, bestHeight, err := r.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("Call to `getbestblock` failed: %v", err)
	}

	// Hash should be the same as the newly submitted block.
	if !bytes.Equal(bestHash[:], generatedBlockHashes[0][:]) {
		t.Fatalf("Block hashes do not match. Returned hash %v, wanted "+
			"hash %v", bestHash, generatedBlockHashes[0][:])
	}

	// Block height should now reflect newest height.
	if bestHeight != prevbestHeight+1 {
		t.Fatalf("Block heights do not match. Got %v, wanted %v",
			bestHeight, prevbestHeight+1)
	}
}

func testGetBlockCount(r *rpctest.Harness, t *testing.T) {
	// Save the current count.
	currentCount, err := r.Node.GetBlockCount()
	if err != nil {
		t.Fatalf("Unable to get block count: %v", err)
	}

	if _, err := r.Node.Generate(1); err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	// Count should have increased by one.
	newCount, err := r.Node.GetBlockCount()
	if err != nil {
		t.Fatalf("Unable to get block count: %v", err)
	}
	if newCount != currentCount+1 {
		t.Fatalf("Block count incorrect. Got %v should be %v",
			newCount, currentCount+1)
	}
}

func testGetBlockHash(r *rpctest.Harness, t *testing.T) {
	// Create a new block connecting to the current tip.
	generatedBlockHashes, err := r.Node.Generate(1)
	if err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	info, err := r.Node.GetInfo()
	if err != nil {
		t.Fatalf("Call to `getinfo` failed: %v", err)
	}

	blockHash, err := r.Node.GetBlockHash(int64(info.Blocks))
	if err != nil {
		t.Fatalf("Call to `getblockhash` failed: %v", err)
	}

	// Block hashes should match newly created block.
	if !bytes.Equal(generatedBlockHashes[0][:], blockHash[:]) {
		t.Fatalf("Block hashes do not match. Returned hash %v, wanted "+
			"hash %v", blockHash, generatedBlockHashes[0][:])
	}
}

var rpcTestCases = []rpctest.HarnessTestCase{
	testGetBestBlock,
	testGetBlockCount,
	testGetBlockHash,
}

// primaryHarness is the shared mining harness used by all test cases.
var primaryHarness *rpctest.Harness

// TestMain spins up the primary harness and its test chain before running
// the RPC tests, then tears down all active harnesses once they complete.
func TestMain(m *testing.M) {
	var err error
	primaryHarness, err = rpctest.New(&chaincfg.SimNetParams, nil, nil)
	if err != nil {
		fmt.Println("unable to create primary harness: ", err)
		os.Exit(1)
	}

	// Initialize the primary mining node with a chain of length 125,
	// providing 25 mature coinbases to allow spending from for testing
	// purposes.
	if err := primaryHarness.SetUp(true, 25); err != nil {
		fmt.Println("unable to setup test chain: ", err)
		os.Exit(1)
	}

	exitCode := m.Run()

	// Clean up any active harnesses that are still currently running. This
	// includes removing all temporary directories, and shutting down any
	// created processes.
	if err := rpctest.TearDownAll(); err != nil {
		fmt.Println("unable to tear down all harnesses: ", err)
		os.Exit(1)
	}

	os.Exit(exitCode)
}

// TestRpcServer runs each registered harness test case against the primary
// harness, recovering from any panic so active harnesses can still be torn
// down.
func TestRpcServer(t *testing.T) {
	var currentTestNum int
	defer func() {
		// If one of the integration tests caused a panic within the
		// main goroutine, then tear down all the harnesses in order to
		// avoid any leaked btcd processes.
		if r := recover(); r != nil {
			fmt.Println("recovering from test panic: ", r)
			if err := rpctest.TearDownAll(); err != nil {
				fmt.Println("unable to tear down all harnesses: ", err)
			}
			t.Fatalf("test #%v panicked: %s", currentTestNum, debug.Stack())
		}
	}()

	for _, testCase := range rpcTestCases {
		testCase(primaryHarness, t)

		currentTestNum++
	}
}