[lbry] rework config and params

Ideally, network-related params should be part of the config, which is
passed down to the components to avoid references to global instances.

This commit only gets us halfway there: a couple of structs are too
small to house the params and still reference global variables. We'll
rework those later.
Roy Lee 2021-07-31 22:15:14 -07:00
parent c5b4662aa8
commit 424235655c
13 changed files with 96 additions and 58 deletions
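The shape the series is heading toward, sketched below: each network's parameters live in the param package as a ClaimTrieParams value, SetNetwork points the ActiveParams global at the right set for legacy call sites, and converted components receive the params through the config handed to them. This is an illustration only; the forkChecker type is hypothetical, while ClaimTrieParams, ActiveParams, TestNet and SetNetwork are the names introduced in the hunks below.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/claimtrie/param"
	"github.com/btcsuite/btcd/wire"
)

// forkChecker stands in for a "converted" component: it keeps the params it
// was handed instead of reaching for the package-level global.
type forkChecker struct {
	params param.ClaimTrieParams
}

func (f forkChecker) pastAllClaimsFork(height int32) bool {
	return height >= f.params.AllClaimsInMerkleForkHeight
}

func main() {
	// Legacy call sites still read param.ActiveParams; SetNetwork selects
	// which predefined parameter set the global holds.
	param.SetNetwork(wire.TestNet3)
	fmt.Println(param.ActiveParams.NormalizedNameForkHeight) // 993380 on testnet

	// A converted component gets its params passed down explicitly.
	fc := forkChecker{params: param.TestNet}
	fmt.Println(fc.pastAllClaimsFork(1198559)) // true: right at the fork height
}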


@@ -2,6 +2,7 @@ package chainrepo
import (
"encoding/binary"
"github.com/pkg/errors"
"github.com/btcsuite/btcd/claimtrie/change"
@@ -16,7 +17,7 @@ type Pebble struct {
func NewPebble(path string) (*Pebble, error) {
db, err := pebble.Open(path, nil)
db, err := pebble.Open(path, &pebble.Options{BytesPerSync: 64 << 20})
repo := &Pebble{db: db}
return repo, errors.Wrapf(err, "unable to open %s", path)


@@ -52,13 +52,18 @@ func New(cfg config.Config) (*ClaimTrie, error) {
var cleanups []func() error
blockRepo, err := blockrepo.NewPebble(filepath.Join(cfg.DataDir, cfg.BlockRepoPebble.Path))
// The passed in cfg.DataDir has been prepended with netname.
dataDir := filepath.Join(cfg.DataDir, "claim_dbs")
dbPath := filepath.Join(dataDir, cfg.BlockRepoPebble.Path)
blockRepo, err := blockrepo.NewPebble(dbPath)
if err != nil {
return nil, errors.Wrap(err, "creating block repo")
}
cleanups = append(cleanups, blockRepo.Close)
temporalRepo, err := temporalrepo.NewPebble(filepath.Join(cfg.DataDir, cfg.TemporalRepoPebble.Path))
dbPath = filepath.Join(dataDir, cfg.TemporalRepoPebble.Path)
temporalRepo, err := temporalrepo.NewPebble(dbPath)
if err != nil {
return nil, errors.Wrap(err, "creating temporal repo")
}
@@ -66,7 +71,8 @@ func New(cfg config.Config) (*ClaimTrie, error) {
// Initialize repository for changes to nodes.
// The cleanup is delegated to the Node Manager.
nodeRepo, err := noderepo.NewPebble(filepath.Join(cfg.DataDir, cfg.NodeRepoPebble.Path))
dbPath = filepath.Join(dataDir, cfg.NodeRepoPebble.Path)
nodeRepo, err := noderepo.NewPebble(dbPath)
if err != nil {
return nil, errors.Wrap(err, "creating node repo")
}
@@ -84,7 +90,8 @@ func New(cfg config.Config) (*ClaimTrie, error) {
} else {
// Initialize repository for MerkleTrie. The cleanup is delegated to MerkleTrie.
trieRepo, err := merkletrierepo.NewPebble(filepath.Join(cfg.DataDir, cfg.MerkleTrieRepoPebble.Path))
dbPath = filepath.Join(dataDir, cfg.MerkleTrieRepoPebble.Path)
trieRepo, err := merkletrierepo.NewPebble(dbPath)
if err != nil {
return nil, errors.Wrap(err, "creating trie repo")
}
@@ -258,7 +265,7 @@ func (ct *ClaimTrie) AppendBlock() error {
}
func (ct *ClaimTrie) updateTrieForHashForkIfNecessary() bool {
if ct.height != param.AllClaimsInMerkleForkHeight {
if ct.height != param.ActiveParams.AllClaimsInMerkleForkHeight {
return false
}
@@ -303,7 +310,7 @@ func (ct *ClaimTrie) ResetHeight(height int32) error {
return err
}
passedHashFork := ct.height >= param.AllClaimsInMerkleForkHeight && height < param.AllClaimsInMerkleForkHeight
passedHashFork := ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight && height < param.ActiveParams.AllClaimsInMerkleForkHeight
ct.height = height
hash, err := ct.blockRepo.Get(height)
if err != nil {
@@ -323,7 +330,7 @@ func (ct *ClaimTrie) ResetHeight(height int32) error {
// MerkleHash returns the Merkle Hash of the claimTrie.
func (ct *ClaimTrie) MerkleHash() *chainhash.Hash {
if ct.height >= param.AllClaimsInMerkleForkHeight {
if ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight {
return ct.merkleTrie.MerkleHashAllClaims()
}
return ct.merkleTrie.MerkleHash()


@@ -74,7 +74,7 @@ func TestNormalizationFork(t *testing.T) {
r := require.New(t)
setup(t)
param.NormalizedNameForkHeight = 2
param.ActiveParams.NormalizedNameForkHeight = 2
ct, err := New(cfg)
r.NoError(err)
r.NotNil(ct)
@@ -135,7 +135,7 @@ func TestActivationsOnNormalizationFork(t *testing.T) {
r := require.New(t)
setup(t)
param.NormalizedNameForkHeight = 4
param.ActiveParams.NormalizedNameForkHeight = 4
ct, err := New(cfg)
r.NoError(err)
r.NotNil(ct)
@@ -178,7 +178,7 @@ func TestNormalizationSortOrder(t *testing.T) {
// this was an unfortunate bug; the normalization fork should not have activated anything
// alas, it's now part of our history; we hereby test it to keep it that way
setup(t)
param.NormalizedNameForkHeight = 2
param.ActiveParams.NormalizedNameForkHeight = 2
ct, err := New(cfg)
r.NoError(err)
r.NotNil(ct)


@@ -3,13 +3,16 @@ package config
import (
"path/filepath"
"github.com/btcsuite/btcd/claimtrie/param"
"github.com/btcsuite/btcutil"
)
var DefaultConfig = Config{
Params: param.MainNet,
RamTrie: true, // as it stands the other trie uses more RAM, more time, and 40GB+ of disk space
DataDir: filepath.Join(btcutil.AppDataDir("chain", false), "data", "mainnet", "claim_dbs"),
DataDir: filepath.Join(btcutil.AppDataDir("chain", false), "data"),
BlockRepoPebble: pebbleConfig{
Path: "blocks_pebble_db",
@@ -33,6 +36,8 @@ var DefaultConfig = Config{
// Config is the container of all configurations.
type Config struct {
Params param.ClaimTrieParams
RamTrie bool
DataDir string
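With Params now part of Config, wiring up a claim trie for another network looks roughly like the sketch below. The testnet data path is illustrative (the caller is expected to pass in a DataDir already prefixed with the network name, as the newServer hunk at the end does), and the Close call assumes the ClaimTrie's usual cleanup method.

package main

import (
	"log"
	"path/filepath"

	"github.com/btcsuite/btcd/claimtrie"
	"github.com/btcsuite/btcd/claimtrie/config"
	"github.com/btcsuite/btcd/claimtrie/param"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
)

func main() {
	// Halfway state: the params travel inside the config, but the global
	// still has to point at the same network for the unconverted structs.
	param.SetNetwork(wire.TestNet3)

	cfg := config.DefaultConfig
	cfg.Params = param.TestNet
	// DefaultConfig.DataDir now stops at .../data; prefixing the network
	// name is the caller's job (illustrative path below).
	cfg.DataDir = filepath.Join(btcutil.AppDataDir("chain", false), "data", "testnet")

	ct, err := claimtrie.New(cfg)
	if err != nil {
		log.Fatalf("creating claimtrie: %v", err)
	}
	defer ct.Close() // assumed: the usual cleanup method on *ClaimTrie

	log.Printf("merkle hash at tip: %s", ct.MerkleHash())
}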


@@ -60,11 +60,11 @@ func (c *Claim) setStatus(status Status) *Claim {
func (c *Claim) ExpireAt() int32 {
if c.AcceptedAt+param.OriginalClaimExpirationTime > param.ExtendedClaimExpirationForkHeight {
return c.AcceptedAt + param.ExtendedClaimExpirationTime
if c.AcceptedAt+param.ActiveParams.OriginalClaimExpirationTime > param.ActiveParams.ExtendedClaimExpirationForkHeight {
return c.AcceptedAt + param.ActiveParams.ExtendedClaimExpirationTime
}
return c.AcceptedAt + param.OriginalClaimExpirationTime
return c.AcceptedAt + param.ActiveParams.OriginalClaimExpirationTime
}
func OutPointLess(a, b wire.OutPoint) bool {
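The expiration rule itself is unchanged here, only its parameter source moved. With the mainnet numbers from the param hunk further down (original window 262974, extended window 2102400, fork at 400155) it works out as below; the helper and sample heights are illustrative only.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/claimtrie/param"
)

// expireAt mirrors Claim.ExpireAt for a given acceptance height.
func expireAt(accepted int32, p param.ClaimTrieParams) int32 {
	if accepted+p.OriginalClaimExpirationTime > p.ExtendedClaimExpirationForkHeight {
		// The claim would still be alive at the hf1807 fork, so it gets
		// the extended window.
		return accepted + p.ExtendedClaimExpirationTime
	}
	return accepted + p.OriginalClaimExpirationTime
}

func main() {
	p := param.MainNet
	fmt.Println(expireAt(100000, p)) // 362974: expires before the fork, original window
	fmt.Println(expireAt(150000, p)) // 2252400: 150000+262974 > 400155, extended window
}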


@@ -6,9 +6,10 @@ import (
"crypto/sha256"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"strconv"
"github.com/pkg/errors"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/claimtrie/change"
"github.com/btcsuite/btcd/claimtrie/param"
@@ -91,7 +92,7 @@ func NewBaseManager(repo Repo) (*BaseManager, error) {
nm := &BaseManager{
repo: repo,
cache: newNodeCache(param.MaxNodeManagerCacheSize),
cache: newNodeCache(param.ActiveParams.MaxNodeManagerCacheSize),
}
return nm, nil
@@ -225,7 +226,7 @@ func (nm *BaseManager) IncrementHeightTo(height int32) ([][]byte, error) {
panic("invalid height")
}
if height >= param.MaxRemovalWorkaroundHeight {
if height >= param.ActiveParams.MaxRemovalWorkaroundHeight {
// not technically needed until block 884430, but to be true to the arbitrary rollback length...
collectChildNames(nm.changes)
}
@@ -312,7 +313,7 @@ func (nm *BaseManager) aWorkaroundIsNeeded(n *Node, chg change.Change) bool {
return false
}
if chg.Height >= param.MaxRemovalWorkaroundHeight {
if chg.Height >= param.ActiveParams.MaxRemovalWorkaroundHeight {
// TODO: hard fork this out; it's a bug from previous versions:
// old 17.3 C++ code we're trying to mimic (where empty means no active claims):
@@ -337,9 +338,9 @@ func (nm *BaseManager) aWorkaroundIsNeeded(n *Node, chg change.Change) bool {
func calculateDelay(curr, tookOver int32) int32 {
delay := (curr - tookOver) / param.ActiveDelayFactor
if delay > param.MaxActiveDelay {
return param.MaxActiveDelay
delay := (curr - tookOver) / param.ActiveParams.ActiveDelayFactor
if delay > param.ActiveParams.MaxActiveDelay {
return param.ActiveParams.MaxActiveDelay
}
return delay
@@ -419,7 +420,7 @@ func (nm *BaseManager) claimHashes(name []byte) *chainhash.Hash {
func (nm *BaseManager) Hash(name []byte) *chainhash.Hash {
if nm.height >= param.AllClaimsInMerkleForkHeight {
if nm.height >= param.ActiveParams.AllClaimsInMerkleForkHeight {
return nm.claimHashes(name)
}
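For a feel of the activation delay computed in calculateDelay above, here is the same capped formula with mainnet's ActiveDelayFactor (32) and MaxActiveDelay (4032) inlined; a standalone sketch, not the package's code.

package main

import "fmt"

// cappedDelay reproduces calculateDelay with the mainnet constants inlined:
// one block of delay per 32 blocks since the last takeover, capped at 4032.
func cappedDelay(curr, tookOver int32) int32 {
	delay := (curr - tookOver) / 32
	if delay > 4032 {
		return 4032
	}
	return delay
}

func main() {
	fmt.Println(cappedDelay(1000320, 1000000)) // 10
	fmt.Println(cappedDelay(1064000, 1000000)) // 2000
	fmt.Println(cappedDelay(1200000, 1000000)) // 4032 (200000/32 = 6250, capped)
}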


@@ -148,7 +148,7 @@ func TestNodeSort(t *testing.T) {
r := require.New(t)
param.ExtendedClaimExpirationTime = 1000
param.ActiveParams.ExtendedClaimExpirationTime = 1000
r.True(OutPointLess(*out1, *out2))
r.True(OutPointLess(*out1, *out3))
@@ -171,7 +171,7 @@ func TestClaimSort(t *testing.T) {
r := require.New(t)
param.ExtendedClaimExpirationTime = 1000
param.ActiveParams.ExtendedClaimExpirationTime = 1000
n := New()
n.Claims = append(n.Claims, &Claim{OutPoint: *out2, AcceptedAt: 3, Amount: 3, ClaimID: change.ClaimID{2}})


@@ -141,7 +141,7 @@ func (n *Node) updateTakeoverHeight(height int32, name []byte, refindBest bool)
}
}
if !takeoverHappening && height < param.MaxRemovalWorkaroundHeight {
if !takeoverHappening && height < param.ActiveParams.MaxRemovalWorkaroundHeight {
// This is a super ugly hack to work around bug in old code.
// The bug: un/support a name then update it. This will cause its takeover height to be reset to current.
// This is because the old code would add to the cache without setting block originals when dealing in supports.


@@ -15,7 +15,7 @@ import (
var Normalize = normalizeGo
func NormalizeIfNecessary(name []byte, height int32) []byte {
if height < param.NormalizedNameForkHeight {
if height < param.ActiveParams.NormalizedNameForkHeight {
return name
}
return Normalize(name)


@@ -2,6 +2,7 @@ package node
import (
"bytes"
"github.com/btcsuite/btcd/claimtrie/change"
"github.com/btcsuite/btcd/claimtrie/param"
)
@@ -37,7 +38,7 @@ func (nm *NormalizingManager) DecrementHeightTo(affectedNames [][]byte, height i
func (nm *NormalizingManager) NextUpdateHeightOfNode(name []byte) ([]byte, int32) {
name, nextUpdate := nm.Manager.NextUpdateHeightOfNode(name)
if nextUpdate > param.NormalizedNameForkHeight {
if nextUpdate > param.ActiveParams.NormalizedNameForkHeight {
name = Normalize(name)
}
return name, nextUpdate
@@ -47,12 +48,12 @@ func (nm *NormalizingManager) addNormalizationForkChangesIfNecessary(height int3
if nm.Manager.Height()+1 != height {
// initialization phase
if height >= param.NormalizedNameForkHeight {
nm.normalizedAt = param.NormalizedNameForkHeight // eh, we don't really know that it happened there
if height >= param.ActiveParams.NormalizedNameForkHeight {
nm.normalizedAt = param.ActiveParams.NormalizedNameForkHeight // eh, we don't really know that it happened there
}
}
if nm.normalizedAt >= 0 || height != param.NormalizedNameForkHeight {
if nm.normalizedAt >= 0 || height != param.ActiveParams.NormalizedNameForkHeight {
return
}
nm.normalizedAt = height


@@ -1,10 +1,8 @@
package param
import (
"github.com/btcsuite/btcd/wire"
)
import "github.com/btcsuite/btcd/wire"
var (
type ClaimTrieParams struct {
MaxActiveDelay int32
ActiveDelayFactor int32
@@ -18,34 +16,59 @@ var (
NormalizedNameForkHeight int32
AllClaimsInMerkleForkHeight int32
}
var (
ActiveParams = MainNet
MainNet = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 262974,
ExtendedClaimExpirationTime: 2102400,
ExtendedClaimExpirationForkHeight: 400155, // https://lbry.io/news/hf1807
MaxRemovalWorkaroundHeight: 658300,
NormalizedNameForkHeight: 539940, // targeting 21 March 2019}, https://lbry.com/news/hf1903
AllClaimsInMerkleForkHeight: 658309, // targeting 30 Oct 2019}, https://lbry.com/news/hf1910
}
TestNet = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 262974,
ExtendedClaimExpirationTime: 2102400,
ExtendedClaimExpirationForkHeight: 278160,
MaxRemovalWorkaroundHeight: 1, // if you get a hash mismatch, come back to this
NormalizedNameForkHeight: 993380,
AllClaimsInMerkleForkHeight: 1198559,
}
Regtest = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 500,
ExtendedClaimExpirationTime: 600,
ExtendedClaimExpirationForkHeight: 800,
MaxRemovalWorkaroundHeight: -1,
NormalizedNameForkHeight: 250,
AllClaimsInMerkleForkHeight: 349,
}
)
func SetNetwork(net wire.BitcoinNet) {
MaxActiveDelay = 4032
ActiveDelayFactor = 32
MaxNodeManagerCacheSize = 32000
switch net {
case wire.MainNet:
OriginalClaimExpirationTime = 262974
ExtendedClaimExpirationTime = 2102400
ExtendedClaimExpirationForkHeight = 400155 // https://lbry.io/news/hf1807
MaxRemovalWorkaroundHeight = 658300
NormalizedNameForkHeight = 539940 // targeting 21 March 2019}, https://lbry.com/news/hf1903
AllClaimsInMerkleForkHeight = 658309 // targeting 30 Oct 2019}, https://lbry.com/news/hf1910
ActiveParams = MainNet
case wire.TestNet3:
OriginalClaimExpirationTime = 262974
ExtendedClaimExpirationTime = 2102400
ExtendedClaimExpirationForkHeight = 278160
MaxRemovalWorkaroundHeight = 1 // if you get a hash mismatch, come back to this
NormalizedNameForkHeight = 993380
AllClaimsInMerkleForkHeight = 1198559
ActiveParams = TestNet
case wire.TestNet, wire.SimNet: // "regtest"
OriginalClaimExpirationTime = 500
ExtendedClaimExpirationTime = 600
ExtendedClaimExpirationForkHeight = 800
MaxRemovalWorkaroundHeight = -1
NormalizedNameForkHeight = 250
AllClaimsInMerkleForkHeight = 349
ActiveParams = Regtest
}
}


@@ -3,6 +3,7 @@ package temporalrepo
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
"github.com/cockroachdb/pebble"


@@ -14,7 +14,6 @@ import (
"fmt"
"math"
"net"
"path/filepath"
"runtime"
"sort"
"strconv"
@@ -2727,7 +2726,7 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string,
var err error
claimTrieCfg := claimtrieconfig.DefaultConfig
claimTrieCfg.DataDir = filepath.Join(cfg.DataDir, "claim_dbs")
claimTrieCfg.DataDir = cfg.DataDir
var ct *claimtrie.ClaimTrie