[lbry] rework config and params

Ideally, network-related params should be part of the config, which is
passed down to the components to avoid references to global instances.

This commit is only a halfway step: a couple of structs are still too
small to house the params and therefore keep referencing the global
variables. We'll rework those later.
This commit is contained in:
Roy Lee 2021-07-31 22:15:14 -07:00
parent c5b4662aa8
commit 424235655c
13 changed files with 96 additions and 58 deletions

View file

@ -2,6 +2,7 @@ package chainrepo
import ( import (
"encoding/binary" "encoding/binary"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/btcsuite/btcd/claimtrie/change" "github.com/btcsuite/btcd/claimtrie/change"
@ -16,7 +17,7 @@ type Pebble struct {
func NewPebble(path string) (*Pebble, error) { func NewPebble(path string) (*Pebble, error) {
db, err := pebble.Open(path, nil) db, err := pebble.Open(path, &pebble.Options{BytesPerSync: 64 << 20})
repo := &Pebble{db: db} repo := &Pebble{db: db}
return repo, errors.Wrapf(err, "unable to open %s", path) return repo, errors.Wrapf(err, "unable to open %s", path)

View file

@ -52,13 +52,18 @@ func New(cfg config.Config) (*ClaimTrie, error) {
var cleanups []func() error var cleanups []func() error
blockRepo, err := blockrepo.NewPebble(filepath.Join(cfg.DataDir, cfg.BlockRepoPebble.Path)) // The passed in cfg.DataDir has been prepended with netname.
dataDir := filepath.Join(cfg.DataDir, "claim_dbs")
dbPath := filepath.Join(dataDir, cfg.BlockRepoPebble.Path)
blockRepo, err := blockrepo.NewPebble(dbPath)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "creating block repo") return nil, errors.Wrap(err, "creating block repo")
} }
cleanups = append(cleanups, blockRepo.Close) cleanups = append(cleanups, blockRepo.Close)
temporalRepo, err := temporalrepo.NewPebble(filepath.Join(cfg.DataDir, cfg.TemporalRepoPebble.Path)) dbPath = filepath.Join(dataDir, cfg.TemporalRepoPebble.Path)
temporalRepo, err := temporalrepo.NewPebble(dbPath)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "creating temporal repo") return nil, errors.Wrap(err, "creating temporal repo")
} }
@ -66,7 +71,8 @@ func New(cfg config.Config) (*ClaimTrie, error) {
// Initialize repository for changes to nodes. // Initialize repository for changes to nodes.
// The cleanup is delegated to the Node Manager. // The cleanup is delegated to the Node Manager.
nodeRepo, err := noderepo.NewPebble(filepath.Join(cfg.DataDir, cfg.NodeRepoPebble.Path)) dbPath = filepath.Join(dataDir, cfg.NodeRepoPebble.Path)
nodeRepo, err := noderepo.NewPebble(dbPath)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "creating node repo") return nil, errors.Wrap(err, "creating node repo")
} }
@ -84,7 +90,8 @@ func New(cfg config.Config) (*ClaimTrie, error) {
} else { } else {
// Initialize repository for MerkleTrie. The cleanup is delegated to MerkleTrie. // Initialize repository for MerkleTrie. The cleanup is delegated to MerkleTrie.
trieRepo, err := merkletrierepo.NewPebble(filepath.Join(cfg.DataDir, cfg.MerkleTrieRepoPebble.Path)) dbPath = filepath.Join(dataDir, cfg.MerkleTrieRepoPebble.Path)
trieRepo, err := merkletrierepo.NewPebble(dbPath)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "creating trie repo") return nil, errors.Wrap(err, "creating trie repo")
} }
@ -258,7 +265,7 @@ func (ct *ClaimTrie) AppendBlock() error {
} }
func (ct *ClaimTrie) updateTrieForHashForkIfNecessary() bool { func (ct *ClaimTrie) updateTrieForHashForkIfNecessary() bool {
if ct.height != param.AllClaimsInMerkleForkHeight { if ct.height != param.ActiveParams.AllClaimsInMerkleForkHeight {
return false return false
} }
@ -303,7 +310,7 @@ func (ct *ClaimTrie) ResetHeight(height int32) error {
return err return err
} }
passedHashFork := ct.height >= param.AllClaimsInMerkleForkHeight && height < param.AllClaimsInMerkleForkHeight passedHashFork := ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight && height < param.ActiveParams.AllClaimsInMerkleForkHeight
ct.height = height ct.height = height
hash, err := ct.blockRepo.Get(height) hash, err := ct.blockRepo.Get(height)
if err != nil { if err != nil {
@ -323,7 +330,7 @@ func (ct *ClaimTrie) ResetHeight(height int32) error {
// MerkleHash returns the Merkle Hash of the claimTrie. // MerkleHash returns the Merkle Hash of the claimTrie.
func (ct *ClaimTrie) MerkleHash() *chainhash.Hash { func (ct *ClaimTrie) MerkleHash() *chainhash.Hash {
if ct.height >= param.AllClaimsInMerkleForkHeight { if ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight {
return ct.merkleTrie.MerkleHashAllClaims() return ct.merkleTrie.MerkleHashAllClaims()
} }
return ct.merkleTrie.MerkleHash() return ct.merkleTrie.MerkleHash()

View file

@ -74,7 +74,7 @@ func TestNormalizationFork(t *testing.T) {
r := require.New(t) r := require.New(t)
setup(t) setup(t)
param.NormalizedNameForkHeight = 2 param.ActiveParams.NormalizedNameForkHeight = 2
ct, err := New(cfg) ct, err := New(cfg)
r.NoError(err) r.NoError(err)
r.NotNil(ct) r.NotNil(ct)
@ -135,7 +135,7 @@ func TestActivationsOnNormalizationFork(t *testing.T) {
r := require.New(t) r := require.New(t)
setup(t) setup(t)
param.NormalizedNameForkHeight = 4 param.ActiveParams.NormalizedNameForkHeight = 4
ct, err := New(cfg) ct, err := New(cfg)
r.NoError(err) r.NoError(err)
r.NotNil(ct) r.NotNil(ct)
@ -178,7 +178,7 @@ func TestNormalizationSortOrder(t *testing.T) {
// this was an unfortunate bug; the normalization fork should not have activated anything // this was an unfortunate bug; the normalization fork should not have activated anything
// alas, it's now part of our history; we hereby test it to keep it that way // alas, it's now part of our history; we hereby test it to keep it that way
setup(t) setup(t)
param.NormalizedNameForkHeight = 2 param.ActiveParams.NormalizedNameForkHeight = 2
ct, err := New(cfg) ct, err := New(cfg)
r.NoError(err) r.NoError(err)
r.NotNil(ct) r.NotNil(ct)

View file

@ -3,13 +3,16 @@ package config
import ( import (
"path/filepath" "path/filepath"
"github.com/btcsuite/btcd/claimtrie/param"
"github.com/btcsuite/btcutil" "github.com/btcsuite/btcutil"
) )
var DefaultConfig = Config{ var DefaultConfig = Config{
Params: param.MainNet,
RamTrie: true, // as it stands the other trie uses more RAM, more time, and 40GB+ of disk space RamTrie: true, // as it stands the other trie uses more RAM, more time, and 40GB+ of disk space
DataDir: filepath.Join(btcutil.AppDataDir("chain", false), "data", "mainnet", "claim_dbs"), DataDir: filepath.Join(btcutil.AppDataDir("chain", false), "data"),
BlockRepoPebble: pebbleConfig{ BlockRepoPebble: pebbleConfig{
Path: "blocks_pebble_db", Path: "blocks_pebble_db",
@ -33,6 +36,8 @@ var DefaultConfig = Config{
// Config is the container of all configurations. // Config is the container of all configurations.
type Config struct { type Config struct {
Params param.ClaimTrieParams
RamTrie bool RamTrie bool
DataDir string DataDir string

View file

@ -60,11 +60,11 @@ func (c *Claim) setStatus(status Status) *Claim {
func (c *Claim) ExpireAt() int32 { func (c *Claim) ExpireAt() int32 {
if c.AcceptedAt+param.OriginalClaimExpirationTime > param.ExtendedClaimExpirationForkHeight { if c.AcceptedAt+param.ActiveParams.OriginalClaimExpirationTime > param.ActiveParams.ExtendedClaimExpirationForkHeight {
return c.AcceptedAt + param.ExtendedClaimExpirationTime return c.AcceptedAt + param.ActiveParams.ExtendedClaimExpirationTime
} }
return c.AcceptedAt + param.OriginalClaimExpirationTime return c.AcceptedAt + param.ActiveParams.OriginalClaimExpirationTime
} }
func OutPointLess(a, b wire.OutPoint) bool { func OutPointLess(a, b wire.OutPoint) bool {

View file

@ -6,9 +6,10 @@ import (
"crypto/sha256" "crypto/sha256"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"github.com/pkg/errors"
"strconv" "strconv"
"github.com/pkg/errors"
"github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/claimtrie/change" "github.com/btcsuite/btcd/claimtrie/change"
"github.com/btcsuite/btcd/claimtrie/param" "github.com/btcsuite/btcd/claimtrie/param"
@ -91,7 +92,7 @@ func NewBaseManager(repo Repo) (*BaseManager, error) {
nm := &BaseManager{ nm := &BaseManager{
repo: repo, repo: repo,
cache: newNodeCache(param.MaxNodeManagerCacheSize), cache: newNodeCache(param.ActiveParams.MaxNodeManagerCacheSize),
} }
return nm, nil return nm, nil
@ -225,7 +226,7 @@ func (nm *BaseManager) IncrementHeightTo(height int32) ([][]byte, error) {
panic("invalid height") panic("invalid height")
} }
if height >= param.MaxRemovalWorkaroundHeight { if height >= param.ActiveParams.MaxRemovalWorkaroundHeight {
// not technically needed until block 884430, but to be true to the arbitrary rollback length... // not technically needed until block 884430, but to be true to the arbitrary rollback length...
collectChildNames(nm.changes) collectChildNames(nm.changes)
} }
@ -312,7 +313,7 @@ func (nm *BaseManager) aWorkaroundIsNeeded(n *Node, chg change.Change) bool {
return false return false
} }
if chg.Height >= param.MaxRemovalWorkaroundHeight { if chg.Height >= param.ActiveParams.MaxRemovalWorkaroundHeight {
// TODO: hard fork this out; it's a bug from previous versions: // TODO: hard fork this out; it's a bug from previous versions:
// old 17.3 C++ code we're trying to mimic (where empty means no active claims): // old 17.3 C++ code we're trying to mimic (where empty means no active claims):
@ -337,9 +338,9 @@ func (nm *BaseManager) aWorkaroundIsNeeded(n *Node, chg change.Change) bool {
func calculateDelay(curr, tookOver int32) int32 { func calculateDelay(curr, tookOver int32) int32 {
delay := (curr - tookOver) / param.ActiveDelayFactor delay := (curr - tookOver) / param.ActiveParams.ActiveDelayFactor
if delay > param.MaxActiveDelay { if delay > param.ActiveParams.MaxActiveDelay {
return param.MaxActiveDelay return param.ActiveParams.MaxActiveDelay
} }
return delay return delay
@ -419,7 +420,7 @@ func (nm *BaseManager) claimHashes(name []byte) *chainhash.Hash {
func (nm *BaseManager) Hash(name []byte) *chainhash.Hash { func (nm *BaseManager) Hash(name []byte) *chainhash.Hash {
if nm.height >= param.AllClaimsInMerkleForkHeight { if nm.height >= param.ActiveParams.AllClaimsInMerkleForkHeight {
return nm.claimHashes(name) return nm.claimHashes(name)
} }

View file

@ -148,7 +148,7 @@ func TestNodeSort(t *testing.T) {
r := require.New(t) r := require.New(t)
param.ExtendedClaimExpirationTime = 1000 param.ActiveParams.ExtendedClaimExpirationTime = 1000
r.True(OutPointLess(*out1, *out2)) r.True(OutPointLess(*out1, *out2))
r.True(OutPointLess(*out1, *out3)) r.True(OutPointLess(*out1, *out3))
@ -171,7 +171,7 @@ func TestClaimSort(t *testing.T) {
r := require.New(t) r := require.New(t)
param.ExtendedClaimExpirationTime = 1000 param.ActiveParams.ExtendedClaimExpirationTime = 1000
n := New() n := New()
n.Claims = append(n.Claims, &Claim{OutPoint: *out2, AcceptedAt: 3, Amount: 3, ClaimID: change.ClaimID{2}}) n.Claims = append(n.Claims, &Claim{OutPoint: *out2, AcceptedAt: 3, Amount: 3, ClaimID: change.ClaimID{2}})

View file

@ -141,7 +141,7 @@ func (n *Node) updateTakeoverHeight(height int32, name []byte, refindBest bool)
} }
} }
if !takeoverHappening && height < param.MaxRemovalWorkaroundHeight { if !takeoverHappening && height < param.ActiveParams.MaxRemovalWorkaroundHeight {
// This is a super ugly hack to work around bug in old code. // This is a super ugly hack to work around bug in old code.
// The bug: un/support a name then update it. This will cause its takeover height to be reset to current. // The bug: un/support a name then update it. This will cause its takeover height to be reset to current.
// This is because the old code would add to the cache without setting block originals when dealing in supports. // This is because the old code would add to the cache without setting block originals when dealing in supports.

View file

@ -15,7 +15,7 @@ import (
var Normalize = normalizeGo var Normalize = normalizeGo
func NormalizeIfNecessary(name []byte, height int32) []byte { func NormalizeIfNecessary(name []byte, height int32) []byte {
if height < param.NormalizedNameForkHeight { if height < param.ActiveParams.NormalizedNameForkHeight {
return name return name
} }
return Normalize(name) return Normalize(name)

View file

@ -2,6 +2,7 @@ package node
import ( import (
"bytes" "bytes"
"github.com/btcsuite/btcd/claimtrie/change" "github.com/btcsuite/btcd/claimtrie/change"
"github.com/btcsuite/btcd/claimtrie/param" "github.com/btcsuite/btcd/claimtrie/param"
) )
@ -37,7 +38,7 @@ func (nm *NormalizingManager) DecrementHeightTo(affectedNames [][]byte, height i
func (nm *NormalizingManager) NextUpdateHeightOfNode(name []byte) ([]byte, int32) { func (nm *NormalizingManager) NextUpdateHeightOfNode(name []byte) ([]byte, int32) {
name, nextUpdate := nm.Manager.NextUpdateHeightOfNode(name) name, nextUpdate := nm.Manager.NextUpdateHeightOfNode(name)
if nextUpdate > param.NormalizedNameForkHeight { if nextUpdate > param.ActiveParams.NormalizedNameForkHeight {
name = Normalize(name) name = Normalize(name)
} }
return name, nextUpdate return name, nextUpdate
@ -47,12 +48,12 @@ func (nm *NormalizingManager) addNormalizationForkChangesIfNecessary(height int3
if nm.Manager.Height()+1 != height { if nm.Manager.Height()+1 != height {
// initialization phase // initialization phase
if height >= param.NormalizedNameForkHeight { if height >= param.ActiveParams.NormalizedNameForkHeight {
nm.normalizedAt = param.NormalizedNameForkHeight // eh, we don't really know that it happened there nm.normalizedAt = param.ActiveParams.NormalizedNameForkHeight // eh, we don't really know that it happened there
} }
} }
if nm.normalizedAt >= 0 || height != param.NormalizedNameForkHeight { if nm.normalizedAt >= 0 || height != param.ActiveParams.NormalizedNameForkHeight {
return return
} }
nm.normalizedAt = height nm.normalizedAt = height

View file

@ -1,10 +1,8 @@
package param package param
import ( import "github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcd/wire"
)
var ( type ClaimTrieParams struct {
MaxActiveDelay int32 MaxActiveDelay int32
ActiveDelayFactor int32 ActiveDelayFactor int32
@ -18,34 +16,59 @@ var (
NormalizedNameForkHeight int32 NormalizedNameForkHeight int32
AllClaimsInMerkleForkHeight int32 AllClaimsInMerkleForkHeight int32
}
var (
ActiveParams = MainNet
MainNet = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 262974,
ExtendedClaimExpirationTime: 2102400,
ExtendedClaimExpirationForkHeight: 400155, // https://lbry.io/news/hf1807
MaxRemovalWorkaroundHeight: 658300,
NormalizedNameForkHeight: 539940, // targeting 21 March 2019}, https://lbry.com/news/hf1903
AllClaimsInMerkleForkHeight: 658309, // targeting 30 Oct 2019}, https://lbry.com/news/hf1910
}
TestNet = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 262974,
ExtendedClaimExpirationTime: 2102400,
ExtendedClaimExpirationForkHeight: 278160,
MaxRemovalWorkaroundHeight: 1, // if you get a hash mismatch, come back to this
NormalizedNameForkHeight: 993380,
AllClaimsInMerkleForkHeight: 1198559,
}
Regtest = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 500,
ExtendedClaimExpirationTime: 600,
ExtendedClaimExpirationForkHeight: 800,
MaxRemovalWorkaroundHeight: -1,
NormalizedNameForkHeight: 250,
AllClaimsInMerkleForkHeight: 349,
}
) )
func SetNetwork(net wire.BitcoinNet) { func SetNetwork(net wire.BitcoinNet) {
MaxActiveDelay = 4032
ActiveDelayFactor = 32
MaxNodeManagerCacheSize = 32000
switch net { switch net {
case wire.MainNet: case wire.MainNet:
OriginalClaimExpirationTime = 262974 ActiveParams = MainNet
ExtendedClaimExpirationTime = 2102400
ExtendedClaimExpirationForkHeight = 400155 // https://lbry.io/news/hf1807
MaxRemovalWorkaroundHeight = 658300
NormalizedNameForkHeight = 539940 // targeting 21 March 2019}, https://lbry.com/news/hf1903
AllClaimsInMerkleForkHeight = 658309 // targeting 30 Oct 2019}, https://lbry.com/news/hf1910
case wire.TestNet3: case wire.TestNet3:
OriginalClaimExpirationTime = 262974 ActiveParams = TestNet
ExtendedClaimExpirationTime = 2102400
ExtendedClaimExpirationForkHeight = 278160
MaxRemovalWorkaroundHeight = 1 // if you get a hash mismatch, come back to this
NormalizedNameForkHeight = 993380
AllClaimsInMerkleForkHeight = 1198559
case wire.TestNet, wire.SimNet: // "regtest" case wire.TestNet, wire.SimNet: // "regtest"
OriginalClaimExpirationTime = 500 ActiveParams = Regtest
ExtendedClaimExpirationTime = 600
ExtendedClaimExpirationForkHeight = 800
MaxRemovalWorkaroundHeight = -1
NormalizedNameForkHeight = 250
AllClaimsInMerkleForkHeight = 349
} }
} }

View file

@ -3,6 +3,7 @@ package temporalrepo
import ( import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble"

View file

@ -14,7 +14,6 @@ import (
"fmt" "fmt"
"math" "math"
"net" "net"
"path/filepath"
"runtime" "runtime"
"sort" "sort"
"strconv" "strconv"
@ -2727,7 +2726,7 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string,
var err error var err error
claimTrieCfg := claimtrieconfig.DefaultConfig claimTrieCfg := claimtrieconfig.DefaultConfig
claimTrieCfg.DataDir = filepath.Join(cfg.DataDir, "claim_dbs") claimTrieCfg.DataDir = cfg.DataDir
var ct *claimtrie.ClaimTrie var ct *claimtrie.ClaimTrie