Greatly optimize best chain selection work values.
Previously, the code used big rational numbers for work values, which carried far more precision than needed (and ultimately a lot of extra memory and computation to maintain that precision). This commit converts the work values to big integers and calculates them with integer division. This is acceptable because the numerator is multiplied by 2^256, which is higher than the maximum possible proof of work, so anything after the decimal point is superfluous precision for the purposes of chain selection. Also, add a check for negative difficulty values when calculating the work value. Negative values won't occur in practice with valid blocks, but it's possible an invalid block could trigger the code path, so be safe and check for it.
This commit is contained in:
parent
455c92a716
commit
2d6a664d9d
2 changed files with 12 additions and 5 deletions
4
chain.go
4
chain.go
|
@ -38,7 +38,7 @@ type blockNode struct {
|
||||||
|
|
||||||
// workSum is the total amount of work in the chain up to and including
|
// workSum is the total amount of work in the chain up to and including
|
||||||
// this node.
|
// this node.
|
||||||
workSum *big.Rat
|
workSum *big.Int
|
||||||
|
|
||||||
// inMainChain denotes whether the block node is currently on the
|
// inMainChain denotes whether the block node is currently on the
|
||||||
// the main chain or not. This is used to help find the common
|
// the main chain or not. This is used to help find the common
|
||||||
|
@ -85,7 +85,7 @@ type orphanBlock struct {
|
||||||
// down the chain. It is used primarily to allow a new node to be dynamically
|
// down the chain. It is used primarily to allow a new node to be dynamically
|
||||||
// inserted from the database into the memory chain prior to nodes we already
|
// inserted from the database into the memory chain prior to nodes we already
|
||||||
// have and update their work values accordingly.
|
// have and update their work values accordingly.
|
||||||
func addChildrenWork(node *blockNode, work *big.Rat) {
|
func addChildrenWork(node *blockNode, work *big.Int) {
|
||||||
for _, childNode := range node.children {
|
for _, childNode := range node.children {
|
||||||
childNode.workSum.Add(childNode.workSum, work)
|
childNode.workSum.Add(childNode.workSum, work)
|
||||||
addChildrenWork(childNode, work)
|
addChildrenWork(childNode, work)
|
||||||
|
|
|
@ -171,11 +171,18 @@ func BigToCompact(n *big.Int) uint32 {
|
||||||
// accumulated must be the inverse of the difficulty. Also, in order to avoid
|
// accumulated must be the inverse of the difficulty. Also, in order to avoid
|
||||||
// potential division by zero and really small floating point numbers, add 1 to
|
// potential division by zero and really small floating point numbers, add 1 to
|
||||||
// the denominator and multiply the numerator by 2^256.
|
// the denominator and multiply the numerator by 2^256.
|
||||||
func calcWork(bits uint32) *big.Rat {
|
func calcWork(bits uint32) *big.Int {
|
||||||
// (1 << 256) / (difficultyNum + 1)
|
// Return a work value of zero if the passed difficulty bits represent
|
||||||
|
// a negative number. Note this should not happen in practice with valid
|
||||||
|
// blocks, but an invalid block could trigger it.
|
||||||
difficultyNum := CompactToBig(bits)
|
difficultyNum := CompactToBig(bits)
|
||||||
|
if difficultyNum.Sign() <= 0 {
|
||||||
|
return big.NewInt(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// (1 << 256) / (difficultyNum + 1)
|
||||||
denominator := new(big.Int).Add(difficultyNum, bigOne)
|
denominator := new(big.Int).Add(difficultyNum, bigOne)
|
||||||
return new(big.Rat).SetFrac(oneLsh256, denominator)
|
return new(big.Int).Div(oneLsh256, denominator)
|
||||||
}
|
}
|
||||||
|
|
||||||
// calcEasiestDifficulty calculates the easiest possible difficulty that a block
|
// calcEasiestDifficulty calculates the easiest possible difficulty that a block
|
||||||
|
|
Loading…
Reference in a new issue