Compare commits
178 commits
master...WIP-HF-202
Author | SHA1 | Date
--- | --- | ---
 | 97a1553f9c | 
 | 9477313d74 | 
 | 494a39c1ab | 
 | 32e001c446 | 
 | d816013234 | 
 | fbb382d45a | 
 | 45e6adbe46 | 
 | b87ee2b51e | 
 | 3f2a2b1f76 | 
 | cb7b2b28bf | 
 | 2df74a828f | 
 | eb686cfa9e | 
 | d9147a4b9c | 
 | 0783e61fd8 | 
 | 73af86f9ae | 
 | 00d1ac1162 | 
 | 974c71284b | 
 | ca6243fbd0 | 
 | 68369ba937 | 
 | b9d6044694 | 
 | 4e63bef33b | 
 | a09c895f49 | 
 | f73b79ae04 | 
 | ed5dd41a2a | 
 | 0c8cf5dea0 | 
 | 41472404c6 | 
 | 30f4bd6582 | 
 | c035acb6b2 | 
 | c87ce56241 | 
 | 276a6141c7 | 
 | 96846fef20 | 
 | 9ae7b95c85 | 
 | 3fde64cd63 | 
 | a76bc2b828 | 
 | 4b47722136 | 
 | 8f3de76e19 | 
 | 9f88501d07 | 
 | 207fadab42 | 
 | 8205e467af | 
 | 7f3d51f8c3 | 
 | 470a71fbe1 | 
 | 753f413c13 | 
 | 28a5e6fc65 | 
 | dadad3859a | 
 | 3f0ec0f28a | 
 | 62f2be3284 | 
 | 2765ac7215 | 
 | 6d80d906a8 | 
 | 185cb711b5 | 
 | dfc7a4423c | 
 | 57bca30a00 | 
 | 4ecd4385f5 | 
 | f8a5df1b87 | 
 | a7b3ed5c29 | 
 | 25c026e060 | 
 | 8591f960e2 | 
 | c8abd8fe93 | 
 | 53553df4a8 | 
 | b179b1d52d | 
 | ace9c12860 | 
 | 8420857491 | 
 | 261f9be12e | 
 | a821057784 | 
 | d7bfc9c077 | 
 | a5050cf2de | 
 | 40606d22fd | 
 | 264075b311 | 
 | 780cc0889f | 
 | 3be166e3ae | 
 | 7070d53e09 | 
 | b95ba0ac95 | 
 | ef3d06e62b | 
 | 595d379fa6 | 
 | ca044fefcb | 
 | 491b7b59fc | 
 | e06b11a999 | 
 | 6e5fbf8ea8 | 
 | 7533672993 | 
 | 69f3a39c1c | 
 | 94e99cf6b7 | 
 | 911db90858 | 
 | 07ab66b790 | 
 | 03d1fb0f86 | 
 | 06c8bea6c7 | 
 | d6b968c3ea | 
 | 54036e8bab | 
 | 710bd5646e | 
 | 62c608f265 | 
 | c6410257eb | 
 | 484f7b1fef | 
 | a2ab5b6681 | 
 | 2ddcdb91f5 | 
 | a4720f30e5 | 
 | 30874ff76b | 
 | f3354beb12 | 
 | e00fec1557 | 
 | ed9e17a043 | 
 | dd609d6e36 | 
 | 7ad3a10442 | 
 | d410d7d7d4 | 
 | 4b03b59391 | 
 | 8b706344a1 | 
 | 6fb1c82fe5 | 
 | 1034a66b35 | 
 | a83152214c | 
 | ae7fffbe52 | 
 | 507a4dcc00 | 
 | 0bc18254d4 | 
 | 0e810b4ef4 | 
 | 16bd6633b6 | 
 | 055be988c0 | 
 | 33ee3e2f53 | 
 | 367a75a3f4 | 
 | 6ec9b73a53 | 
 | 81b80328bd | 
 | da9fdabbd5 | 
 | 0a4f228dd1 | 
 | c4f6302189 | 
 | 7791f92f6f | 
 | 8c54905959 | 
 | 6c212fd7ee | 
 | 705d24cab4 | 
 | 43846b1edf | 
 | 1a60e11da7 | 
 | 847a262d78 | 
 | 78046b3815 | 
 | d80863da92 | 
 | d02f97e04a | 
 | 13f646243d | 
 | 1133ea0bb3 | 
 | 8b64adc234 | 
 | 6e86f0d09b | 
 | 71bf51e82c | 
 | 671b5fefef | 
 | 3b86e0a0e2 | 
 | 9b06388f76 | 
 | 69d560c03c | 
 | 77863f5649 | 
 | 26d63c61dc | 
 | a4a21c0b08 | 
 | cc802d1407 | 
 | 4d5c0b2529 | 
 | 8316a06a0e | 
 | 98dd6a900f | 
 | 1be3450efb | 
 | a59e01c23c | 
 | 549a1f26f2 | 
 | e98b7c1a9a | 
 | 77660b7fb0 | 
 | 5f771c1aaa | 
 | e4118c914f | 
 | 55a6bb5e32 | 
 | 728ce1001d | 
 | 54d08ebd5f | 
 | e422d42d7f | 
 | 2d5f7cf825 | 
 | ce1513df03 | 
 | 34ebf0f32f | 
 | 02dab1695f | 
 | 0eaae2663b | 
 | 4d31d1599d | 
 | 215af7ff54 | 
 | 665c29802e | 
 | c771f4fb38 | 
 | 2d2608c34e | 
 | 99cb679b6f | 
 | 05aa488a87 | 
 | dfb1a6797b | 
 | 583b74040d | 
 | c6f4cafe57 | 
 | c19535b145 | 
 | af757d3d0d | 
 | f980c9a28d | 
 | 099784267e | 
 | c997417978 | 
 | bcb9643d39 | 
 | 47806df63d | 
 | 843d7607ef | 
147 changed files with 4109 additions and 27694 deletions
.github/workflows/basic-check.yml (vendored, 2 changes)

@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go: [1.19]
go: [1.16.8, 1.17.5]
steps:
- name: Set up Go
uses: actions/setup-go@v2
.github/workflows/full-sync-part-1.yml (vendored, 4 changes)

@@ -14,7 +14,7 @@ jobs:
runs-on: self-hosted
strategy:
matrix:
go: [1.19]
go: [1.17.5]
steps:
- run: |
echo "Note ${{ github.event.inputs.note }}!"

@@ -29,7 +29,7 @@ jobs:
- name: Create datadir
run: echo "TEMP_DATA_DIR=$(mktemp -d)" >> $GITHUB_ENV
- name: Run lbcd
run: ./lbcd --datadir=${{env.TEMP_DATA_DIR}}/data --logdir=${{env.TEMP_DATA_DIR}}/logs --nolisten --norpc
run: ./lbcd --datadir=${{env.TEMP_DATA_DIR}}/data --logdir=${{env.TEMP_DATA_DIR}}/logs --connect=127.0.0.1 --norpc
- name: Remove datadir
if: always()
run: rm -rf ${{env.TEMP_DATA_DIR}}
.github/workflows/full-sync-part-2.yml (vendored, 4 changes)

@@ -14,7 +14,7 @@ jobs:
runs-on: self-hosted
strategy:
matrix:
go: [1.19]
go: [1.17.5]
steps:
- run: |
echo "Note ${{ github.event.inputs.note }}!"

@@ -31,7 +31,7 @@ jobs:
- name: Copy initial data
run: cp -r /home/lbry/lbcd_814k/* ${{env.TEMP_DATA_DIR}}
- name: Run lbcd
run: ./lbcd --datadir=${{env.TEMP_DATA_DIR}}/data --logdir=${{env.TEMP_DATA_DIR}}/logs --nolisten --norpc
run: ./lbcd --datadir=${{env.TEMP_DATA_DIR}}/data --logdir=${{env.TEMP_DATA_DIR}}/logs --connect=127.0.0.1 --norpc
- name: Remove datadir
if: always()
run: rm -rf ${{env.TEMP_DATA_DIR}}
.github/workflows/golangci-lint.yml (vendored, 2 changes)

@@ -4,7 +4,7 @@ env:
# go needs absolute directories, using the $HOME variable doesn't work here.
GOCACHE: /home/runner/work/go/pkg/build
GOPATH: /home/runner/work/go
GO_VERSION: '^1.19'
GO_VERSION: '^1.17.0'

on:
push:
.github/workflows/release.yml (vendored, 11 changes)

@@ -28,16 +28,7 @@ jobs:
name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19

# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry docker.io
if: github.event_name != 'pull_request'
uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
go-version: 1.17.5
-
name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
.gitignore (vendored, 1 change)

@@ -52,4 +52,3 @@ lbcctl

# CI artifacts
dist
debug
@@ -17,15 +17,13 @@ builds:
- -trimpath
ldflags:
- -s -w
- -buildid=
- -X github.com/lbryio/lbcd/version.appTag={{ .Tag }}
- -X main.appBuild={{.Commit}}
targets:
- linux_amd64
- linux_arm64
- darwin_amd64
- darwin_arm64
- windows_amd64
mod_timestamp: '{{ .CommitTimestamp }}'
-
main: ./cmd/lbcctl
id: "lbcctl"

@@ -34,8 +32,7 @@ builds:
- -trimpath
ldflags:
- -s -w
- -buildid=
- -X github.com/lbryio/lbcd/version.appTag={{ .Tag }}
- -X main.appBuild={{.Commit}}
env:
- CGO_ENABLED=0
targets:

@@ -44,11 +41,10 @@ builds:
- darwin_amd64
- darwin_arm64
- windows_amd64
mod_timestamp: '{{ .CommitTimestamp }}'
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Version }}+{{ .Commit }}"
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
filters:

@@ -56,13 +52,6 @@ changelog:
- '^docs:'
- '^test:'

dockers:
- use: buildx
dockerfile: Dockerfile.goreleaser
image_templates:
- "docker.io/lbry/lbcd:{{ .Tag }}"
- "docker.io/lbry/lbcd:latest"

release:
draft: true
prerelease: auto
Dockerfile (14 changes)

@@ -11,14 +11,20 @@
# For more information how to use this docker image visit:
# https://github.com/lbryio/lbcd/tree/master/docs
#
# 9246 Mainnet LBRY peer-to-peer port
# 9246 Mainnet Bitcoin peer-to-peer port
# 9245 Mainet RPC port

ARG ARCH=amd64

FROM golang:1.19 AS build-container
# using the SHA256 instead of tags
# https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
# https://cloud.google.com/architecture/using-container-images
# https://github.com/google/go-containerregistry/blob/main/cmd/crane/README.md
# ➜ ~ crane digest golang:1.16-alpine3.12
# sha256:db2475a1dbb2149508e5db31d7d77a75e6600d54be645f37681f03f2762169ba
FROM golang@sha256:db2475a1dbb2149508e5db31d7d77a75e6600d54be645f37681f03f2762169ba AS build-container

ARG ARCH
ENV GO111MODULE=on

ADD . /app
WORKDIR /app

@@ -29,7 +35,7 @@ RUN set -ex \
&& echo "Compiling for $GOARCH" \
&& go install -v . ./cmd/...

FROM $ARCH/debian:bullseye-20220418-slim
FROM $ARCH/alpine:3.12

COPY --from=build-container /go/bin /bin
@@ -1,9 +0,0 @@
FROM debian:bullseye-20220418-slim

COPY lbcd lbcctl /bin/

VOLUME ["/root/.lbcd"]

EXPOSE 9245 9246

ENTRYPOINT ["lbcd"]
README.md (398 changes)

@@ -5,310 +5,17 @@
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
<!--[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/lbryio/lbcd)-->

**lbcd** is a full node implementation of LBRY's blockchain written in Go (golang).
`lbcd` is a full node implementation of LBRY's blockchain written in Go (golang).

Software stack developed by LBRY teams has been all migrated to **lbcd**.
This project is currently under active development and is in a Beta state while
we ensure it matches LBRYcrd's functionality. The intention is that it properly
downloads, validates, and serves the block chain using the exact rules
(including consensus bugs) for block acceptance as LBRYcrd.
We have taken great care to avoid lbcd causing a fork to the blockchain.

We're working with exchanges and pool oerators to migrate from **lbrycrd** to **lbcd**.

If you're integrating with **lbcd+lbcwallet**, please check the Wiki for current [supported RPCs](wiki/RPC-availability).

Note: **lbcd** does *NOT* include wallet functionality. That functionality is provided by the
Note: `lbcd` does *NOT* include wallet functionality. That functionality is provided by the
[lbcwallet](https://github.com/lbryio/lbcwallet) and the [LBRY SDK](https://github.com/lbryio/lbry-sdk).

## Requirements

All common operating systems are supported. lbcd requires at least 8GB of RAM
and at least 100GB of disk storage. Both RAM and disk requirements increase slowly over time.
Using a fast NVMe disk is recommended.

## Installation

Acquire binary files from [releases](https://github.com/lbryio/lbcd/releases)

For compilation, [Go](http://golang.org) 1.19 or newer is required.
Install Go according to its [installation instructions](http://golang.org/doc/install).

``` sh
# lbcd (full node)
$ go install github.com/lbryio/lbcd@latest

# lbcctl (rpc client utility)
$ go install github.com/lbryio/lbcd/cmd/lbcctl@latest
```

## Usage

Default application folder `${LBCDDIR}`:

- Linux: `~/.lbcd/`
- MacOS: `/Users/<username>/Library/Application Support/Lbcd/`

### Start the **lbcd**

``` sh
./lbcd
```

**lbcd** loads config file at `"${LBCDDIR}/lbcd.conf"`.

If no config is found, it creates a [default one](sample-lbcd.conf), which includes all available options with default settings except randomly generated *RPC credentials* (see below).

### RPC server

RPC credentials (`rpcuser` and `rpcpass`) is required to enable RPC server. It can be specify in the `"${LBCDDIR}/lbcd.conf"`, using command line options:

``` sh
./lbcd --rpcuser=rpcuser --rpcpass=rpcpass

2022-07-28 12:28:19.627 [INF] RPCS: RPC server listening on 0.0.0.0:9245
2022-07-28 12:28:19.627 [INF] RPCS: RPC server listening on [::]:9245
```

### Working with TLS (Default)

By default, **lbcd** runs RPC server with TLS enabled, and generates the `rpc.cert` and `rpc.key` under `${LBCDDIR}`, if not exist already.

To interact with the RPC server, a client has to either specify the `rpc.cert`, or disable the certification verification for TLS.

Interact with **lbcd** RPC using `lbcctl`

``` sh
$ ./lbcctl --rpccert "${LBCDDIR}/rpc.cert" getblockcount

# or disable the certificate verification
$ ./lbcctl --skipverify getblockcount

1200062
```

Interact with **lbcd** RPC using `curl`

``` sh
$ curl --user rpcuser:rpcpass \
--cacert "${LBCDDIR}/rpc.cert" \
--data-binary '{"jsonrpc": "1.0", "id": "curltest", "method": "getblockcount", "params": []}' \
-H 'content-type: text/plain;' \
https://127.0.0.1:9245/

# or disable the certificate verification
$ curl --user rpcuser:rpcpass \
--insecure \
--data-binary '{"jsonrpc": "1.0", "id": "curltest", "method": "getblockcount", "params": []}' \
-H 'content-type: text/plain;' \
https://127.0.0.1:9245/
```

``` json
{"jsonrpc":"1.0","result":1200062,"error":null,"id":"curltest"}
```

### Working without TLS

TLS can be disabled using the `--notls` option:

``` sh
$ ./lbcd --notls
```

``` sh
$ ./lbcctl --notls getblockcount

1200062
```

``` sh
$ curl --user rpcuser:rpcpass \
--data-binary '{"jsonrpc": "1.0", "id": "curltest", "method": "getblockcount", "params": []}' \
-H 'content-type: text/plain;' \
http://127.0.0.1:9245/
```

``` json
{"jsonrpc":"1.0","result":1200062,"error":null,"id":"curltest"}
```

## Using Snapshots (optional)

[Snapshots](https://snapshots.lbry.com/blockchain/) are created bi-weekly to help new users catch up current block height.

The snapshots are archived and compressed in [zstd](https://facebook.github.io/zstd/) format for it's compression ratio and speed.

Download the snapshot, and uncompress it:

``` sh
time curl -O https://snapshots.lbry.com/blockchain/lbcd_snapshot_1199527_v0.22.105_2022-07-27.tar.zst
zstd -d --stdout lbcd_snapshot_1199527_v0.22.105_2022-07-27.tar.zst | tar xf - -C "${LBCDDIR}"
```

If preferred, a user can download and uncompress the snapshot on the fly:
By the time the download is finished, the snapshots should be almost uncompressed already.

``` sh
mkdir -p "${LBCDDIR}"

time curl https://snapshots.lbry.com/blockchain/lbcd_snapshot_1199527_v0.22.105_2022-07-27.tar.zst | zstd -d --stdout | tar xf - -C "${LBCDDIR}"

# % Total % Received % Xferd Average Speed Time Time Time Current
# Dload Upload Total Spent Left Speed
# 100 64.9G 100 64.9G 0 0 37.0M 0 0:29:49 0:29:49 --:--:-- 33.0M
#
# real 29m49.962s
# user 6m53.710s
# sys 8m56.545s
```

## Working with RPCs

Using `lbcctl -l` to list available RPCs:

``` sh
$ lbcctl -l

Chain Server Commands:
addnode "addr" "add|remove|onetry"
createrawtransaction [{"txid":"value","vout":n},...] {"address":amount,...} (locktime)
debuglevel "levelspec"
decoderawtransaction "hextx"
decodescript "hexscript"
deriveaddresses "descriptor" ({"value":value})
fundrawtransaction "hextx" {"changeaddress":changeaddress,"changeposition":changeposition,"changetype":changetype,"includewatching":includewatching,"lockunspents":lockunspents,"feerate":feerate,"subtractfeefromoutputs":[subtractfeefromoutput,...],"replaceable":replaceable,"conftarget":conftarget,"estimatemode":estimatemode} (iswitness)
generate numblocks

[skipped]

Wallet Server Commands (--wallet):
addmultisigaddress nrequired ["key",...] ("account")
addwitnessaddress "address"
backupwallet "destination"
createmultisig nrequired ["key",...]
createnewaccount "account"
createwallet "walletname" (disableprivatekeys=false blank=false passphrase="" avoidreuse=false)
dumpprivkey "address"
dumpwallet "filename"
encryptwallet "passphrase"
estimatefee numblocks
estimatepriority numblocks
estimatesmartfee conftarget (estimatemode="CONSERVATIVE")
getaccount "address"
getaccountaddress "account"
getaddressesbyaccount "account"

[skipped]
```

Using `lbcctl help rpcname` to show the RPC spec:

``` sh
$ lbcctl help getblock

getblock "hash" (verbosity=1)

Returns information about a block given its hash.

Arguments:
1. hash (string, required) The hash of the block
2. verbosity (numeric, optional, default=1) Specifies whether the block data should be returned as a hex-encoded string (0), as parsed data with a slice of TXIDs (1), or as parsed data with parsed transaction data (2)

Result (verbosity=0):
"value" (string) Hex-encoded bytes of the serialized block

Result (verbosity=1):
{
"getblockverboseresultbase": { (object)
"hash": "value", (string) The hash of the block (same as provided)
"confirmations": n, (numeric) The number of confirmations
"strippedsize": n, (numeric) The size of the block without witness data
"size": n, (numeric) The size of the block
"weight": n, (numeric) The weight of the block
"height": n, (numeric) The height of the block in the block chain
"version": n, (numeric) The block version
"versionHex": "value", (string) The block version in hexadecimal
"merkleroot": "value", (string) Root hash of the merkle tree
"time": n, (numeric) The block time in seconds since 1 Jan 1970 GMT
"mediantime": n, (numeric) The median block time in seconds since 1 Jan 1970 GMT
"nonce": n, (numeric) The block nonce
"bits": "value", (string) The bits which represent the block difficulty
"difficulty": n.nnn, (numeric) The proof-of-work difficulty as a multiple of the minimum difficulty
"chainwork": "value", (string) Expected number of hashes required to produce the chain up to this block (in hex)
"previousblockhash": "value", (string) The hash of the previous block
"nextblockhash": "value", (string) The hash of the next block (only if there is one)
"nameclaimroot": "value", (string) Root hash of the claim trie
"nTx": n, (numeric) The number of transactions (aka, count of TX)
},
"tx": ["value",...], (array of string) The transaction hashes (only when verbosity=1)
}
```

## **lbcd** & **lbcwallet**

*Wallet* related functianlities and RPCs are provided by a separate programe - [**lbcwallet**](https://github.com/lbryio/lbcwallet).

Once setup, lbcwallet can serve wallet related RPCs as well as proxy lbcd RPCs to an assocated lbcd now.
It's sufficient for user to connect just the **lbcwallet** instead of both.

``` mermaid
sequenceDiagram
actor C as lbcctl
participant W as lbcwallet (port: 9244)
participant D as lbcd (port: 9245)

rect rgb(200,200,200)
Note over C,D: lbcctl getblockcount
C ->>+ D: getblockcount
D -->>- C: response
end

rect rgb(200,200,200)
Note over C,W: lbcctl --wallet balance
C ->>+ W: getbalance
W -->>- C: response
end

rect rgb(200,200,200)
Note over C,D: lbcctl --wallet getblockcount (lbcd RPC service proxied by lbcwallet)
C ->>+ W: getblockcount
W ->>+ D: getblockcount
D -->>- W: response
W -->>- C: response
end
```

While **lbcd** can run standalone as a full node, **lbcwallet** requires an associated **lbcd** instance for scanning and sync'ing block data.

``` mermaid
sequenceDiagram
participant W as lbcwallet (RPC port: 9244)
participant D as lbcd (RPC port: 9245, P2P port: 9246)
participant D2 as other lbcd node(s) (P2P port: 9246)

rect rgb(200,200,200)
Note over W,D: Asynchronous websocket notifications
W ->> D: subscribe to notifications
D -->> W: notification
D -->> W: notification
end

rect rgb(200,200,200)
Note over W,D: lbcd RPCs
W ->>+ D: getblockheader
D ->>- W: response
end

rect rgb(200,200,200)
Note over D,D2: P2P messages over port 9246
D -->> D2: P2P message
D2 -->> D: P2P message
end

```

## Data integrity

**lbcd** is not immune to data loss. It expects a clean shutdown via SIGINT or
SIGTERM. SIGKILL, immediate VM kills, and sudden power loss can cause data
corruption, thus requiring chain resynchronization for recovery.

## Security

We take security seriously. Please contact [security](mailto:security@lbry.com) regarding any security issues.

@@ -317,6 +24,97 @@ Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it.
We maintain a mailing list for notifications of upgrades, security issues,
and soft/hard forks. To join, visit [fork list](https://lbry.com/forklist)

## Requirements

All common operating systems are supported. lbcd requires at least 8GB of RAM
and at least 100GB of disk storage. Both RAM and disk requirements increase slowly over time.
Using a fast NVMe disk is recommended.

`lbcd` is not immune to data loss. It expects a clean shutdown via SIGINT or
SIGTERM. SIGKILL, immediate VM kills, and sudden power loss can cause data
corruption, thus requiring chain resynchronization for recovery.

For compilation, [Go](http://golang.org) 1.16 or newer is required.

## Installation

Acquire binary files from [releases](https://github.com/lbryio/lbcd/releases)

### To build from Source on Linux/BSD/MacOSX/POSIX

Install Go according to its [installation instructions](http://golang.org/doc/install).

``` sh
git clone https://github.com/lbryio/lbcd
cd lbcd

# Build lbcd
go build .

# Build lbcctl
go build ./cmd/lbcctl
```

Both [GoLand](https://www.jetbrains.com/go/)
and [VS Code](https://code.visualstudio.com/docs/languages/go) IDEs are supported.

## Usage

By default, data and logs are stored in `<LBCDDIR>`:

- Linux: `~/.lbcd/`
- MacOS: `/Users/<username>/Library/Application Support/Lbcd/`

To enable RPC access a username and password is required. Example:

``` sh
./lbcd --txindex --rpcuser=rpcuser --rpcpass=rpcpass
```

Interact with lbcd via RPC using `lbcctl`

``` sh
./lbcctl --rpcuser=rpcuser --rpcpass=rpcpass getblockcount
./lbcctl --rpcuser=rpcuser --rpcpass=rpcpass getblocktemplate
```

By default, the RPCs are served over TLS. `lbcd` generates (if not exists) `rpc.cert` and
`rpc.key` under `<LBCDDIR>` where `lbcctl` would search and use them.

The RPCs can also be served without TLS *(on localhost only)* using (`--notls`)

``` sh
./lbcd --txindex --rpcuser=rpcuser --rpcpass=rpcpass --notls
./lbcctl --rpcuser=rpcuser --rpcpass=rpcpass --notls getblockcount
```

## Working with Different Networks

By default, `lbcd` and `lbcctl` use the following ports for different networks respectively:

| Network | RPC Port | Network Port |
| ------- | -------- | ------------ |
| mainnet | 9245 | 9246 |
| testnet | 19245 | 19246 |
| regtest | 29245 | 29246 |

Running `lbcd` and `lbcctl` with `--testnet` or `--regtest` would use different chain params as well as default RPC and Network ports.

``` sh
./lbcd --txindex --rpcuser=rpcuser --rpcpass=rpcpass --regtest
./lbcctl --rpcuser=rpcuser --rpcpass=rpcpass --regtest getblockcount
```

The default Network and RPC ports of `lbcd` can be overriden using `--listen` and `--rpclisten`
`lbcctl` can also connect to RPC server specified by `--rpcserver`

``` sh
./lbcd --txindex --rpcuser=rpcuser --rpcpass=rpcpass --regtest --listen=127.0.0.1:29248 --rpclisten=127.0.0.1:29247
./lbcctl --rpcuser=rpcuser --rpcpass=rpcpass --regtest --rpcserver=127.0.0.1:29247 getblockcount
```

Note: Wallet related RPCs are provided by [lbcwallet](https://github.com/lbryio/lbcwallet).

## Contributing

Contributions to this project are welcome, encouraged, and compensated.
@@ -5,7 +5,7 @@
/*
Package addrmgr implements concurrency safe Bitcoin address manager.

# Address Manager Overview
Address Manager Overview

In order maintain the peer-to-peer Bitcoin network, there needs to be a source
of addresses to connect to as nodes come and go. The Bitcoin protocol provides
@@ -8,7 +8,6 @@ package blockchain
import (
"container/list"
"fmt"
"math/big"
"sync"
"time"

@@ -37,9 +36,8 @@ const (
// from the block being located.
//
// For example, assume a block chain with a side chain as depicted below:
//
// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
// \-> 16a -> 17a
//
// The block locator for block 17a would be the hashes of blocks:
// [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis]

@@ -489,7 +487,7 @@ func (b *BlockChain) calcSequenceLock(node *blockNode, tx *btcutil.Tx, utxoView
// LockTimeToSequence converts the passed relative locktime to a sequence
// number in accordance to BIP-68.
// See: https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki
// - (Compatibility)
// * (Compatibility)
func LockTimeToSequence(isSeconds bool, locktime uint32) uint32 {
// If we're expressing the relative lock time in blocks, then the
// corresponding sequence number is simply the desired input age.
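For readers unfamiliar with BIP-68, the conversion touched in the hunk above is small enough to sketch. The snippet below is an illustrative stand-alone version rather than the package's own code; the `1 << 22` type flag and the 512-second (2^9) granularity come from BIP-68 itself, while the constant and function names are invented for the example.

```go
package main

import "fmt"

const (
	// BIP-68: bit 22 of the sequence marks a time-based lock.
	sequenceLockTimeIsSeconds = 1 << 22
	// BIP-68: time-based locks are expressed in units of 2^9 = 512 seconds.
	sequenceLockTimeGranularity = 9
)

// lockTimeToSequence mirrors the documented behavior: block-based locktimes
// map directly to the sequence number, while second-based locktimes are
// converted to 512-second units and flagged as time-based.
func lockTimeToSequence(isSeconds bool, locktime uint32) uint32 {
	if !isSeconds {
		return locktime
	}
	return sequenceLockTimeIsSeconds | locktime>>sequenceLockTimeGranularity
}

func main() {
	fmt.Println(lockTimeToSequence(false, 10))  // 10-block lock -> 10
	fmt.Println(lockTimeToSequence(true, 5120)) // 5120 s -> 10 units with bit 22 set
}
```

Running it prints 10 for the block-based lock and 4194314 for the 5120-second lock (10 units of 512 seconds with the seconds flag set).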
@@ -1006,7 +1004,6 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error
err = b.checkConnectBlock(n, block, view, nil)
if err != nil {
if _, ok := err.(RuleError); ok {
b.index.UnsetStatusFlags(n, statusValid)
b.index.SetStatusFlags(n, statusValidateFailed)
for de := e.Next(); de != nil; de = de.Next() {
dn := de.Value.(*blockNode)

@@ -1108,8 +1105,8 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error
// a reorganization to become the main chain).
//
// The flags modify the behavior of this function as follows:
// - BFFastAdd: Avoids several expensive transaction validation operations.
// This is useful when using checkpoints.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, flags BehaviorFlags) (bool, error) {

@@ -1144,7 +1141,6 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla
if err == nil {
b.index.SetStatusFlags(node, statusValid)
} else if _, ok := err.(RuleError); ok {
b.index.UnsetStatusFlags(node, statusValid)
b.index.SetStatusFlags(node, statusValidateFailed)
} else {
return false, err

@@ -1179,7 +1175,6 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla
// that status of the block as invalid and flush the
// index state to disk before returning with the error.
if _, ok := err.(RuleError); ok {
b.index.UnsetStatusFlags(node, statusValid)
b.index.SetStatusFlags(
node, statusValidateFailed,
)

@@ -1250,8 +1245,8 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla
// isCurrent returns whether or not the chain believes it is current. Several
// factors are used to guess, but the key factors that allow the chain to
// believe it is current are:
// - Latest block height is after the latest checkpoint (if enabled)
// - Latest block has a timestamp newer than ~6 hours ago (as LBRY block time is one fourth of bitcoin)
//
// This function MUST be called with the chain state lock held (for reads).
func (b *BlockChain) isCurrent() bool {

@@ -1274,8 +1269,8 @@ func (b *BlockChain) isCurrent() bool {
// IsCurrent returns whether or not the chain believes it is current. Several
// factors are used to guess, but the key factors that allow the chain to
// believe it is current are:
// - Latest block height is after the latest checkpoint (if enabled)
// - Latest block has a timestamp newer than 24 hours ago
//
// This function is safe for concurrent access.
func (b *BlockChain) IsCurrent() bool {

@@ -1375,57 +1370,6 @@ func (b *BlockChain) BlockHashByHeight(blockHeight int32) (*chainhash.Hash, erro
return &node.hash, nil
}

// BlockAttributes desribes a Block in relation to others on the main chain.
type BlockAttributes struct {
Height int32
Confirmations int32
MedianTime time.Time
ChainWork *big.Int
PrevHash *chainhash.Hash
NextHash *chainhash.Hash
}

// BlockAttributesByHash returns BlockAttributes for the block with the given hash
// relative to other blocks in the main chain. A BestState snapshot describing
// the main chain is also returned for convenience.
//
// This function is safe for concurrent access.
func (b *BlockChain) BlockAttributesByHash(hash *chainhash.Hash, prevHash *chainhash.Hash) (
attrs *BlockAttributes, best *BestState, err error) {
best = b.BestSnapshot()
node := b.index.LookupNode(hash)
if node == nil {
str := fmt.Sprintf("block %s not found", hash)
return nil, best, errNotInMainChain(str)
}

attrs = &BlockAttributes{
Height: node.height,
Confirmations: 1 + best.Height - node.height,
MedianTime: node.CalcPastMedianTime(),
ChainWork: node.workSum,
}
if !b.bestChain.Contains(node) {
attrs.Confirmations = -1
}

// Populate prev block hash if there is one.
if node.height > 0 {
attrs.PrevHash = prevHash
}

// Populate next block hash if there is one.
if node.height < best.Height {
nextHash, err := b.BlockHashByHeight(node.height + 1)
if err != nil {
return nil, best, err
}
attrs.NextHash = nextHash
}

return attrs, best, nil
}

// HeightRange returns a range of block hashes for the given start and end
// heights. It is inclusive of the start height and exclusive of the end
// height. The end height will be limited to the current main chain height.

@@ -1561,11 +1505,11 @@ func (b *BlockChain) IntervalBlockHashes(endHash *chainhash.Hash, interval int,
//
// In addition, there are two special cases:
//
// - When no locators are provided, the stop hash is treated as a request for
// that block, so it will either return the node associated with the stop hash
// if it is known, or nil if it is unknown
// - When locators are provided, but none of them are known, nodes starting
// after the genesis block will be returned
//
// This is primarily a helper function for the locateBlocks and locateHeaders
// functions.

@@ -1649,11 +1593,11 @@ func (b *BlockChain) locateBlocks(locator BlockLocator, hashStop *chainhash.Hash
//
// In addition, there are two special cases:
//
// - When no locators are provided, the stop hash is treated as a request for
// that block, so it will either return the stop hash itself if it is known,
// or nil if it is unknown
// - When locators are provided, but none of them are known, hashes starting
// after the genesis block will be returned
//
// This function is safe for concurrent access.
func (b *BlockChain) LocateBlocks(locator BlockLocator, hashStop *chainhash.Hash, maxHashes uint32) []chainhash.Hash {

@@ -1694,11 +1638,11 @@ func (b *BlockChain) locateHeaders(locator BlockLocator, hashStop *chainhash.Has
//
// In addition, there are two special cases:
//
// - When no locators are provided, the stop hash is treated as a request for
// that header, so it will either return the header for the stop hash itself
// if it is known, or nil if it is unknown
// - When locators are provided, but none of them are known, headers starting
// after the genesis block will be returned
//
// This function is safe for concurrent access.
func (b *BlockChain) LocateHeaders(locator BlockLocator, hashStop *chainhash.Hash) []wire.BlockHeader {

@@ -1776,7 +1720,7 @@ func (b *BlockChain) reconsiderBlock(hash *chainhash.Hash) error {
}

// No need to reconsider, it is already valid.
if node.status.KnownValid() && !node.status.KnownInvalid() { // second clause works around old bug
if node.status.KnownValid() {
err := fmt.Errorf("block %s is already valid", hash)
return err
}
@@ -36,13 +36,11 @@ func fastLog2Floor(n uint32) uint8 {
// for comparing chains.
//
// For example, assume a block chain with a side chain as depicted below:
//
// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8
// \-> 4a -> 5a -> 6a
//
// The chain view for the branch ending in 6a consists of:
//
// genesis -> 1 -> 2 -> 3 -> 4a -> 5a -> 6a
type chainView struct {
mtx sync.Mutex
nodes []*blockNode

@@ -260,14 +258,12 @@ func (c *chainView) next(node *blockNode) *blockNode {
// view.
//
// For example, assume a block chain with a side chain as depicted below:
//
// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8
// \-> 4a -> 5a -> 6a
//
// Further, assume the view is for the longer chain depicted above. That is to
// say it consists of:
//
// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8
//
// Invoking this function with block node 5 would return block node 6 while
// invoking it with block node 5a would return nil since that node is not part

@@ -325,14 +321,12 @@ func (c *chainView) findFork(node *blockNode) *blockNode {
// the chain view. It will return nil if there is no common block.
//
// For example, assume a block chain with a side chain as depicted below:
//
// genesis -> 1 -> 2 -> ... -> 5 -> 6 -> 7 -> 8
// \-> 6a -> 7a
//
// Further, assume the view is for the longer chain depicted above. That is to
// say it consists of:
//
// genesis -> 1 -> 2 -> ... -> 5 -> 6 -> 7 -> 8.
//
// Invoking this function with block node 7a would return block node 5 while
// invoking it with block node 7 would return itself since it is already part of
@@ -185,14 +185,14 @@ func isNonstandardTransaction(tx *btcutil.Tx) bool {
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
// - The block must be in the main chain
// - The block must be at least 'CheckpointConfirmations' blocks prior to the
// current end of the main chain
// - The timestamps for the blocks before and after the checkpoint must have
// timestamps which are also before and after the checkpoint, respectively
// (due to the median time allowance this is not always the case)
// - The block must not contain any strange transaction such as those with
// nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
@@ -3,17 +3,21 @@ package blockchain
import (
"bytes"
"fmt"
"strings"

"github.com/pkg/errors"

"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/txscript"
"github.com/lbryio/lbcd/wire"
btcutil "github.com/lbryio/lbcutil"

"github.com/lbryio/lbcd/claimtrie"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/merkletrie"
"github.com/lbryio/lbcd/claimtrie/node"
"github.com/lbryio/lbcd/claimtrie/normalization"
"github.com/lbryio/lbcd/claimtrie/param"
)

func (b *BlockChain) SetClaimtrieHeader(block *btcutil.Block, view *UtxoViewpoint) error {

@@ -44,7 +48,7 @@ func (b *BlockChain) ParseClaimScripts(block *btcutil.Block, bn *blockNode, view
}
}

err := b.claimTrie.AppendBlock(bn == nil)
err := b.claimTrie.AppendBlock()
if err != nil {
return errors.Wrapf(err, "in append block")
}

@@ -181,3 +185,68 @@ func (b *BlockChain) GetClaimsForName(height int32, name string) (string, *node.
n.SortClaimsByBid()
return string(normalizedName), n, nil
}

func (b *BlockChain) GetProofForName(name, id string, bid, seq int) (chainhash.Hash, int32, *node.Claim, int32, int32, string, []merkletrie.HashSidePair, error) {
// results: block hash, height, claim, bid, takeover, name, pairs, err

b.chainLock.RLock()
defer b.chainLock.RUnlock()

tip := b.bestChain.Tip()

normalizedName := normalization.NormalizeIfNecessary([]byte(name), tip.height)

if tip.height < param.ActiveParams.GrandForkHeight {
err := errors.Errorf("Unable to generate proofs for claims before height %d",
param.ActiveParams.GrandForkHeight)
return tip.hash, tip.height, nil, 0, 0, string(normalizedName), nil, err
}

n, err := b.claimTrie.NodeAt(tip.height, normalizedName)
if n == nil && err == nil {
err = errors.Errorf("Unable to locate a claim with name %s at height %d", normalizedName, tip.height)
}
if err != nil {
return tip.hash, tip.height, nil, 0, 0, string(normalizedName), nil, err
}

// now find the desired claim
n.SortClaimsByBid()
var claim *node.Claim
for i, c := range n.Claims {
if c.Status != node.Activated {
continue
}
if bid >= 0 && i == bid {
claim = c
bid = i
break
}
if seq >= 0 && int(c.Sequence) == seq {
claim = c
bid = i
break
}
if len(id) > 0 && strings.HasPrefix(c.ClaimID.String(), id) {
claim = c
bid = i
break
}
}
if claim == nil {
if bid >= 0 {
err = errors.Errorf("Unable to locate a claim named %s with bid %d at height %d", normalizedName, bid, tip.height)
}
if seq >= 0 {
err = errors.Errorf("Unable to locate a claim named %s with sequence %d at height %d", normalizedName, seq, tip.height)
}
if len(id) > 0 {
err = errors.Errorf("Unable to locate a claim named %s with ID %s at height %d", normalizedName, id, tip.height)
}
return tip.hash, tip.height, nil, 0, 0, string(normalizedName), nil, err
}

pairs := b.claimTrie.MerklePath(normalizedName, n, bid)

return tip.hash, tip.height, claim, int32(bid), n.TakenOverAt, string(normalizedName), pairs, nil
}
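The `// results:` comment above fixes the order of the eight return values. A hypothetical caller (the package name, helper name, and logging below are assumptions for illustration; only the signature and result order come from the function above) could unpack them like this:

```go
package proofexample

import (
	"fmt"

	"github.com/lbryio/lbcd/blockchain"
)

// printProof resolves the current best (bid 0) claim for a name and prints the
// pieces needed to build a proof: block hash, height, winning claim, bid,
// takeover height, normalized name, and the merkle path pairs.
func printProof(chain *blockchain.BlockChain, name string) error {
	hash, height, claim, bid, takeover, normalized, pairs, err :=
		chain.GetProofForName(name, "", 0, -1)
	if err != nil {
		return err
	}
	fmt.Printf("%q (normalized %q): bid %d, claim %s, takeover height %d, block %s at height %d, %d path pairs\n",
		name, normalized, bid, claim.ClaimID, takeover, hash, height, len(pairs))
	return nil
}
```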
@@ -42,21 +42,18 @@ func HashToBig(hash *chainhash.Hash) *big.Int {
// Like IEEE754 floating point, there are three basic components: the sign,
// the exponent, and the mantissa. They are broken out as follows:
//
// - the most significant 8 bits represent the unsigned base 256 exponent
// * the most significant 8 bits represent the unsigned base 256 exponent
// * bit 23 (the 24th bit) represents the sign bit
// * the least significant 23 bits represent the mantissa
//
// - bit 23 (the 24th bit) represents the sign bit
//
// - the least significant 23 bits represent the mantissa
//
// -------------------------------------------------
// | Exponent | Sign | Mantissa |
// -------------------------------------------------
// | 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] |
// -------------------------------------------------
//
// The formula to calculate N is:
//
// N = (-1^sign) * mantissa * 256^(exponent-3)
//
// This compact form is only used in bitcoin to encode unsigned 256-bit numbers
// which represent difficulty targets, thus there really is not a need for a
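As a worked illustration of the formula above, the stand-alone sketch below expands a compact value into a big integer. It follows the usual btcd-style CompactToBig logic, but the function name and example value are written independently for this note and are not part of the diff.

```go
package main

import (
	"fmt"
	"math/big"
)

// compactToBig expands a 32-bit compact target into a big.Int using
// N = (-1^sign) * mantissa * 256^(exponent-3), as described above.
func compactToBig(compact uint32) *big.Int {
	mantissa := compact & 0x007fffff
	isNegative := compact&0x00800000 != 0
	exponent := uint(compact >> 24)

	var bn *big.Int
	if exponent <= 3 {
		// Small exponents keep only the most significant mantissa bytes.
		mantissa >>= 8 * (3 - exponent)
		bn = big.NewInt(int64(mantissa))
	} else {
		bn = big.NewInt(int64(mantissa))
		bn.Lsh(bn, 8*(exponent-3))
	}
	if isNegative {
		bn = bn.Neg(bn)
	}
	return bn
}

func main() {
	// 0x1d00ffff is the well-known Bitcoin genesis difficulty target.
	fmt.Printf("%x\n", compactToBig(0x1d00ffff))
}
```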
@@ -26,42 +26,42 @@ caller a high level of flexibility in how they want to react to certain events
such as orphan blocks which need their parents requested and newly connected
main chain blocks which might result in wallet updates.

# Bitcoin Chain Processing Overview
Bitcoin Chain Processing Overview

Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:

- Reject duplicate blocks
- Perform a series of sanity checks on the block and its transactions such as
verifying proof of work, timestamps, number and character of transactions,
transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
blocks become available
- Stop processing if the block is an orphan as the rest of the processing
depends on the block's position within the block chain
- Perform a series of more thorough checks that depend on the block's position
within the block chain such as verifying block difficulties adhere to
difficulty retarget rules, timestamps are after the median of the last
several blocks, all transactions are finalized, checkpoint blocks match, and
block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
accordingly in order to ensure any side chains which have higher difficulty
than the main chain become the new main chain
- When a block is being connected to the main chain (either through
reorganization of a side chain to the main chain or just extending the
main chain), perform further checks on the block's transactions such as
verifying transaction duplicates, script complexity for the combination of
connected scripts, coinbase maturity, double spends, and connected
transaction values
- Run the transaction scripts to verify the spender is allowed to spend the
coins
- Insert the block into the block database

# Errors
Errors

Errors returned by this package are either the raw errors provided by underlying
calls or of type blockchain.RuleError. This allows the caller to differentiate

@@ -70,12 +70,12 @@ violations through type assertions. In addition, callers can programmatically
determine the specific rule violation by examining the ErrorCode field of the
type asserted blockchain.RuleError.

# Bitcoin Improvement Proposals
Bitcoin Improvement Proposals

This package includes spec changes outlined by the following BIPs:

BIP0016 (https://en.bitcoin.it/wiki/BIP_0016)
BIP0030 (https://en.bitcoin.it/wiki/BIP_0030)
BIP0034 (https://en.bitcoin.it/wiki/BIP_0034)
*/
package blockchain
@@ -464,9 +464,9 @@ func createSpendTxForTx(tx *wire.MsgTx, fee btcutil.Amount) *wire.MsgTx {
// - A coinbase that pays the required subsidy to an OP_TRUE script
// - When a spendable output is provided:
// - A transaction that spends from the provided output the following outputs:
// - One that pays the inputs amount minus 1 atom to an OP_TRUE script
// - One that contains an OP_RETURN output with a random uint64 in order to
// ensure the transaction has a unique hash
//
// Additionally, if one or more munge functions are specified, they will be
// invoked with the block prior to solving it. This provides callers with the
@@ -27,9 +27,8 @@ type blockProgressLogger struct {

// newBlockProgressLogger returns a new block progress logger.
// The progress message is templated as follows:
//
// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod}
// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp})
func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger {
return &blockProgressLogger{
lastBlockLogTime: time.Now(),
|
|||
|
||||
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||
"github.com/lbryio/lbcd/txscript"
|
||||
"github.com/lbryio/lbcd/wire"
|
||||
btcutil "github.com/lbryio/lbcutil"
|
||||
)
|
||||
|
||||
|
@ -87,7 +86,7 @@ func HashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash.
|
|||
//
|
||||
// The above stored as a linear array is as follows:
|
||||
//
|
||||
// [h1 h2 h3 h4 h12 h34 root]
|
||||
// [h1 h2 h3 h4 h12 h34 root]
|
||||
//
|
||||
// As the above shows, the merkle root is always the last element in the array.
|
||||
//
|
||||
|
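A toy, self-contained illustration of that flattened layout follows. It substitutes string concatenation for the package's double-SHA256 HashMerkleBranches and exists only to show the indexing; all names are invented for the sketch.

```go
package main

import "fmt"

// combine stands in for a real parent-hash function.
func combine(left, right string) string { return "(" + left + right + ")" }

// buildLinearMerkle lays the tree out in the array shape shown above:
// leaves first (padded to a power of two), then each level's parents,
// with the root as the final element.
func buildLinearMerkle(leaves []string) []string {
	nextPoT := 1
	for nextPoT < len(leaves) {
		nextPoT <<= 1
	}
	arr := make([]string, 2*nextPoT-1)
	copy(arr, leaves)

	offset := nextPoT
	for i := 0; i < len(arr)-1; i += 2 {
		switch {
		case arr[i] == "":
			// Parent of two empty slots stays empty.
		case arr[i+1] == "":
			// Odd node: pair it with itself, as Bitcoin's merkle rule does.
			arr[offset] = combine(arr[i], arr[i])
		default:
			arr[offset] = combine(arr[i], arr[i+1])
		}
		offset++
	}
	return arr
}

func main() {
	arr := buildLinearMerkle([]string{"h1", "h2", "h3", "h4"})
	fmt.Println(arr) // [h1 h2 h3 h4 (h1h2) (h3h4) ((h1h2)(h3h4))]
}
```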
@@ -228,20 +227,6 @@ func ValidateWitnessCommitment(blk *btcutil.Block) error {
// coinbase transaction MUST have exactly one witness element within
// its witness data and that element must be exactly
// CoinbaseWitnessDataLen bytes.
//
// Some popular pool software, for example yiimp, uses pre-BIP0141
// coinbase struture. In this case, we don't just accept it, but also
// turn it into post-BIP0141 format.
if len(coinbaseTx.MsgTx().TxIn[0].Witness) == 0 {
log.Infof("pre-BIP0141 coinbase transaction detected. Height: %d", blk.Height())

var witnessNonce [CoinbaseWitnessDataLen]byte
coinbaseTx.MsgTx().TxIn[0].Witness = wire.TxWitness{witnessNonce[:]}
blk.MsgBlock().Transactions[0].TxIn[0].Witness = wire.TxWitness{witnessNonce[:]}

// Clear cached serialized block.
blk.SetBytes(nil)
}
coinbaseWitness := coinbaseTx.MsgTx().TxIn[0].Witness
if len(coinbaseWitness) != 1 {
str := fmt.Sprintf("the coinbase transaction has %d items in "+
@@ -50,9 +50,9 @@ func (n NotificationType) String() string {
// Notification defines notification that is sent to the caller via the callback
// function provided during the call to New and consists of a notification type
// as well as associated data that depends on the type as follows:
// - NTBlockAccepted: *btcutil.Block
// - NTBlockConnected: *btcutil.Block
// - NTBlockDisconnected: *btcutil.Block
type Notification struct {
Type NotificationType
Data interface{}
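Because every documented notification type above carries a *btcutil.Block payload, a subscriber callback can be sketched as follows. This is an assumed example handler, not code from the repository; only the Notification fields and the NTBlock* constants are taken from the package.

```go
package notifyexample

import (
	"log"

	"github.com/lbryio/lbcd/blockchain"
	btcutil "github.com/lbryio/lbcutil"
)

// onNotification relies only on the documented payload mapping: all three
// block notification types deliver a *btcutil.Block in Data.
func onNotification(n *blockchain.Notification) {
	switch n.Type {
	case blockchain.NTBlockAccepted,
		blockchain.NTBlockConnected,
		blockchain.NTBlockDisconnected:
		block, ok := n.Data.(*btcutil.Block)
		if !ok {
			log.Printf("notification %v carried unexpected payload %T", n.Type, n.Data)
			return
		}
		log.Printf("%v: block %v (height %d)", n.Type, block.Hash(), block.Height())
	default:
		log.Printf("unhandled notification type %v", n.Type)
	}
}
```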
@ -232,25 +232,24 @@ func determineMainChainBlocks(blocksMap map[chainhash.Hash]*blockChainContext, t
|
|||
//
|
||||
// The legacy format is as follows:
|
||||
//
|
||||
// <version><height><header code><unspentness bitmap>[<compressed txouts>,...]
|
||||
// <version><height><header code><unspentness bitmap>[<compressed txouts>,...]
|
||||
//
|
||||
// Field Type Size
|
||||
// version VLQ variable
|
||||
// block height VLQ variable
|
||||
// header code VLQ variable
|
||||
// unspentness bitmap []byte variable
|
||||
// compressed txouts
|
||||
// compressed amount VLQ variable
|
||||
// compressed script []byte variable
|
||||
// Field Type Size
|
||||
// version VLQ variable
|
||||
// block height VLQ variable
|
||||
// header code VLQ variable
|
||||
// unspentness bitmap []byte variable
|
||||
// compressed txouts
|
||||
// compressed amount VLQ variable
|
||||
// compressed script []byte variable
|
||||
//
|
||||
// The serialized header code format is:
|
||||
//
|
||||
// bit 0 - containing transaction is a coinbase
|
||||
// bit 1 - output zero is unspent
|
||||
// bit 2 - output one is unspent
|
||||
// bits 3-x - number of bytes in unspentness bitmap. When both bits 1 and 2
|
||||
// are unset, it encodes N-1 since there must be at least one unspent
|
||||
// output.
|
||||
// bit 0 - containing transaction is a coinbase
|
||||
// bit 1 - output zero is unspent
|
||||
// bit 2 - output one is unspent
|
||||
// bits 3-x - number of bytes in unspentness bitmap. When both bits 1 and 2
|
||||
// are unset, it encodes N-1 since there must be at least one unspent
|
||||
// output.
|
||||
//
|
||||
// The rationale for the header code scheme is as follows:
|
||||
// - Transactions which only pay to a single output and a change output are
|
||||
|
@@ -270,65 +269,65 @@ func determineMainChainBlocks(blocksMap map[chainhash.Hash]*blockChainContext, t
//    From tx in main blockchain:
//    Blk 1, 0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098
//
//    010103320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
//    <><><><------------------------------------------------------------------>
//     | | \--------\                               |
//     | height      |                     compressed txout 0
//  version    header code
//
//  - version: 1
//  - height: 1
//  - header code: 0x03 (coinbase, output zero unspent, 0 bytes of unspentness)
//  - unspentness: Nothing since it is zero bytes
//  - compressed txout 0:
//    - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
//    - 0x04: special script type pay-to-pubkey
//    - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// From tx in main blockchain:
// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f
//
//    0185f90b0a011200e2ccd6ec7c6e2e581349c77e067385fa8236bf8a800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
//    <><----><><><------------------------------------------><-------------------------------------------->
//     |    |  | \-------------------\            |                             |
//  version  |  \--------\       unspentness      |                  compressed txout 2
//        height     header code         compressed txout 0
//
//  - version: 1
//  - height: 113931
//  - header code: 0x0a (output zero unspent, 1 byte in unspentness bitmap)
//  - unspentness: [0x01] (bit 0 is set, so output 0+2 = 2 is unspent)
//    NOTE: It's +2 since the first two outputs are encoded in the header code
//  - compressed txout 0:
//    - 0x12: VLQ-encoded compressed amount for 20000000 (0.2 BTC)
//    - 0x00: special script type pay-to-pubkey-hash
//    - 0xe2...8a: pubkey hash
//  - compressed txout 2:
//    - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
//    - 0x00: special script type pay-to-pubkey-hash
//    - 0xb8...58: pubkey hash
//
// Example 3:
// From tx in main blockchain:
// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620
//
//    0193d06c100000108ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
//    <><----><><----><-------------------------------------------------->
//     |    |  |    \-----------------\                    |
//  version  |  \--------\        unspentness              |
//        height    header code               compressed txout 22
//
//  - version: 1
//  - height: 338156
//  - header code: 0x10 (2+1 = 3 bytes in unspentness bitmap)
//    NOTE: It's +1 since neither bit 1 nor 2 are set, so N-1 is encoded.
//  - unspentness: [0x00 0x00 0x10] (bit 20 is set, so output 20+2 = 22 is unspent)
//    NOTE: It's +2 since the first two outputs are encoded in the header code
//  - compressed txout 22:
//    - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
//    - 0x01: special script type pay-to-script-hash
//    - 0x1d...e6: script hash
func deserializeUtxoEntryV0(serialized []byte) (map[uint32]*UtxoEntry, error) {
	// Deserialize the version.
	//
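To make the header-code arithmetic above concrete, here is a standalone sketch that decodes the version, height, and header code of Example 1 using the VLQ scheme the comment describes (MSB marks a continuation byte, and one is added per continuation so each length has a unique encoding). The helper name decodeVLQ is illustrative, not the package's own; only encoding/hex and fmt are needed.

package main

import (
	"encoding/hex"
	"fmt"
)

// decodeVLQ reads one variable-length quantity in the format described above
// and returns the value along with the number of bytes consumed.
func decodeVLQ(b []byte) (uint64, int) {
	var n uint64
	for i, v := range b {
		n = (n << 7) | uint64(v&0x7f)
		if v&0x80 == 0 {
			return n, i + 1
		}
		n++
	}
	return n, len(b)
}

func main() {
	// Example 1 from the comment above (the Blk 1 coinbase output).
	serialized, _ := hex.DecodeString("010103320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52")

	version, off := decodeVLQ(serialized)
	height, n := decodeVLQ(serialized[off:])
	off += n
	headerCode, _ := decodeVLQ(serialized[off:])

	isCoinbase := headerCode&0x01 != 0        // bit 0
	outputZeroUnspent := headerCode&0x02 != 0 // bit 1
	unspentnessBytes := headerCode >> 3       // bits 3+ (no +1 needed here since bit 1 is set)

	fmt.Println(version, height, isCoinbase, outputZeroUnspent, unspentnessBytes)
	// Output: 1 1 true true 0
}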
@@ -13,6 +13,7 @@ import (

	"github.com/lbryio/lbcd/chaincfg"
	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/claimtrie/param"
	"github.com/lbryio/lbcd/txscript"
	"github.com/lbryio/lbcd/wire"
	btcutil "github.com/lbryio/lbcutil"
@@ -334,8 +335,8 @@ func CheckTransactionSanity(tx *btcutil.Tx, enforceSoftFork bool) error {
// target difficulty as claimed.
//
// The flags modify the behavior of this function as follows:
//  - BFNoPoWCheck: The check to ensure the block hash is less than the target
//    difficulty is not performed.
func checkProofOfWork(header *wire.BlockHeader, powLimit *big.Int, flags BehaviorFlags) error {
	// The target difficulty must be larger than zero.
	target := CompactToBig(header.Bits)
@@ -547,7 +548,7 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
	// Do some preliminary checks on each transaction to ensure they are
	// sane before continuing.
	for _, tx := range transactions {
		err := CheckTransactionSanity(tx, false)
		err := CheckTransactionSanity(tx, block.Height() >= param.ActiveParams.GrandForkHeight)
		if err != nil {
			return err
		}
@@ -669,8 +670,8 @@ func checkSerializedHeight(coinbaseTx *btcutil.Tx, wantHeight int32) error {
// which depend on its position within the block chain.
//
// The flags modify the behavior of this function as follows:
//  - BFFastAdd: All checks except those involving comparing the header against
//    the checkpoints are not performed.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode *blockNode, flags BehaviorFlags) error {
@@ -748,8 +749,8 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode
// on its position within the block chain.
//
// The flags modify the behavior of this function as follows:
//  - BFFastAdd: The transactions are not checked to see if they are finalized
//    and the somewhat expensive BIP0034 validation is not performed.
//
// The flags are also passed to checkBlockHeaderContext.  See its documentation
// for how the flags modify its behavior.
@@ -527,7 +527,7 @@ type baseMultTest struct {
	x, y string
}

// TODO: add more test vectors
//TODO: add more test vectors
var s256BaseMultTests = []baseMultTest{
	{
		"AA5E28D6A97A2479A65527F7290311A3624D4CC0FA1578598EE3C2613BF99522",

@@ -556,7 +556,7 @@ var s256BaseMultTests = []baseMultTest{
	},
}

// TODO: test different curves as well?
//TODO: test different curves as well?
func TestBaseMult(t *testing.T) {
	s256 := S256()
	for i, e := range s256BaseMultTests {
@@ -125,30 +125,27 @@ var (
// the arithmetic needed for elliptic curve operations.
//
// The following depicts the internal representation:
//
//   -----------------------------------------------------------------
//  |        n[9]       |        n[8]       | ... |        n[0]       |
//  | 32 bits available | 32 bits available | ... | 32 bits available |
//  | 22 bits for value | 26 bits for value | ... | 26 bits for value |
//  | 10 bits overflow  |  6 bits overflow  | ... |  6 bits overflow  |
//  | Mult: 2^(26*9)    | Mult: 2^(26*8)    | ... | Mult: 2^(26*0)    |
//   -----------------------------------------------------------------
//
// For example, consider the number 2^49 + 1.  It would be represented as:
//
//  n[0] = 1
//  n[1] = 2^23
//  n[2..9] = 0
//
// The full 256-bit value is then calculated by looping i from 9..0 and
// doing sum(n[i] * 2^(26i)) like so:
//
//  n[9] * 2^(26*9) = 0    * 2^234 = 0
//  n[8] * 2^(26*8) = 0    * 2^208 = 0
//  ...
//  n[1] * 2^(26*1) = 2^23 * 2^26  = 2^49
//  n[0] * 2^(26*0) = 1    * 2^0   = 1
//  Sum: 0 + 0 + ... + 2^49 + 1 = 2^49 + 1
type fieldVal struct {
	n [10]uint32
}
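As a quick sanity check of the representation above, a standalone sketch (using math/big purely for the illustration) that recombines the limbs for the 2^49 + 1 example:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Limbs for 2^49 + 1 as described above: n[0] = 1, n[1] = 2^23, rest zero.
	var n [10]uint32
	n[0] = 1
	n[1] = 1 << 23

	// Recombine: sum over i of n[i] * 2^(26*i).
	total := new(big.Int)
	for i := 9; i >= 0; i-- {
		term := new(big.Int).Lsh(big.NewInt(int64(n[i])), uint(26*i))
		total.Add(total, term)
	}

	want := new(big.Int).Add(new(big.Int).Lsh(big.NewInt(1), 49), big.NewInt(1))
	fmt.Println(total, total.Cmp(want) == 0) // 562949953421313 true
}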
@@ -48,15 +48,6 @@ func NewAddNodeCmd(addr string, subCmd AddNodeSubCmd) *AddNodeCmd {
	}
}

// ClearBannedCmd defines the clearbanned JSON-RPC command.
type ClearBannedCmd struct{}

// NewClearBannedCmd returns a new instance which can be used to issue a clearbanned
// JSON-RPC command.
func NewClearBannedCmd() *ClearBannedCmd {
	return &ClearBannedCmd{}
}

// TransactionInput represents the inputs to a transaction.  Specifically a
// transaction hash and output number pair.
type TransactionInput struct {
@@ -67,7 +58,7 @@ type TransactionInput struct {
// CreateRawTransactionCmd defines the createrawtransaction JSON-RPC command.
type CreateRawTransactionCmd struct {
	Inputs   []TransactionInput
	Outputs  map[string]interface{} `jsonrpcusage:"{\"address\":amount, \"data\":\"hex\", ...}"`
	Amounts  map[string]float64     `jsonrpcusage:"{\"address\":amount,...}"` // In BTC
	LockTime *int64
}
@@ -76,7 +67,7 @@ type CreateRawTransactionCmd struct {
//
// Amounts are in BTC.  Passing in nil and the empty slice as inputs is equivalent,
// both get interpreted as the empty slice.
func NewCreateRawTransactionCmd(inputs []TransactionInput, outputs map[string]interface{},
func NewCreateRawTransactionCmd(inputs []TransactionInput, amounts map[string]float64,
	lockTime *int64) *CreateRawTransactionCmd {
	// to make sure we're serializing this to the empty list and not null, we
	// explicitly initialize the list

@@ -85,7 +76,7 @@ func NewCreateRawTransactionCmd(inputs []TransactionInput, outputs map[string]in
	}
	return &CreateRawTransactionCmd{
		Inputs:   inputs,
		Outputs:  outputs,
		Amounts:  amounts,
		LockTime: lockTime,
	}
}
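A short usage sketch of the reworked constructor. It assumes the lbcd btcjson package and its MarshalCmd/RpcVersion1 helpers; the request id value 1 is arbitrary, and the expected output mirrors the marshalled form exercised in the test changes further below.

package main

import (
	"fmt"

	"github.com/lbryio/lbcd/btcjson"
)

func main() {
	inputs := []btcjson.TransactionInput{{Txid: "123", Vout: 1}}
	amounts := map[string]float64{"456": 0.0123} // address -> amount in BTC

	cmd := btcjson.NewCreateRawTransactionCmd(inputs, amounts, btcjson.Int64(12312333333))

	// Marshal into a JSON-RPC request body.
	b, err := btcjson.MarshalCmd(btcjson.RpcVersion1, 1, cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"jsonrpc":"1.0","method":"createrawtransaction","params":[[{"txid":"123","vout":1}],{"456":0.0123},12312333333],"id":1}
}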
@@ -659,7 +650,7 @@ func NewGetRawMempoolCmd(verbose *bool) *GetRawMempoolCmd {
// Core even though it really should be a bool.
type GetRawTransactionCmd struct {
	Txid    string
	Verbose *bool `jsonrpcdefault:"false"`
	Verbose *int `jsonrpcdefault:"0"`
}

// NewGetRawTransactionCmd returns a new instance which can be used to issue a

@@ -667,7 +658,7 @@ type GetRawTransactionCmd struct {
//
// The parameters which are pointers indicate they are optional.  Passing nil
// for optional parameters will use the default value.
func NewGetRawTransactionCmd(txHash string, verbose *bool) *GetRawTransactionCmd {
func NewGetRawTransactionCmd(txHash string, verbose *int) *GetRawTransactionCmd {
	return &GetRawTransactionCmd{
		Txid:    txHash,
		Verbose: verbose,
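With Verbose now an *int (0 for raw hex, non-zero for a decoded object, matching Bitcoin Core and lbrycrd), a request is built like the following sketch. It assumes the same btcjson helpers used in the tests below, and the expected output matches the marshalled test vector for this command.

package main

import (
	"fmt"

	"github.com/lbryio/lbcd/btcjson"
)

func main() {
	// verbose is now an integer flag instead of a boolean.
	cmd := btcjson.NewGetRawTransactionCmd("123", btcjson.Int(1))

	b, err := btcjson.MarshalCmd(btcjson.RpcVersion1, 1, cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"jsonrpc":"1.0","method":"getrawtransaction","params":["123",1],"id":1}
}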
@ -766,15 +757,6 @@ func NewInvalidateBlockCmd(blockHash string) *InvalidateBlockCmd {
|
|||
}
|
||||
}
|
||||
|
||||
// ListBannedCmd defines the listbanned JSON-RPC command.
|
||||
type ListBannedCmd struct{}
|
||||
|
||||
// NewListBannedCmd returns a new instance which can be used to issue a listbanned
|
||||
// JSON-RPC command.
|
||||
func NewListBannedCmd() *ListBannedCmd {
|
||||
return &ListBannedCmd{}
|
||||
}
|
||||
|
||||
// PingCmd defines the ping JSON-RPC command.
|
||||
type PingCmd struct{}
|
||||
|
||||
|
@ -921,39 +903,6 @@ func NewBitcoindSendRawTransactionCmd(hexTx string, maxFeeRate int32) *SendRawTr
|
|||
}
|
||||
}
|
||||
|
||||
// SetBanSubCmd defines the type used in the setban JSON-RPC command for the
|
||||
// sub command field.
|
||||
type SetBanSubCmd string
|
||||
|
||||
const (
|
||||
// SBAdd indicates the specified host should be added as a persistent
|
||||
// peer.
|
||||
SBAdd SetBanSubCmd = "add"
|
||||
|
||||
// SBRemove indicates the specified peer should be removed.
|
||||
SBRemove SetBanSubCmd = "remove"
|
||||
)
|
||||
|
||||
// SetBanCmd defines the setban JSON-RPC command.
|
||||
type SetBanCmd struct {
|
||||
Addr string
|
||||
SubCmd SetBanSubCmd `jsonrpcusage:"\"add|remove\""`
|
||||
BanTime *int `jsonrpcdefault:"0"`
|
||||
Absolute *bool `jsonrpcdefault:"false"`
|
||||
}
|
||||
|
||||
// NewSetBanCmd returns a new instance which can be used to issue an setban
|
||||
// JSON-RPC command.
|
||||
func NewSetBanCmd(addr string, subCmd SetBanSubCmd, banTime *int,
|
||||
absolute *bool) *SetBanCmd {
|
||||
return &SetBanCmd{
|
||||
Addr: addr,
|
||||
SubCmd: subCmd,
|
||||
BanTime: banTime,
|
||||
Absolute: absolute,
|
||||
}
|
||||
}
|
||||
|
||||
// SetGenerateCmd defines the setgenerate JSON-RPC command.
|
||||
type SetGenerateCmd struct {
|
||||
Generate bool
|
||||
|
@ -1131,9 +1080,6 @@ func init() {
|
|||
MustRegisterCmd("getnetworkhashps", (*GetNetworkHashPSCmd)(nil), flags)
|
||||
MustRegisterCmd("getnodeaddresses", (*GetNodeAddressesCmd)(nil), flags)
|
||||
MustRegisterCmd("getpeerinfo", (*GetPeerInfoCmd)(nil), flags)
|
||||
MustRegisterCmd("listbanned", (*ListBannedCmd)(nil), flags)
|
||||
MustRegisterCmd("setban", (*SetBanCmd)(nil), flags)
|
||||
MustRegisterCmd("clearbanned", (*ClearBannedCmd)(nil), flags)
|
||||
MustRegisterCmd("getrawmempool", (*GetRawMempoolCmd)(nil), flags)
|
||||
MustRegisterCmd("getrawtransaction", (*GetRawTransactionCmd)(nil), flags)
|
||||
MustRegisterCmd("gettxout", (*GetTxOutCmd)(nil), flags)
|
||||
|
|
|
@ -52,13 +52,13 @@ func TestChainSvrCmds(t *testing.T) {
|
|||
txInputs := []btcjson.TransactionInput{
|
||||
{Txid: "123", Vout: 1},
|
||||
}
|
||||
txOutputs := map[string]interface{}{"456": .0123}
|
||||
return btcjson.NewCreateRawTransactionCmd(txInputs, txOutputs, nil)
|
||||
amounts := map[string]float64{"456": .0123}
|
||||
return btcjson.NewCreateRawTransactionCmd(txInputs, amounts, nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"createrawtransaction","params":[[{"txid":"123","vout":1}],{"456":0.0123}],"id":1}`,
|
||||
unmarshalled: &btcjson.CreateRawTransactionCmd{
|
||||
Inputs: []btcjson.TransactionInput{{Txid: "123", Vout: 1}},
|
||||
Outputs: map[string]interface{}{"456": .0123},
|
||||
Amounts: map[string]float64{"456": .0123},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -67,13 +67,13 @@ func TestChainSvrCmds(t *testing.T) {
|
|||
return btcjson.NewCmd("createrawtransaction", `[]`, `{"456":0.0123}`)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
txOutputs := map[string]interface{}{"456": .0123}
|
||||
return btcjson.NewCreateRawTransactionCmd(nil, txOutputs, nil)
|
||||
amounts := map[string]float64{"456": .0123}
|
||||
return btcjson.NewCreateRawTransactionCmd(nil, amounts, nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"createrawtransaction","params":[[],{"456":0.0123}],"id":1}`,
|
||||
unmarshalled: &btcjson.CreateRawTransactionCmd{
|
||||
Inputs: []btcjson.TransactionInput{},
|
||||
Outputs: map[string]interface{}{"456": .0123},
|
||||
Amounts: map[string]float64{"456": .0123},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -86,35 +86,16 @@ func TestChainSvrCmds(t *testing.T) {
|
|||
txInputs := []btcjson.TransactionInput{
|
||||
{Txid: "123", Vout: 1},
|
||||
}
|
||||
txOutputs := map[string]interface{}{"456": .0123}
|
||||
return btcjson.NewCreateRawTransactionCmd(txInputs, txOutputs, btcjson.Int64(12312333333))
|
||||
amounts := map[string]float64{"456": .0123}
|
||||
return btcjson.NewCreateRawTransactionCmd(txInputs, amounts, btcjson.Int64(12312333333))
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"createrawtransaction","params":[[{"txid":"123","vout":1}],{"456":0.0123},12312333333],"id":1}`,
|
||||
unmarshalled: &btcjson.CreateRawTransactionCmd{
|
||||
Inputs: []btcjson.TransactionInput{{Txid: "123", Vout: 1}},
|
||||
Outputs: map[string]interface{}{"456": .0123},
|
||||
Amounts: map[string]float64{"456": .0123},
|
||||
LockTime: btcjson.Int64(12312333333),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "createrawtransaction with data",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("createrawtransaction", `[{"txid":"123","vout":1}]`,
|
||||
`{"data":"6a134920616d204672616374616c456e6372797074"}`)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
txInputs := []btcjson.TransactionInput{
|
||||
{Txid: "123", Vout: 1},
|
||||
}
|
||||
txOutputs := map[string]interface{}{"data": "6a134920616d204672616374616c456e6372797074"}
|
||||
return btcjson.NewCreateRawTransactionCmd(txInputs, txOutputs, nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"createrawtransaction","params":[[{"txid":"123","vout":1}],{"data":"6a134920616d204672616374616c456e6372797074"}],"id":1}`,
|
||||
unmarshalled: &btcjson.CreateRawTransactionCmd{
|
||||
Inputs: []btcjson.TransactionInput{{Txid: "123", Vout: 1}},
|
||||
Outputs: map[string]interface{}{"data": "6a134920616d204672616374616c456e6372797074"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fundrawtransaction - empty opts",
|
||||
newCmd: func() (i interface{}, e error) {
|
||||
|
@ -891,21 +872,21 @@ func TestChainSvrCmds(t *testing.T) {
|
|||
marshalled: `{"jsonrpc":"1.0","method":"getrawtransaction","params":["123"],"id":1}`,
|
||||
unmarshalled: &btcjson.GetRawTransactionCmd{
|
||||
Txid: "123",
|
||||
Verbose: btcjson.Bool(false),
|
||||
Verbose: btcjson.Int(0),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "getrawtransaction optional",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("getrawtransaction", "123", true)
|
||||
return btcjson.NewCmd("getrawtransaction", "123", 1)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewGetRawTransactionCmd("123", btcjson.Bool(true))
|
||||
return btcjson.NewGetRawTransactionCmd("123", btcjson.Int(1))
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getrawtransaction","params":["123",true],"id":1}`,
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getrawtransaction","params":["123",1],"id":1}`,
|
||||
unmarshalled: &btcjson.GetRawTransactionCmd{
|
||||
Txid: "123",
|
||||
Verbose: btcjson.Bool(true),
|
||||
Verbose: btcjson.Int(1),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
|
@ -35,37 +35,35 @@ type GetBlockHeaderVerboseResult struct {
|
|||
}
|
||||
|
||||
// GetBlockStatsResult models the data from the getblockstats command.
|
||||
// Pointers are used instead of values to allow for optional fields.
|
||||
type GetBlockStatsResult struct {
|
||||
AverageFee *int64 `json:"avgfee,omitempty"`
|
||||
AverageFeeRate *int64 `json:"avgfeerate,omitempty"`
|
||||
AverageTxSize *int64 `json:"avgtxsize,omitempty"`
|
||||
FeeratePercentiles *[]int64 `json:"feerate_percentiles,omitempty"`
|
||||
Hash *string `json:"blockhash,omitempty"`
|
||||
Height *int64 `json:"height,omitempty"`
|
||||
Ins *int64 `json:"ins,omitempty"`
|
||||
MaxFee *int64 `json:"maxfee,omitempty"`
|
||||
MaxFeeRate *int64 `json:"maxfeerate,omitempty"`
|
||||
MaxTxSize *int64 `json:"maxtxsize,omitempty"`
|
||||
MedianFee *int64 `json:"medianfee,omitempty"`
|
||||
MedianTime *int64 `json:"mediantime,omitempty"`
|
||||
MedianTxSize *int64 `json:"mediantxsize,omitempty"`
|
||||
MinFee *int64 `json:"minfee,omitempty"`
|
||||
MinFeeRate *int64 `json:"minfeerate,omitempty"`
|
||||
MinTxSize *int64 `json:"mintxsize,omitempty"`
|
||||
Outs *int64 `json:"outs,omitempty"`
|
||||
SegWitTotalSize *int64 `json:"swtotal_size,omitempty"`
|
||||
SegWitTotalWeight *int64 `json:"swtotal_weight,omitempty"`
|
||||
SegWitTxs *int64 `json:"swtxs,omitempty"`
|
||||
Subsidy *int64 `json:"subsidy,omitempty"`
|
||||
Time *int64 `json:"time,omitempty"`
|
||||
TotalOut *int64 `json:"total_out,omitempty"`
|
||||
TotalSize *int64 `json:"total_size,omitempty"`
|
||||
TotalWeight *int64 `json:"total_weight,omitempty"`
|
||||
TotalFee *int64 `json:"totalfee,omitempty"`
|
||||
Txs *int64 `json:"txs,omitempty"`
|
||||
UTXOIncrease *int64 `json:"utxo_increase,omitempty"`
|
||||
UTXOSizeIncrease *int64 `json:"utxo_size_inc,omitempty"`
|
||||
AverageFee int64 `json:"avgfee"`
|
||||
AverageFeeRate int64 `json:"avgfeerate"`
|
||||
AverageTxSize int64 `json:"avgtxsize"`
|
||||
FeeratePercentiles []int64 `json:"feerate_percentiles"`
|
||||
Hash string `json:"blockhash"`
|
||||
Height int64 `json:"height"`
|
||||
Ins int64 `json:"ins"`
|
||||
MaxFee int64 `json:"maxfee"`
|
||||
MaxFeeRate int64 `json:"maxfeerate"`
|
||||
MaxTxSize int64 `json:"maxtxsize"`
|
||||
MedianFee int64 `json:"medianfee"`
|
||||
MedianTime int64 `json:"mediantime"`
|
||||
MedianTxSize int64 `json:"mediantxsize"`
|
||||
MinFee int64 `json:"minfee"`
|
||||
MinFeeRate int64 `json:"minfeerate"`
|
||||
MinTxSize int64 `json:"mintxsize"`
|
||||
Outs int64 `json:"outs"`
|
||||
SegWitTotalSize int64 `json:"swtotal_size"`
|
||||
SegWitTotalWeight int64 `json:"swtotal_weight"`
|
||||
SegWitTxs int64 `json:"swtxs"`
|
||||
Subsidy int64 `json:"subsidy"`
|
||||
Time int64 `json:"time"`
|
||||
TotalOut int64 `json:"total_out"`
|
||||
TotalSize int64 `json:"total_size"`
|
||||
TotalWeight int64 `json:"total_weight"`
|
||||
Txs int64 `json:"txs"`
|
||||
UTXOIncrease int64 `json:"utxo_increase"`
|
||||
UTXOSizeIncrease int64 `json:"utxo_size_inc"`
|
||||
}
|
||||
|
||||
type GetBlockVerboseResultBase struct {
|
||||
|
@ -79,11 +77,9 @@ type GetBlockVerboseResultBase struct {
|
|||
VersionHex string `json:"versionHex"`
|
||||
MerkleRoot string `json:"merkleroot"`
|
||||
Time int64 `json:"time"`
|
||||
MedianTime int64 `json:"mediantime"`
|
||||
Nonce uint32 `json:"nonce"`
|
||||
Bits string `json:"bits"`
|
||||
Difficulty float64 `json:"difficulty"`
|
||||
ChainWork string `json:"chainwork"`
|
||||
PreviousHash string `json:"previousblockhash,omitempty"`
|
||||
NextHash string `json:"nextblockhash,omitempty"`
|
||||
|
||||
|
@ -327,7 +323,6 @@ type GetMempoolEntryResult struct {
|
|||
WTxId string `json:"wtxid"`
|
||||
Fees MempoolFees `json:"fees"`
|
||||
Depends []string `json:"depends"`
|
||||
SpentBy []string `json:"spentby"`
|
||||
}
|
||||
|
||||
// GetChainTipsResult models the data returns from the getchaintips command.
|
||||
|
@ -341,13 +336,8 @@ type GetChainTipsResult struct {
|
|||
// GetMempoolInfoResult models the data returned from the getmempoolinfo
|
||||
// command.
|
||||
type GetMempoolInfoResult struct {
|
||||
Size int64 `json:"size"` // Current tx count
|
||||
Bytes int64 `json:"bytes"` // Sum of all virtual transaction sizes as defined in BIP 141. Differs from actual serialized size because witness data is discounted
|
||||
Usage int64 `json:"usage"` // Total memory usage for the mempool
|
||||
TotalFee float64 `json:"total_fee"` // Total fees for the mempool in LBC, ignoring modified fees through prioritizetransaction
|
||||
MemPoolMinFee float64 `json:"mempoolminfee"` // Minimum fee rate in LBC/kvB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee
|
||||
MinRelayTxFee float64 `json:"minrelaytxfee"` // Current minimum relay fee for transactions
|
||||
UnbroadcastCount int64 `json:"unbroadcastcount"` // Current number of transactions that haven't passed initial broadcast yet
|
||||
Size int64 `json:"size"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
}
|
||||
|
||||
// NetworksResult models the networks data from the getnetworkinfo command.
|
||||
|
@ -723,15 +713,6 @@ type InfoChainResult struct {
|
|||
Errors string `json:"errors"`
|
||||
}
|
||||
|
||||
// ListBannedResult models the data returned from the listbanned command.
|
||||
type ListBannedResult struct {
|
||||
Address string `json:"address"`
|
||||
BanCreated int64 `json:"ban_created"`
|
||||
BannedUntil int64 `json:"banned_until"`
|
||||
BanDuration int64 `json:"ban_duration"`
|
||||
TimeRemaining int64 `json:"time_remaining"`
|
||||
}
|
||||
|
||||
// TxRawResult models the data from the getrawtransaction command.
|
||||
type TxRawResult struct {
|
||||
Hex string `json:"hex"`
|
||||
|
|
|
@ -10,6 +10,10 @@ func init() {
|
|||
MustRegisterCmd("getclaimsfornamebybid", (*GetClaimsForNameByBidCmd)(nil), flags)
|
||||
MustRegisterCmd("getclaimsfornamebyseq", (*GetClaimsForNameBySeqCmd)(nil), flags)
|
||||
MustRegisterCmd("normalize", (*GetNormalizedCmd)(nil), flags)
|
||||
|
||||
MustRegisterCmd("getprooffornamebyid", (*GetProofForNameByIDCmd)(nil), flags)
|
||||
MustRegisterCmd("getprooffornamebybid", (*GetProofForNameByBidCmd)(nil), flags)
|
||||
MustRegisterCmd("getprooffornamebyseq", (*GetProofForNameBySeqCmd)(nil), flags)
|
||||
}
|
||||
|
||||
// optional inputs are required to be pointers, but they support things like `jsonrpcdefault:"false"`
|
||||
|
@ -95,3 +99,36 @@ type GetNormalizedCmd struct {
|
|||
type GetNormalizedResult struct {
|
||||
NormalizedName string `json:"normalizedname"`
|
||||
}
|
||||
|
||||
type GetProofForNameByIDCmd struct {
|
||||
Name string `json:"name"`
|
||||
PartialClaimID string `json:"partialclaimid"`
|
||||
}
|
||||
|
||||
type GetProofForNameByBidCmd struct {
|
||||
Name string `json:"name"`
|
||||
Bid int `json:"bid"`
|
||||
}
|
||||
|
||||
type GetProofForNameBySeqCmd struct {
|
||||
Name string `json:"name"`
|
||||
Sequence int `json:"sequence"`
|
||||
}
|
||||
|
||||
type ProofPairResult struct {
|
||||
Right bool `json:"right"`
|
||||
Hash string `json:"hash"`
|
||||
}
|
||||
|
||||
type ProofResult struct { // should we include the claim trie hash?
|
||||
BlockHash string `json:"blockhash"`
|
||||
BlockHeight int32 `json:"blockheight"`
|
||||
NormalizedName string `json:"normalizedname"`
|
||||
ClaimID string `json:"claimid"`
|
||||
TXID string `json:"txid"`
|
||||
N uint32 `json:"n"`
|
||||
Bid int32 `json:"bid"`
|
||||
Sequence int32 `json:"sequence"`
|
||||
Takeover int32 `json:"takeover"`
|
||||
Pairs []ProofPairResult `json:"pairs"`
|
||||
}
|
||||
|
|
|
@@ -134,16 +134,6 @@ func UnmarshalCmd(r *Request) (interface{}, error) {
		// Unmarshal the parameter into the struct field.
		concreteVal := rvf.Addr().Interface()
		if err := json.Unmarshal(r.Params[i], &concreteVal); err != nil {
			// Parse Integer into Bool for compatibility with lbrycrd.
			if rvf.Kind() == reflect.Ptr &&
				rvf.Elem().Type().Kind() == reflect.Bool {
				boolInt, errBoolInt := strconv.Atoi(string(r.Params[i]))
				if errBoolInt == nil {
					rvf.Elem().SetBool(boolInt != 0)
					continue
				}
			}

			// The most common error is the wrong type, so
			// explicitly detect that error and make it nicer.
			fieldName := strings.ToLower(rt.Field(i).Name)
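For reference, the compatibility shim being removed above coerced integer params into boolean struct fields roughly as in this standalone sketch. The function name setBoolParam is illustrative; the behavior (0 is false, non-zero is true, after a failed normal unmarshal) is what the removed branch implemented via reflection.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"
)

// setBoolParam tries a normal JSON unmarshal first and, on failure, falls back
// to interpreting an integer as a boolean, matching the removed lbrycrd
// compatibility branch above.
func setBoolParam(raw json.RawMessage, dst *bool) error {
	if err := json.Unmarshal(raw, dst); err == nil {
		return nil
	}
	n, err := strconv.Atoi(string(raw))
	if err != nil {
		return err
	}
	// The original code used reflection because it wrote into an arbitrary
	// struct field; the effect is the same as this assignment.
	reflect.ValueOf(dst).Elem().SetBool(n != 0)
	return nil
}

func main() {
	var verbose bool
	for _, raw := range []json.RawMessage{[]byte("true"), []byte("0"), []byte("100")} {
		_ = setBoolParam(raw, &verbose)
		fmt.Println(string(raw), "->", verbose) // true -> true, 0 -> false, 100 -> true
	}
}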
@ -557,7 +557,7 @@ func TestUnmarshalCmdErrors(t *testing.T) {
|
|||
request: btcjson.Request{
|
||||
Jsonrpc: btcjson.RpcVersion1,
|
||||
Method: "getblock",
|
||||
Params: []json.RawMessage{[]byte("1.0")},
|
||||
Params: []json.RawMessage{[]byte("1")},
|
||||
ID: nil,
|
||||
},
|
||||
err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType},
|
||||
|
@ -591,84 +591,3 @@ func TestUnmarshalCmdErrors(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestUnmarshalCmdBoolParams tests the parsing of boolean paramers of the UnmarshalCmd function.
|
||||
func TestUnmarshalCmdBoolParams(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
txid := []byte(`"ab91c149aff2b37a4a1856e9935ea623c973f47886d032ed7511ad8ca37855bb"`)
|
||||
tests := []struct {
|
||||
name string
|
||||
request btcjson.Request
|
||||
expect bool
|
||||
}{
|
||||
{
|
||||
name: "parse true",
|
||||
request: btcjson.Request{
|
||||
Jsonrpc: btcjson.RpcVersion1,
|
||||
Method: "getrawtransaction",
|
||||
Params: []json.RawMessage{txid, []byte("true")},
|
||||
ID: nil,
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "parse false",
|
||||
request: btcjson.Request{
|
||||
Jsonrpc: btcjson.RpcVersion1,
|
||||
Method: "getrawtransaction",
|
||||
Params: []json.RawMessage{txid, []byte("false")},
|
||||
ID: nil,
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "parse integer 0 to false",
|
||||
request: btcjson.Request{
|
||||
Jsonrpc: btcjson.RpcVersion1,
|
||||
Method: "getrawtransaction",
|
||||
Params: []json.RawMessage{txid, []byte("0")},
|
||||
ID: nil,
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "parse integer 1 to true",
|
||||
request: btcjson.Request{
|
||||
Jsonrpc: btcjson.RpcVersion1,
|
||||
Method: "getrawtransaction",
|
||||
Params: []json.RawMessage{txid, []byte("1")},
|
||||
ID: nil,
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "parse integer 100 to true",
|
||||
request: btcjson.Request{
|
||||
Jsonrpc: btcjson.RpcVersion1,
|
||||
Method: "getrawtransaction",
|
||||
Params: []json.RawMessage{txid, []byte("100")},
|
||||
ID: nil,
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
cmd, err := btcjson.UnmarshalCmd(&test.request)
|
||||
if err != nil {
|
||||
t.Errorf("Test #%d (%s) error - got %T (%v)", i, test.name,
|
||||
err, err)
|
||||
continue
|
||||
}
|
||||
cc := cmd.(*btcjson.GetRawTransactionCmd)
|
||||
verbose := *cc.Verbose
|
||||
if verbose != test.expect {
|
||||
t.Errorf("Test #%d (%s) got %t, want %v", i, test.name,
|
||||
verbose, test.expect)
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -5,7 +5,7 @@
/*
Package btcjson provides primitives for working with the bitcoin JSON-RPC API.

# Overview
Overview

When communicating via the JSON-RPC protocol, all of the commands need to be
marshalled to and from the wire in the appropriate format.  This package

@@ -14,7 +14,7 @@ provides data structures and primitives to ease this process.
In addition, it also provides some additional features such as custom command
registration, command categorization, and reflection-based help generation.

# JSON-RPC Protocol Overview
JSON-RPC Protocol Overview

This information is not necessary in order to use this package, but it does
provide some intuition into what the marshalling and unmarshalling that is

@@ -47,39 +47,39 @@ with it) doesn't always follow the spec and will sometimes return an error
string in the result field with a null error for certain commands.  However,
for the most part, the error field will be set as described on failure.

# Marshalling and Unmarshalling
Marshalling and Unmarshalling

Based upon the discussion above, it should be easy to see how the types of this
package map into the required parts of the protocol

  - Request Objects (type Request)
    - Commands (type <Foo>Cmd)
    - Notifications (type <Foo>Ntfn)
  - Response Objects (type Response)
    - Result (type <Foo>Result)

To simplify the marshalling of the requests and responses, the MarshalCmd and
MarshalResponse functions are provided.  They return the raw bytes ready to be
sent across the wire.

Unmarshalling a received Request object is a two step process:
 1. Unmarshal the raw bytes into a Request struct instance via json.Unmarshal
 2. Use UnmarshalCmd on the Result field of the unmarshalled Request to create
    a concrete command or notification instance with all struct fields set
    accordingly
 1) Unmarshal the raw bytes into a Request struct instance via json.Unmarshal
 2) Use UnmarshalCmd on the Result field of the unmarshalled Request to create
    a concrete command or notification instance with all struct fields set
    accordingly

This approach is used since it provides the caller with access to the additional
fields in the request that are not part of the command such as the ID.

Unmarshalling a received Response object is also a two step process:
 1. Unmarshal the raw bytes into a Response struct instance via json.Unmarshal
 2. Depending on the ID, unmarshal the Result field of the unmarshalled
    Response to create a concrete type instance
 1) Unmarshal the raw bytes into a Response struct instance via json.Unmarshal
 2) Depending on the ID, unmarshal the Result field of the unmarshalled
    Response to create a concrete type instance

As above, this approach is used since it provides the caller with access to the
fields in the response such as the ID and Error.

# Command Creation
Command Creation

This package provides two approaches for creating a new command.  This first,
and preferred, method is to use one of the New<Foo>Cmd functions.  This allows
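A compact sketch of the two-step request handling just described, assuming the lbcd btcjson package; the raw request body and the id value are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/lbryio/lbcd/btcjson"
)

func main() {
	raw := []byte(`{"jsonrpc":"1.0","method":"getblockcount","params":[],"id":1}`)

	// Step 1: unmarshal the raw bytes into a generic Request.
	var req btcjson.Request
	if err := json.Unmarshal(raw, &req); err != nil {
		panic(err)
	}

	// Step 2: turn the registered method and its params into a concrete command.
	cmd, err := btcjson.UnmarshalCmd(&req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", cmd) // *btcjson.GetBlockCountCmd
}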
@@ -93,7 +93,7 @@ obviously, run-time which means any mistakes won't be found until the code is
actually executed.  However, it is quite useful for user-supplied commands
that are intentionally dynamic.

# Custom Command Registration
Custom Command Registration

The command handling of this package is built around the concept of registered
commands.  This is true for the wide variety of commands already provided by the

@@ -104,7 +104,7 @@ function for this purpose.
A list of all registered methods can be obtained with the RegisteredCmdMethods
function.

# Command Inspection
Command Inspection

All registered commands are registered with flags that identify information such
as whether the command applies to a chain server, wallet server, or is a

@@ -112,7 +112,7 @@ notification along with the method name to use. These flags can be obtained
with the MethodUsageFlags flags, and the method can be obtained with the
CmdMethod function.

# Help Generation
Help Generation

To facilitate providing consistent help to users of the RPC server, this package
exposes the GenerateHelp function which uses reflection on registered

@@ -122,7 +122,7 @@ generate the final help text.
In addition, the MethodUsageText function is provided to generate consistent
one-line usage for registered commands and notifications using reflection.

# Errors
Errors

There are 2 distinct types of errors supported by this package:
@@ -476,12 +476,11 @@ func isValidResultType(kind reflect.Kind) bool {
// an error will use the key in place of the description.
//
// The following outlines the required keys:
//
//   "<method>--synopsis"             Synopsis for the command
//   "<method>-<lowerfieldname>"      Description for each command argument
//   "<typename>-<lowerfieldname>"    Description for each object field
//   "<method>--condition<#>"         Description for each result condition
//   "<method>--result<#>"            Description for each primitive result num
//
// Notice that the "special" keys synopsis, condition<#>, and result<#> are
// preceded by a double dash to ensure they don't conflict with field names.

@@ -493,17 +492,16 @@ func isValidResultType(kind reflect.Kind) bool {
// For example, consider the 'help' command itself.  There are two possible
// returns depending on the provided parameters.  So, the help would be
// generated by calling the function as follows:
//
//   GenerateHelp("help", descs, (*string)(nil), (*string)(nil)).
//
// The following keys would then be required in the provided descriptions map:
//
//   "help--synopsis":   "Returns a list of all commands or help for ...."
//   "help-command":     "The command to retrieve help for",
//   "help--condition0": "no command provided"
//   "help--condition1": "command specified"
//   "help--result0":    "List of commands"
//   "help--result1":    "Help for specified command"
func GenerateHelp(method string, descs map[string]string, resultTypes ...interface{}) (string, error) {
	// Look up details about the provided method and error out if not
	// registered.
@ -176,13 +176,12 @@ func NewGetAccountCmd(address string) *GetAccountCmd {
|
|||
|
||||
// GetAccountAddressCmd defines the getaccountaddress JSON-RPC command.
|
||||
type GetAccountAddressCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
AddressType *string `jsonrpcdefault:"\"legacy\""`
|
||||
Account string
|
||||
}
|
||||
|
||||
// NewGetAccountAddressCmd returns a new instance which can be used to issue a
|
||||
// getaccountaddress JSON-RPC command.
|
||||
func NewGetAccountAddressCmd(account *string) *GetAccountAddressCmd {
|
||||
func NewGetAccountAddressCmd(account string) *GetAccountAddressCmd {
|
||||
return &GetAccountAddressCmd{
|
||||
Account: account,
|
||||
}
|
||||
|
@ -190,13 +189,12 @@ func NewGetAccountAddressCmd(account *string) *GetAccountAddressCmd {
|
|||
|
||||
// GetAddressesByAccountCmd defines the getaddressesbyaccount JSON-RPC command.
|
||||
type GetAddressesByAccountCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
AddressType *string `jsonrpcdefault:"\"*\""`
|
||||
Account string
|
||||
}
|
||||
|
||||
// NewGetAddressesByAccountCmd returns a new instance which can be used to issue
|
||||
// a getaddressesbyaccount JSON-RPC command.
|
||||
func NewGetAddressesByAccountCmd(account *string) *GetAddressesByAccountCmd {
|
||||
func NewGetAddressesByAccountCmd(account string) *GetAddressesByAccountCmd {
|
||||
return &GetAddressesByAccountCmd{
|
||||
Account: account,
|
||||
}
|
||||
|
@ -217,9 +215,8 @@ func NewGetAddressInfoCmd(address string) *GetAddressInfoCmd {
|
|||
|
||||
// GetBalanceCmd defines the getbalance JSON-RPC command.
|
||||
type GetBalanceCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
AddressType *string `jsonrpcdefault:"\"*\""`
|
||||
Account *string
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
}
|
||||
|
||||
// NewGetBalanceCmd returns a new instance which can be used to issue a
|
||||
|
@ -245,8 +242,8 @@ func NewGetBalancesCmd() *GetBalancesCmd {
|
|||
|
||||
// GetNewAddressCmd defines the getnewaddress JSON-RPC command.
|
||||
type GetNewAddressCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
AddressType *string `jsonrpcdefault:"\"legacy\""`
|
||||
Account *string
|
||||
AddressType *string // must be one of legacy / p2pkh or p2sh-p2wkh / p2sh-segwit, or p2wkh / bech32
|
||||
}
|
||||
|
||||
// NewGetNewAddressCmd returns a new instance which can be used to issue a
|
||||
|
@ -262,8 +259,7 @@ func NewGetNewAddressCmd(account *string) *GetNewAddressCmd {
|
|||
|
||||
// GetRawChangeAddressCmd defines the getrawchangeaddress JSON-RPC command.
|
||||
type GetRawChangeAddressCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
AddressType *string `jsonrpcdefault:"\"legacy\""`
|
||||
Account *string
|
||||
}
|
||||
|
||||
// NewGetRawChangeAddressCmd returns a new instance which can be used to issue a
|
||||
|
@ -279,8 +275,8 @@ func NewGetRawChangeAddressCmd(account *string) *GetRawChangeAddressCmd {
|
|||
|
||||
// GetReceivedByAccountCmd defines the getreceivedbyaccount JSON-RPC command.
|
||||
type GetReceivedByAccountCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
Account string
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
}
|
||||
|
||||
// NewGetReceivedByAccountCmd returns a new instance which can be used to issue
|
||||
|
@ -288,7 +284,7 @@ type GetReceivedByAccountCmd struct {
|
|||
//
|
||||
// The parameters which are pointers indicate they are optional. Passing nil
|
||||
// for optional parameters will use the default value.
|
||||
func NewGetReceivedByAccountCmd(account *string, minConf *int) *GetReceivedByAccountCmd {
|
||||
func NewGetReceivedByAccountCmd(account string, minConf *int) *GetReceivedByAccountCmd {
|
||||
return &GetReceivedByAccountCmd{
|
||||
Account: account,
|
||||
MinConf: minConf,
|
||||
|
@ -411,8 +407,7 @@ func NewKeyPoolRefillCmd(newSize *uint) *KeyPoolRefillCmd {
|
|||
|
||||
// ListAccountsCmd defines the listaccounts JSON-RPC command.
|
||||
type ListAccountsCmd struct {
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
AddressType *string `jsonrpcdefault:"\"*\""`
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
}
|
||||
|
||||
// NewListAccountsCmd returns a new instance which can be used to issue a
|
||||
|
@ -506,10 +501,10 @@ func NewListSinceBlockCmd(blockHash *string, targetConfirms *int, includeWatchOn
|
|||
|
||||
// ListTransactionsCmd defines the listtransactions JSON-RPC command.
|
||||
type ListTransactionsCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
Count *int `jsonrpcdefault:"10"`
|
||||
From *int `jsonrpcdefault:"0"`
|
||||
IncludeWatchOnly *bool `jsonrpcdefault:"false"`
|
||||
Account *string
|
||||
Count *int `jsonrpcdefault:"10"`
|
||||
From *int `jsonrpcdefault:"0"`
|
||||
IncludeWatchOnly *bool `jsonrpcdefault:"false"`
|
||||
}
|
||||
|
||||
// NewListTransactionsCmd returns a new instance which can be used to issue a
|
||||
|
@ -561,13 +556,36 @@ func NewLockUnspentCmd(unlock bool, transactions []TransactionInput) *LockUnspen
|
|||
}
|
||||
}
|
||||
|
||||
// MoveCmd defines the move JSON-RPC command.
|
||||
type MoveCmd struct {
|
||||
FromAccount string
|
||||
ToAccount string
|
||||
Amount float64 // In BTC
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
Comment *string
|
||||
}
|
||||
|
||||
// NewMoveCmd returns a new instance which can be used to issue a move JSON-RPC
|
||||
// command.
|
||||
//
|
||||
// The parameters which are pointers indicate they are optional. Passing nil
|
||||
// for optional parameters will use the default value.
|
||||
func NewMoveCmd(fromAccount, toAccount string, amount float64, minConf *int, comment *string) *MoveCmd {
|
||||
return &MoveCmd{
|
||||
FromAccount: fromAccount,
|
||||
ToAccount: toAccount,
|
||||
Amount: amount,
|
||||
MinConf: minConf,
|
||||
Comment: comment,
|
||||
}
|
||||
}
|
||||
|
||||
// SendFromCmd defines the sendfrom JSON-RPC command.
|
||||
type SendFromCmd struct {
|
||||
FromAccount string
|
||||
ToAddress string
|
||||
Amount float64 // In BTC
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
AddressType *string `jsonrpcdefault:"\"*\""`
|
||||
Comment *string
|
||||
CommentTo *string
|
||||
}
|
||||
|
@ -577,15 +595,12 @@ type SendFromCmd struct {
|
|||
//
|
||||
// The parameters which are pointers indicate they are optional. Passing nil
|
||||
// for optional parameters will use the default value.
|
||||
func NewSendFromCmd(fromAccount, toAddress string, amount float64,
|
||||
minConf *int, addrType *string, comment, commentTo *string) *SendFromCmd {
|
||||
|
||||
func NewSendFromCmd(fromAccount, toAddress string, amount float64, minConf *int, comment, commentTo *string) *SendFromCmd {
|
||||
return &SendFromCmd{
|
||||
FromAccount: fromAccount,
|
||||
ToAddress: toAddress,
|
||||
Amount: amount,
|
||||
MinConf: minConf,
|
||||
AddressType: addrType,
|
||||
Comment: comment,
|
||||
CommentTo: commentTo,
|
||||
}
|
||||
|
@ -596,7 +611,6 @@ type SendManyCmd struct {
|
|||
FromAccount string
|
||||
Amounts map[string]float64 `jsonrpcusage:"{\"address\":amount,...}"` // In BTC
|
||||
MinConf *int `jsonrpcdefault:"1"`
|
||||
AddressType *string `jsonrpcdefault:"\"*\""`
|
||||
Comment *string
|
||||
}
|
||||
|
||||
|
@ -605,24 +619,21 @@ type SendManyCmd struct {
|
|||
//
|
||||
// The parameters which are pointers indicate they are optional. Passing nil
|
||||
// for optional parameters will use the default value.
|
||||
func NewSendManyCmd(fromAccount string, amounts map[string]float64,
|
||||
minConf *int, addrType *string, comment *string) *SendManyCmd {
|
||||
func NewSendManyCmd(fromAccount string, amounts map[string]float64, minConf *int, comment *string) *SendManyCmd {
|
||||
return &SendManyCmd{
|
||||
FromAccount: fromAccount,
|
||||
Amounts: amounts,
|
||||
MinConf: minConf,
|
||||
AddressType: addrType,
|
||||
Comment: comment,
|
||||
}
|
||||
}
|
||||
|
||||
// SendToAddressCmd defines the sendtoaddress JSON-RPC command.
|
||||
type SendToAddressCmd struct {
|
||||
Address string
|
||||
Amount float64
|
||||
AddressType *string `jsonrpcdefault:"\"*\""`
|
||||
Comment *string
|
||||
CommentTo *string
|
||||
Address string
|
||||
Amount float64
|
||||
Comment *string
|
||||
CommentTo *string
|
||||
}
|
||||
|
||||
// NewSendToAddressCmd returns a new instance which can be used to issue a
|
||||
|
@ -630,14 +641,27 @@ type SendToAddressCmd struct {
|
|||
//
|
||||
// The parameters which are pointers indicate they are optional. Passing nil
|
||||
// for optional parameters will use the default value.
|
||||
func NewSendToAddressCmd(address string, amount float64, addrType *string,
|
||||
comment, commentTo *string) *SendToAddressCmd {
|
||||
func NewSendToAddressCmd(address string, amount float64, comment, commentTo *string) *SendToAddressCmd {
|
||||
return &SendToAddressCmd{
|
||||
Address: address,
|
||||
Amount: amount,
|
||||
AddressType: addrType,
|
||||
Comment: comment,
|
||||
CommentTo: commentTo,
|
||||
Address: address,
|
||||
Amount: amount,
|
||||
Comment: comment,
|
||||
CommentTo: commentTo,
|
||||
}
|
||||
}
|
||||
|
||||
// SetAccountCmd defines the setaccount JSON-RPC command.
|
||||
type SetAccountCmd struct {
|
||||
Address string
|
||||
Account string
|
||||
}
|
||||
|
||||
// NewSetAccountCmd returns a new instance which can be used to issue a
|
||||
// setaccount JSON-RPC command.
|
||||
func NewSetAccountCmd(address, account string) *SetAccountCmd {
|
||||
return &SetAccountCmd{
|
||||
Address: address,
|
||||
Account: account,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -848,8 +872,7 @@ func (s *ScriptPubKey) UnmarshalJSON(data []byte) error {
|
|||
//
|
||||
// Descriptors are typically ranged when specified in the form of generic HD
|
||||
// chain paths.
|
||||
//
|
||||
// Example of a ranged descriptor: pkh(tpub.../*)
|
||||
// Example of a ranged descriptor: pkh(tpub.../*)
|
||||
//
|
||||
// The value can be an int to specify the end of the range, or the range
|
||||
// itself, as []int{begin, end}.
|
||||
|
@ -975,24 +998,6 @@ func NewImportMultiCmd(requests []ImportMultiRequest, options *ImportMultiOption
|
|||
}
|
||||
}
|
||||
|
||||
// RescanBlockchainCmd defines the RescanBlockchain JSON-RPC command.
|
||||
type RescanBlockchainCmd struct {
|
||||
StartHeight *int32 `jsonrpcdefault:"0"`
|
||||
StopHeight *int32
|
||||
}
|
||||
|
||||
// NewRescanBlockchainCmd returns a new instance which can be used to issue
|
||||
// an RescanBlockchain JSON-RPC command.
|
||||
//
|
||||
// The parameters which are pointers indicate they are optional. Passing nil
|
||||
// for optional parameters will use the default value.
|
||||
func NewRescanBlockchainCmd(startHeight *int32, stopHeight *int32) *RescanBlockchainCmd {
|
||||
return &RescanBlockchainCmd{
|
||||
StartHeight: startHeight,
|
||||
StopHeight: stopHeight,
|
||||
}
|
||||
}
|
||||
|
||||
// PsbtInput represents an input to include in the PSBT created by the
|
||||
// WalletCreateFundedPsbtCmd command.
|
||||
type PsbtInput struct {
|
||||
|
@ -1114,10 +1119,11 @@ func init() {
|
|||
MustRegisterCmd("listunspent", (*ListUnspentCmd)(nil), flags)
|
||||
MustRegisterCmd("loadwallet", (*LoadWalletCmd)(nil), flags)
|
||||
MustRegisterCmd("lockunspent", (*LockUnspentCmd)(nil), flags)
|
||||
MustRegisterCmd("rescanblockchain", (*RescanBlockchainCmd)(nil), flags)
|
||||
MustRegisterCmd("move", (*MoveCmd)(nil), flags)
|
||||
MustRegisterCmd("sendfrom", (*SendFromCmd)(nil), flags)
|
||||
MustRegisterCmd("sendmany", (*SendManyCmd)(nil), flags)
|
||||
MustRegisterCmd("sendtoaddress", (*SendToAddressCmd)(nil), flags)
|
||||
MustRegisterCmd("setaccount", (*SetAccountCmd)(nil), flags)
|
||||
MustRegisterCmd("settxfee", (*SetTxFeeCmd)(nil), flags)
|
||||
MustRegisterCmd("signmessage", (*SignMessageCmd)(nil), flags)
|
||||
MustRegisterCmd("signrawtransaction", (*SignRawTransactionCmd)(nil), flags)
|
||||
|
|
|
@ -287,12 +287,11 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
return btcjson.NewCmd("getaccountaddress", "acct")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewGetAccountAddressCmd(btcjson.String("acct"))
|
||||
return btcjson.NewGetAccountAddressCmd("acct")
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getaccountaddress","params":["acct"],"id":1}`,
|
||||
unmarshalled: &btcjson.GetAccountAddressCmd{
|
||||
Account: btcjson.String("acct"),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Account: "acct",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -301,12 +300,11 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
return btcjson.NewCmd("getaddressesbyaccount", "acct")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewGetAddressesByAccountCmd(btcjson.String("acct"))
|
||||
return btcjson.NewGetAddressesByAccountCmd("acct")
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getaddressesbyaccount","params":["acct"],"id":1}`,
|
||||
unmarshalled: &btcjson.GetAddressesByAccountCmd{
|
||||
Account: btcjson.String("acct"),
|
||||
AddressType: btcjson.String("*"),
|
||||
Account: "acct",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -332,9 +330,8 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getbalance","params":[],"id":1}`,
|
||||
unmarshalled: &btcjson.GetBalanceCmd{
|
||||
Account: btcjson.String("default"),
|
||||
MinConf: btcjson.Int(1),
|
||||
AddressType: btcjson.String("*"),
|
||||
Account: nil,
|
||||
MinConf: btcjson.Int(1),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -347,9 +344,8 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getbalance","params":["acct"],"id":1}`,
|
||||
unmarshalled: &btcjson.GetBalanceCmd{
|
||||
Account: btcjson.String("acct"),
|
||||
MinConf: btcjson.Int(1),
|
||||
AddressType: btcjson.String("*"),
|
||||
Account: btcjson.String("acct"),
|
||||
MinConf: btcjson.Int(1),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -362,9 +358,8 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getbalance","params":["acct",6],"id":1}`,
|
||||
unmarshalled: &btcjson.GetBalanceCmd{
|
||||
Account: btcjson.String("acct"),
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("*"),
|
||||
Account: btcjson.String("acct"),
|
||||
MinConf: btcjson.Int(6),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -388,8 +383,7 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getnewaddress","params":[],"id":1}`,
|
||||
unmarshalled: &btcjson.GetNewAddressCmd{
|
||||
Account: btcjson.String("default"),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Account: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -402,8 +396,7 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getnewaddress","params":["acct"],"id":1}`,
|
||||
unmarshalled: &btcjson.GetNewAddressCmd{
|
||||
Account: btcjson.String("acct"),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Account: btcjson.String("acct"),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -416,8 +409,7 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getrawchangeaddress","params":[],"id":1}`,
|
||||
unmarshalled: &btcjson.GetRawChangeAddressCmd{
|
||||
Account: btcjson.String("default"),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Account: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -430,8 +422,7 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getrawchangeaddress","params":["acct"],"id":1}`,
|
||||
unmarshalled: &btcjson.GetRawChangeAddressCmd{
|
||||
Account: btcjson.String("acct"),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Account: btcjson.String("acct"),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -440,11 +431,11 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
return btcjson.NewCmd("getreceivedbyaccount", "acct")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewGetReceivedByAccountCmd(btcjson.String("acct"), nil)
|
||||
return btcjson.NewGetReceivedByAccountCmd("acct", nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getreceivedbyaccount","params":["acct"],"id":1}`,
|
||||
unmarshalled: &btcjson.GetReceivedByAccountCmd{
|
||||
Account: btcjson.String("acct"),
|
||||
Account: "acct",
|
||||
MinConf: btcjson.Int(1),
|
||||
},
|
||||
},
|
||||
|
@ -454,11 +445,11 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
return btcjson.NewCmd("getreceivedbyaccount", "acct", 6)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewGetReceivedByAccountCmd(btcjson.String("acct"), btcjson.Int(6))
|
||||
return btcjson.NewGetReceivedByAccountCmd("acct", btcjson.Int(6))
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getreceivedbyaccount","params":["acct",6],"id":1}`,
|
||||
unmarshalled: &btcjson.GetReceivedByAccountCmd{
|
||||
Account: btcjson.String("acct"),
|
||||
Account: "acct",
|
||||
MinConf: btcjson.Int(6),
|
||||
},
|
||||
},
|
||||
|
@ -610,8 +601,7 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"listaccounts","params":[],"id":1}`,
|
||||
unmarshalled: &btcjson.ListAccountsCmd{
|
||||
MinConf: btcjson.Int(1),
|
||||
AddressType: btcjson.String("*"),
|
||||
MinConf: btcjson.Int(1),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -624,8 +614,7 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"listaccounts","params":[6],"id":1}`,
|
||||
unmarshalled: &btcjson.ListAccountsCmd{
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("*"),
|
||||
MinConf: btcjson.Int(6),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -855,7 +844,7 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"listtransactions","params":[],"id":1}`,
|
||||
unmarshalled: &btcjson.ListTransactionsCmd{
|
||||
Account: btcjson.String("default"),
|
||||
Account: nil,
|
||||
Count: btcjson.Int(10),
|
||||
From: btcjson.Int(0),
|
||||
IncludeWatchOnly: btcjson.Bool(false),
|
||||
|
@ -1007,13 +996,64 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "move",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("move", "from", "to", 0.5)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewMoveCmd("from", "to", 0.5, nil, nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"move","params":["from","to",0.5],"id":1}`,
|
||||
unmarshalled: &btcjson.MoveCmd{
|
||||
FromAccount: "from",
|
||||
ToAccount: "to",
|
||||
Amount: 0.5,
|
||||
MinConf: btcjson.Int(1),
|
||||
Comment: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "move optional1",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("move", "from", "to", 0.5, 6)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewMoveCmd("from", "to", 0.5, btcjson.Int(6), nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"move","params":["from","to",0.5,6],"id":1}`,
|
||||
unmarshalled: &btcjson.MoveCmd{
|
||||
FromAccount: "from",
|
||||
ToAccount: "to",
|
||||
Amount: 0.5,
|
||||
MinConf: btcjson.Int(6),
|
||||
Comment: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "move optional2",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("move", "from", "to", 0.5, 6, "comment")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewMoveCmd("from", "to", 0.5, btcjson.Int(6), btcjson.String("comment"))
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"move","params":["from","to",0.5,6,"comment"],"id":1}`,
|
||||
unmarshalled: &btcjson.MoveCmd{
|
||||
FromAccount: "from",
|
||||
ToAccount: "to",
|
||||
Amount: 0.5,
|
||||
MinConf: btcjson.Int(6),
|
||||
Comment: btcjson.String("comment"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sendfrom",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, nil, nil, nil, nil)
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, nil, nil, nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5],"id":1}`,
|
||||
unmarshalled: &btcjson.SendFromCmd{
|
||||
|
@ -1021,7 +1061,6 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
ToAddress: "1Address",
|
||||
Amount: 0.5,
|
||||
MinConf: btcjson.Int(1),
|
||||
AddressType: btcjson.String("*"),
|
||||
Comment: nil,
|
||||
CommentTo: nil,
|
||||
},
|
||||
|
@ -1032,7 +1071,7 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6), nil, nil, nil)
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6), nil, nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6],"id":1}`,
|
||||
unmarshalled: &btcjson.SendFromCmd{
|
||||
|
@ -1040,7 +1079,6 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
ToAddress: "1Address",
|
||||
Amount: 0.5,
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("*"),
|
||||
Comment: nil,
|
||||
CommentTo: nil,
|
||||
},
|
||||
|
@ -1048,59 +1086,37 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
{
|
||||
name: "sendfrom optional2",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "legacy")
|
||||
return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "comment")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6), btcjson.String("legacy"),
|
||||
nil, nil)
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6),
|
||||
btcjson.String("comment"), nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6,"legacy"],"id":1}`,
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6,"comment"],"id":1}`,
|
||||
unmarshalled: &btcjson.SendFromCmd{
|
||||
FromAccount: "from",
|
||||
ToAddress: "1Address",
|
||||
Amount: 0.5,
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Comment: nil,
|
||||
Comment: btcjson.String("comment"),
|
||||
CommentTo: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sendfrom optional3",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "legacy", "comment")
|
||||
return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "comment", "commentto")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6), btcjson.String("legacy"),
|
||||
btcjson.String("comment"), nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6,"legacy","comment"],"id":1}`,
|
||||
unmarshalled: &btcjson.SendFromCmd{
|
||||
FromAccount: "from",
|
||||
ToAddress: "1Address",
|
||||
Amount: 0.5,
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Comment: btcjson.String("comment"),
|
||||
CommentTo: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sendfrom optional4",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "legacy", "comment", "commentto")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6), btcjson.String("legacy"),
|
||||
return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6),
|
||||
btcjson.String("comment"), btcjson.String("commentto"))
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6,"legacy","comment","commentto"],"id":1}`,
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6,"comment","commentto"],"id":1}`,
|
||||
unmarshalled: &btcjson.SendFromCmd{
|
||||
FromAccount: "from",
|
||||
ToAddress: "1Address",
|
||||
Amount: 0.5,
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Comment: btcjson.String("comment"),
|
||||
CommentTo: btcjson.String("commentto"),
|
||||
},
|
||||
|
@ -1112,14 +1128,13 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
staticCmd: func() interface{} {
|
||||
amounts := map[string]float64{"1Address": 0.5}
|
||||
return btcjson.NewSendManyCmd("from", amounts, nil, nil, nil)
|
||||
return btcjson.NewSendManyCmd("from", amounts, nil, nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendmany","params":["from",{"1Address":0.5}],"id":1}`,
|
||||
unmarshalled: &btcjson.SendManyCmd{
|
||||
FromAccount: "from",
|
||||
Amounts: map[string]float64{"1Address": 0.5},
|
||||
MinConf: btcjson.Int(1),
|
||||
AddressType: btcjson.String("*"),
|
||||
Comment: nil,
|
||||
},
|
||||
},
|
||||
|
@ -1130,50 +1145,30 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
},
|
||||
staticCmd: func() interface{} {
|
||||
amounts := map[string]float64{"1Address": 0.5}
|
||||
return btcjson.NewSendManyCmd("from", amounts, btcjson.Int(6), nil, nil)
|
||||
return btcjson.NewSendManyCmd("from", amounts, btcjson.Int(6), nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendmany","params":["from",{"1Address":0.5},6],"id":1}`,
|
||||
unmarshalled: &btcjson.SendManyCmd{
|
||||
FromAccount: "from",
|
||||
Amounts: map[string]float64{"1Address": 0.5},
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("*"),
|
||||
Comment: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sendmany optional2",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("sendmany", "from", `{"1Address":0.5}`, 6, "legacy")
|
||||
return btcjson.NewCmd("sendmany", "from", `{"1Address":0.5}`, 6, "comment")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
amounts := map[string]float64{"1Address": 0.5}
|
||||
return btcjson.NewSendManyCmd("from", amounts, btcjson.Int(6), btcjson.String("legacy"), nil)
|
||||
return btcjson.NewSendManyCmd("from", amounts, btcjson.Int(6), btcjson.String("comment"))
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendmany","params":["from",{"1Address":0.5},6,"legacy"],"id":1}`,
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendmany","params":["from",{"1Address":0.5},6,"comment"],"id":1}`,
|
||||
unmarshalled: &btcjson.SendManyCmd{
|
||||
FromAccount: "from",
|
||||
Amounts: map[string]float64{"1Address": 0.5},
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Comment: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sendmany optional3",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("sendmany", "from", `{"1Address":0.5}`, 6, "legacy", "comment")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
amounts := map[string]float64{"1Address": 0.5}
|
||||
return btcjson.NewSendManyCmd("from", amounts, btcjson.Int(6), btcjson.String("legacy"), btcjson.String("comment"))
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendmany","params":["from",{"1Address":0.5},6,"legacy","comment"],"id":1}`,
|
||||
unmarshalled: &btcjson.SendManyCmd{
|
||||
FromAccount: "from",
|
||||
Amounts: map[string]float64{"1Address": 0.5},
|
||||
MinConf: btcjson.Int(6),
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Comment: btcjson.String("comment"),
|
||||
},
|
||||
},
|
||||
|
@ -1183,50 +1178,45 @@ func TestWalletSvrCmds(t *testing.T) {
|
|||
return btcjson.NewCmd("sendtoaddress", "1Address", 0.5)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewSendToAddressCmd("1Address", 0.5, nil, nil, nil)
|
||||
return btcjson.NewSendToAddressCmd("1Address", 0.5, nil, nil)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendtoaddress","params":["1Address",0.5],"id":1}`,
|
||||
unmarshalled: &btcjson.SendToAddressCmd{
|
||||
Address: "1Address",
|
||||
Amount: 0.5,
|
||||
AddressType: btcjson.String("*"),
|
||||
Comment: nil,
|
||||
CommentTo: nil,
|
||||
Address: "1Address",
|
||||
Amount: 0.5,
|
||||
Comment: nil,
|
||||
CommentTo: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sendtoaddress optional1",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("sendtoaddress", "1Address", 0.5, "legacy")
|
||||
return btcjson.NewCmd("sendtoaddress", "1Address", 0.5, "comment", "commentto")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewSendToAddressCmd("1Address", 0.5, btcjson.String("legacy"), nil, nil)
|
||||
return btcjson.NewSendToAddressCmd("1Address", 0.5, btcjson.String("comment"),
|
||||
btcjson.String("commentto"))
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendtoaddress","params":["1Address",0.5,"legacy"],"id":1}`,
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendtoaddress","params":["1Address",0.5,"comment","commentto"],"id":1}`,
|
||||
unmarshalled: &btcjson.SendToAddressCmd{
|
||||
Address: "1Address",
|
||||
Amount: 0.5,
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Comment: nil,
|
||||
CommentTo: nil,
|
||||
Address: "1Address",
|
||||
Amount: 0.5,
|
||||
Comment: btcjson.String("comment"),
|
||||
CommentTo: btcjson.String("commentto"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sendtoaddress optional2",
|
||||
name: "setaccount",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("sendtoaddress", "1Address", 0.5, "legacy", "comment", "commentto")
|
||||
return btcjson.NewCmd("setaccount", "1Address", "acct")
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewSendToAddressCmd("1Address", 0.5, btcjson.String("legacy"), btcjson.String("comment"),
|
||||
btcjson.String("commentto"))
|
||||
return btcjson.NewSetAccountCmd("1Address", "acct")
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"sendtoaddress","params":["1Address",0.5,"legacy","comment","commentto"],"id":1}`,
|
||||
unmarshalled: &btcjson.SendToAddressCmd{
|
||||
Address: "1Address",
|
||||
Amount: 0.5,
|
||||
AddressType: btcjson.String("legacy"),
|
||||
Comment: btcjson.String("comment"),
|
||||
CommentTo: btcjson.String("commentto"),
|
||||
marshalled: `{"jsonrpc":"1.0","method":"setaccount","params":["1Address","acct"],"id":1}`,
|
||||
unmarshalled: &btcjson.SetAccountCmd{
|
||||
Address: "1Address",
|
||||
Account: "acct",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
|
@ -47,32 +47,13 @@ type embeddedAddressInfo struct {
// Reference: https://bitcoincore.org/en/doc/0.20.0/rpc/wallet/getaddressinfo
//
// The GetAddressInfoResult has three segments:
//  1. General information about the address.
//  2. Metadata (Timestamp, HDKeyPath, HDSeedID) and wallet fields
//     (IsMine, IsWatchOnly).
//  3. Information about the embedded address in case of P2SH or P2WSH.
//     Same structure as (1).
type GetAddressInfoResult struct {
	// The following fields are identical to embeddedAddressInfo.
	// However, the utility to generate RPC help message can't handle
	// embedded field properly. So, spell out the attributes individually.
	Address             string                `json:"address"`
	ScriptPubKey        string                `json:"scriptPubKey"`
	Descriptor          *string               `json:"desc,omitempty"`
	IsScript            bool                  `json:"isscript"`
	IsChange            bool                  `json:"ischange"`
	IsWitness           bool                  `json:"iswitness"`
	WitnessVersion      int                   `json:"witness_version,omitempty"`
	WitnessProgram      *string               `json:"witness_program,omitempty"`
	ScriptType          *txscript.ScriptClass `json:"script,omitempty"`
	Hex                 *string               `json:"hex,omitempty"`
	PubKeys             *[]string             `json:"pubkeys,omitempty"`
	SignaturesRequired  *int                  `json:"sigsrequired,omitempty"`
	PubKey              *string               `json:"pubkey,omitempty"`
	IsCompressed        *bool                 `json:"iscompressed,omitempty"`
	HDMasterFingerprint *string               `json:"hdmasterfingerprint,omitempty"`
	Labels              []string              `json:"labels"`

	embeddedAddressInfo
	IsMine      bool `json:"ismine"`
	IsWatchOnly bool `json:"iswatchonly"`
	Timestamp   *int `json:"timestamp,omitempty"`
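The rewritten GetAddressInfoResult above relies on Go's embedded-struct field promotion: encoding/json flattens the fields of embeddedAddressInfo into the outer JSON object. A minimal sketch of that behavior, trimmed to two fields for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

type embeddedAddressInfo struct {
	Address  string `json:"address"`
	IsScript bool   `json:"isscript"`
}

type GetAddressInfoResult struct {
	embeddedAddressInfo
	IsMine bool `json:"ismine"`
}

func main() {
	b, _ := json.Marshal(GetAddressInfoResult{
		embeddedAddressInfo: embeddedAddressInfo{Address: "1abc"},
		IsMine:              true,
	})
	// Embedded fields are promoted into the top-level object:
	// {"address":"1abc","isscript":false,"ismine":true}
	fmt.Println(string(b))
}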
|
||||
|
@ -174,7 +155,6 @@ type GetTransactionResult struct {
|
|||
TimeReceived int64 `json:"timereceived"`
|
||||
Details []GetTransactionDetailsResult `json:"details"`
|
||||
Hex string `json:"hex"`
|
||||
Generated bool `json:"generated"`
|
||||
}
|
||||
|
||||
type ScanningOrFalse struct {
|
||||
|
@ -289,6 +269,7 @@ type ListReceivedByAccountResult struct {
|
|||
// ListReceivedByAddressResult models the data from the listreceivedbyaddress
|
||||
// command.
|
||||
type ListReceivedByAddressResult struct {
|
||||
Account string `json:"account"`
|
||||
Address string `json:"address"`
|
||||
Amount float64 `json:"amount"`
|
||||
Confirmations uint64 `json:"confirmations"`
|
||||
|
@ -317,12 +298,6 @@ type ListUnspentResult struct {
|
|||
IsStake bool `json:"isstake"`
|
||||
}
|
||||
|
||||
// RescanBlockchainResult models the data returned from the rescanblockchain command.
|
||||
type RescanBlockchainResult struct {
|
||||
StartHeight int32 `json:"start_height"`
|
||||
StoptHeight int32 `json:"stop_height"`
|
||||
}
|
||||
|
||||
// SignRawTransactionError models the data that contains script verification
|
||||
// errors from the signrawtransaction request.
|
||||
type SignRawTransactionError struct {
|
||||
|
|
|
@ -37,8 +37,10 @@ func TestGetAddressInfoResult(t *testing.T) {
|
|||
name: "GetAddressInfoResult - ScriptType",
|
||||
result: `{"script":"nonstandard","address":"1abc"}`,
|
||||
want: GetAddressInfoResult{
|
||||
Address: "1abc",
|
||||
ScriptType: nonStandard,
|
||||
embeddedAddressInfo: embeddedAddressInfo{
|
||||
Address: "1abc",
|
||||
ScriptType: nonStandard,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
|
@ -40,7 +40,7 @@ func NewExportWatchingWalletCmd(account *string, download *bool) *ExportWatching
|
|||
|
||||
// GetUnconfirmedBalanceCmd defines the getunconfirmedbalance JSON-RPC command.
|
||||
type GetUnconfirmedBalanceCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
Account *string
|
||||
}
|
||||
|
||||
// NewGetUnconfirmedBalanceCmd returns a new instance which can be used to issue
|
||||
|
@ -58,7 +58,7 @@ func NewGetUnconfirmedBalanceCmd(account *string) *GetUnconfirmedBalanceCmd {
|
|||
// command.
|
||||
type ListAddressTransactionsCmd struct {
|
||||
Addresses []string
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
Account *string
|
||||
}
|
||||
|
||||
// NewListAddressTransactionsCmd returns a new instance which can be used to
|
||||
|
@ -75,7 +75,7 @@ func NewListAddressTransactionsCmd(addresses []string, account *string) *ListAdd
|
|||
|
||||
// ListAllTransactionsCmd defines the listalltransactions JSON-RPC command.
|
||||
type ListAllTransactionsCmd struct {
|
||||
Account *string `jsonrpcdefault:"\"default\""`
|
||||
Account *string
|
||||
}
|
||||
|
||||
// NewListAllTransactionsCmd returns a new instance which can be used to issue a
|
||||
|
@ -114,8 +114,9 @@ func NewWalletIsLockedCmd() *WalletIsLockedCmd {
|
|||
}
|
||||
|
||||
func init() {
|
||||
// The commands in this file are only usable with a wallet server.
|
||||
flags := UFWalletOnly
|
||||
// The commands in this file are only usable with a wallet server via
|
||||
// websockets.
|
||||
flags := UFWalletOnly | UFWebsocketOnly
|
||||
|
||||
MustRegisterCmd("createencryptedwallet", (*CreateEncryptedWalletCmd)(nil), flags)
|
||||
MustRegisterCmd("exportwatchingwallet", (*ExportWatchingWalletCmd)(nil), flags)
|
||||
|
|
|
@ -71,7 +71,7 @@ func TestWalletSvrWsCmds(t *testing.T) {
|
|||
{
|
||||
name: "exportwatchingwallet optional2",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("exportwatchingwallet", btcjson.String("acct"), true)
|
||||
return btcjson.NewCmd("exportwatchingwallet", "acct", true)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
return btcjson.NewExportWatchingWalletCmd(btcjson.String("acct"),
|
||||
|
@ -93,7 +93,7 @@ func TestWalletSvrWsCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"getunconfirmedbalance","params":[],"id":1}`,
|
||||
unmarshalled: &btcjson.GetUnconfirmedBalanceCmd{
|
||||
Account: btcjson.String("default"),
|
||||
Account: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -120,7 +120,7 @@ func TestWalletSvrWsCmds(t *testing.T) {
|
|||
marshalled: `{"jsonrpc":"1.0","method":"listaddresstransactions","params":[["1Address"]],"id":1}`,
|
||||
unmarshalled: &btcjson.ListAddressTransactionsCmd{
|
||||
Addresses: []string{"1Address"},
|
||||
Account: btcjson.String("default"),
|
||||
Account: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -148,7 +148,7 @@ func TestWalletSvrWsCmds(t *testing.T) {
|
|||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"listalltransactions","params":[],"id":1}`,
|
||||
unmarshalled: &btcjson.ListAllTransactionsCmd{
|
||||
Account: btcjson.String("default"),
|
||||
Account: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
|
@ -39,11 +39,11 @@ func DoubleHashH(b []byte) Hash {

// LbryPoWHashH calculates returns the PoW Hash.
//
//	doubled := SHA256(SHA256(b))
//	expanded := SHA512(doubled)
//	left := RIPEMD160(expanded[0:32])
//	right := RIPEMD160(expanded[32:64])
//	result := SHA256(SHA256(left||right))
func LbryPoWHashH(b []byte) Hash {
	doubled := DoubleHashB(b)
	expanded := sha512.Sum512(doubled)
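For reference, a standalone sketch of the hash chain described in the LbryPoWHashH comment above. It assumes the golang.org/x/crypto/ripemd160 package and is only an illustration of the formula, not the lbcd implementation:

package main

import (
	"crypto/sha256"
	"crypto/sha512"
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

func powHash(b []byte) [32]byte {
	// doubled := SHA256(SHA256(b))
	d := sha256.Sum256(b)
	d = sha256.Sum256(d[:])

	// expanded := SHA512(doubled)
	expanded := sha512.Sum512(d[:])

	// left/right := RIPEMD160 of each 32-byte half
	left := ripemd160.New()
	left.Write(expanded[:32])
	right := ripemd160.New()
	right.Write(expanded[32:])

	// result := SHA256(SHA256(left||right))
	combined := sha256.Sum256(append(left.Sum(nil), right.Sum(nil)...))
	return sha256.Sum256(combined[:])
}

func main() {
	fmt.Printf("%x\n", powHash([]byte("hello")))
}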
@ -18,40 +18,40 @@
// When a network parameter is needed, it may then be looked up through this
// variable (either directly, or hidden in a library call).
//
//	package main
//
//	import (
//		"flag"
//		"fmt"
//		"log"
//
//		btcutil "github.com/lbryio/lbcutil"
//		"github.com/lbryio/lbcd/chaincfg"
//	)
//
//	var testnet = flag.Bool("testnet", false, "operate on the testnet Bitcoin network")
//
//	// By default (without -testnet), use mainnet.
//	var chainParams = &chaincfg.MainNetParams
//
//	func main() {
//		flag.Parse()
//
//		// Modify active network parameters if operating on testnet.
//		if *testnet {
//			chainParams = &chaincfg.TestNet3Params
//		}
//
//		// later...
//
//		// Create and print new payment address, specific to the active network.
//		pubKeyHash := make([]byte, 20)
//		addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, chainParams)
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(addr)
//	}
//
// If an application does not use one of the three standard Bitcoin networks,
// a new Params struct may be created which defines the parameters for the
|
||||
|
|
|
@ -794,9 +794,8 @@ func IsBech32SegwitPrefix(prefix string) bool {
// ErrInvalidHDKeyID error will be returned.
//
// Reference:
//
//	SLIP-0132 : Registered HD version bytes for BIP-0032
//	https://github.com/satoshilabs/slips/blob/master/slip-0132.md
func RegisterHDKeyID(hdPublicKeyID []byte, hdPrivateKeyID []byte) error {
	if len(hdPublicKeyID) != 4 || len(hdPrivateKeyID) != 4 {
		return ErrInvalidHDKeyID
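A hedged usage sketch for RegisterHDKeyID. The version bytes shown are the SLIP-0132 ypub/yprv pair and are only an example; both IDs must be exactly 4 bytes or ErrInvalidHDKeyID is returned:

package main

import (
	"log"

	"github.com/lbryio/lbcd/chaincfg"
)

func main() {
	hdPub := []byte{0x04, 0x9d, 0x7c, 0xb2}  // SLIP-0132 "ypub"
	hdPriv := []byte{0x04, 0x9d, 0x78, 0x78} // SLIP-0132 "yprv"
	if err := chaincfg.RegisterHDKeyID(hdPub, hdPriv); err != nil {
		log.Fatal(err)
	}
}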
|
||||
|
|
|
@ -78,10 +78,9 @@ func (c *Change) Marshal(enc *bytes.Buffer) error {
|
|||
binary.BigEndian.PutUint32(temp[:4], uint32(len(c.SpentChildren)))
|
||||
enc.Write(temp[:4])
|
||||
for key := range c.SpentChildren {
|
||||
keySize := uint16(len(key))
|
||||
binary.BigEndian.PutUint16(temp[:2], keySize) // technically limited to 255; not sure we trust it
|
||||
binary.BigEndian.PutUint16(temp[:2], uint16(len(key))) // technically limited to 255; not sure we trust it
|
||||
enc.Write(temp[:2])
|
||||
enc.WriteString(key[:keySize])
|
||||
enc.WriteString(key)
|
||||
}
|
||||
} else {
|
||||
binary.BigEndian.PutUint32(temp[:4], 0)
|
||||
|
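The Change.Marshal hunk above writes each spent-child key as a 2-byte big-endian length prefix followed by the raw key bytes. A minimal sketch of that framing (writeKey is a hypothetical helper, not part of the changeset):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func writeKey(enc *bytes.Buffer, key string) {
	var temp [2]byte
	binary.BigEndian.PutUint16(temp[:], uint16(len(key)))
	enc.Write(temp[:]) // length prefix, big-endian
	enc.WriteString(key)
}

func main() {
	var buf bytes.Buffer
	writeKey(&buf, "abc")
	fmt.Printf("% x\n", buf.Bytes()) // 00 03 61 62 63
}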
|
|
@ -4,7 +4,9 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
|
@ -47,9 +49,6 @@ type ClaimTrie struct {
|
|||
|
||||
// Registrered cleanup functions which are invoked in the Close() in reverse order.
|
||||
cleanups []func() error
|
||||
|
||||
// claimLogger communicates progress of claimtrie rebuild.
|
||||
claimLogger *claimProgressLogger
|
||||
}
|
||||
|
||||
func New(cfg config.Config) (*ClaimTrie, error) {
|
||||
|
@ -90,7 +89,9 @@ func New(cfg config.Config) (*ClaimTrie, error) {
|
|||
return nil, errors.Wrap(err, "creating node base manager")
|
||||
}
|
||||
normalizingManager := node.NewNormalizingManager(baseManager)
|
||||
nodeManager := &node.HashV2Manager{Manager: normalizingManager}
|
||||
hashV2Manager := &node.HashV2Manager{Manager: normalizingManager}
|
||||
nodeManager := &node.HashV3Manager{Manager: hashV2Manager}
|
||||
|
||||
cleanups = append(cleanups, nodeManager.Close)
|
||||
|
||||
var trie merkletrie.MerkleTrie
|
||||
|
@ -134,7 +135,7 @@ func New(cfg config.Config) (*ClaimTrie, error) {
|
|||
ct.Close() // TODO: the cleanups aren't run when we exit with an err above here (but should be)
|
||||
return nil, errors.Wrap(err, "block repo get")
|
||||
}
|
||||
_, err = nodeManager.IncrementHeightTo(previousHeight, false)
|
||||
_, err = nodeManager.IncrementHeightTo(previousHeight)
|
||||
if err != nil {
|
||||
ct.Close()
|
||||
return nil, errors.Wrap(err, "increment height to")
|
||||
|
@ -222,11 +223,11 @@ func (ct *ClaimTrie) SpendSupport(name []byte, op wire.OutPoint, id change.Claim
|
|||
}
|
||||
|
||||
// AppendBlock increases block by one.
|
||||
func (ct *ClaimTrie) AppendBlock(temporary bool) error {
|
||||
func (ct *ClaimTrie) AppendBlock() error {
|
||||
|
||||
ct.height++
|
||||
|
||||
names, err := ct.nodeManager.IncrementHeightTo(ct.height, temporary)
|
||||
names, err := ct.nodeManager.IncrementHeightTo(ct.height)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "node manager increment")
|
||||
}
|
||||
|
@ -247,19 +248,19 @@ func (ct *ClaimTrie) AppendBlock(temporary bool) error {
|
|||
names = append(names, expirations...)
|
||||
names = removeDuplicates(names)
|
||||
|
||||
for _, name := range names {
|
||||
nhns := ct.makeNameHashNext(names, false, nil)
|
||||
for nhn := range nhns {
|
||||
|
||||
hash, next := ct.nodeManager.Hash(name)
|
||||
ct.merkleTrie.Update(name, hash, true)
|
||||
if next <= 0 {
|
||||
ct.merkleTrie.Update(nhn.Name, nhn.Hash, true)
|
||||
if nhn.Next <= 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
newName := normalization.NormalizeIfNecessary(name, next)
|
||||
newName := normalization.NormalizeIfNecessary(nhn.Name, nhn.Next)
|
||||
updateNames = append(updateNames, newName)
|
||||
updateHeights = append(updateHeights, next)
|
||||
updateHeights = append(updateHeights, nhn.Next)
|
||||
}
|
||||
if !temporary && len(updateNames) > 0 {
|
||||
if len(updateNames) != 0 {
|
||||
err = ct.temporalRepo.SetNodesAt(updateNames, updateHeights)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "temporal repo set")
|
||||
|
@ -267,11 +268,9 @@ func (ct *ClaimTrie) AppendBlock(temporary bool) error {
|
|||
}
|
||||
|
||||
hitFork := ct.updateTrieForHashForkIfNecessary()
|
||||
h := ct.MerkleHash()
|
||||
|
||||
if !temporary {
|
||||
ct.blockRepo.Set(ct.height, h)
|
||||
}
|
||||
h := ct.MerkleHash()
|
||||
ct.blockRepo.Set(ct.height, h)
|
||||
|
||||
if hitFork {
|
||||
err = ct.merkleTrie.SetRoot(h) // for clearing the memory entirely
|
||||
|
@ -281,7 +280,8 @@ func (ct *ClaimTrie) AppendBlock(temporary bool) error {
|
|||
}
|
||||
|
||||
func (ct *ClaimTrie) updateTrieForHashForkIfNecessary() bool {
|
||||
if ct.height != param.ActiveParams.AllClaimsInMerkleForkHeight {
|
||||
if ct.height != param.ActiveParams.AllClaimsInMerkleForkHeight &&
|
||||
ct.height != param.ActiveParams.GrandForkHeight {
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -303,7 +303,7 @@ func removeDuplicates(names [][]byte) [][]byte { // this might be too expensive;
	return names
}

// ResetHeight resets the ClaimTrie to a previous known height..
// ResetHeight resets the ClaimTrie to a previous known height.
func (ct *ClaimTrie) ResetHeight(height int32) error {

	names := make([][]byte, 0)
|
||||
|
@ -314,12 +314,15 @@ func (ct *ClaimTrie) ResetHeight(height int32) error {
|
|||
}
|
||||
names = append(names, results...)
|
||||
}
|
||||
names, err := ct.nodeManager.DecrementHeightTo(names, height)
|
||||
err := ct.nodeManager.DecrementHeightTo(names, height)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
passedHashFork := ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight && height < param.ActiveParams.AllClaimsInMerkleForkHeight
|
||||
if !passedHashFork {
|
||||
passedHashFork = ct.height >= param.ActiveParams.GrandForkHeight && height < param.ActiveParams.GrandForkHeight
|
||||
}
|
||||
hash, err := ct.blockRepo.Get(height)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -331,52 +334,31 @@ func (ct *ClaimTrie) ResetHeight(height int32) error {
|
|||
if passedHashFork {
|
||||
names = nil // force them to reconsider all names
|
||||
}
|
||||
|
||||
var fullRebuildRequired bool
|
||||
|
||||
err = ct.merkleTrie.SetRoot(hash)
|
||||
if err == merkletrie.ErrFullRebuildRequired {
|
||||
fullRebuildRequired = true
|
||||
} else if err != nil {
|
||||
return errors.Wrapf(err, "setRoot")
|
||||
}
|
||||
|
||||
if fullRebuildRequired {
|
||||
ct.runFullTrieRebuild(names, nil)
|
||||
}
|
||||
|
||||
if !ct.MerkleHash().IsEqual(hash) {
|
||||
return errors.Errorf("unable to restore the hash at height %d"+
|
||||
" (fullTriedRebuilt: %t)", height, fullRebuildRequired)
|
||||
return errors.Errorf("unable to restore the hash at height %d", height)
|
||||
}
|
||||
|
||||
return errors.WithStack(ct.blockRepo.Delete(height+1, oldHeight))
|
||||
}
|
||||
|
||||
func (ct *ClaimTrie) runFullTrieRebuild(names [][]byte, interrupt <-chan struct{}) {
|
||||
var nhns chan NameHashNext
|
||||
if names == nil {
|
||||
node.Log("Building the entire claim trie in RAM...")
|
||||
ct.claimLogger = newClaimProgressLogger("Processed", node.GetLogger())
|
||||
|
||||
ct.nodeManager.IterateNames(func(name []byte) bool {
|
||||
if interruptRequested(interrupt) {
|
||||
return false
|
||||
}
|
||||
clone := make([]byte, len(name))
|
||||
copy(clone, name)
|
||||
hash, _ := ct.nodeManager.Hash(clone)
|
||||
ct.merkleTrie.Update(clone, hash, false)
|
||||
ct.claimLogger.LogName(name)
|
||||
return true
|
||||
})
|
||||
node.LogOnce("Building the entire claim trie in RAM...")
|
||||
|
||||
nhns = ct.makeNameHashNext(nil, true, interrupt)
|
||||
} else {
|
||||
for _, name := range names {
|
||||
hash, _ := ct.nodeManager.Hash(name)
|
||||
ct.merkleTrie.Update(name, hash, false)
|
||||
}
|
||||
nhns = ct.makeNameHashNext(names, false, interrupt)
|
||||
}
|
||||
|
||||
for nhn := range nhns {
|
||||
ct.merkleTrie.Update(nhn.Name, nhn.Hash, false)
|
||||
}
|
||||
}
|
||||
|
||||
// MerkleHash returns the Merkle Hash of the claimTrie.
|
||||
|
@ -442,6 +424,12 @@ func (ct *ClaimTrie) FlushToDisk() {
|
|||
}
|
||||
}
|
||||
|
||||
type NameHashNext struct {
|
||||
Name []byte
|
||||
Hash *chainhash.Hash
|
||||
Next int32
|
||||
}
|
||||
|
||||
func interruptRequested(interrupted <-chan struct{}) bool {
|
||||
select {
|
||||
case <-interrupted: // should never block on nil
|
||||
|
@ -451,3 +439,71 @@ func interruptRequested(interrupted <-chan struct{}) bool {
|
|||
|
||||
return false
|
||||
}
|
||||
|
||||
func (ct *ClaimTrie) makeNameHashNext(names [][]byte, all bool, interrupt <-chan struct{}) chan NameHashNext {
|
||||
inputs := make(chan []byte, 512)
|
||||
outputs := make(chan NameHashNext, 512)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
hashComputationWorker := func() {
|
||||
for name := range inputs {
|
||||
hash, next := ct.nodeManager.Hash(name)
|
||||
outputs <- NameHashNext{name, hash, next}
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
threads := int(0.8 * float32(runtime.NumCPU()))
|
||||
if threads < 1 {
|
||||
threads = 1
|
||||
}
|
||||
for threads > 0 {
|
||||
threads--
|
||||
wg.Add(1)
|
||||
go hashComputationWorker()
|
||||
}
|
||||
go func() {
|
||||
if all {
|
||||
ct.nodeManager.IterateNames(func(name []byte) bool {
|
||||
if interruptRequested(interrupt) {
|
||||
return false
|
||||
}
|
||||
clone := make([]byte, len(name))
|
||||
copy(clone, name) // iteration name buffer is reused on future loops
|
||||
inputs <- clone
|
||||
return true
|
||||
})
|
||||
} else {
|
||||
for _, name := range names {
|
||||
if interruptRequested(interrupt) {
|
||||
break
|
||||
}
|
||||
inputs <- name
|
||||
}
|
||||
}
|
||||
close(inputs)
|
||||
}()
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(outputs)
|
||||
}()
|
||||
return outputs
|
||||
}
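The makeNameHashNext function added above is a fan-out/fan-in pipeline: a bounded pool of workers drains a channel of names, and the output channel is closed once every worker returns. A minimal standalone sketch of the same pattern, with a trivial payload in place of node hashing:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func squares(inputs <-chan int) <-chan int {
	outputs := make(chan int, 512)

	var wg sync.WaitGroup
	workers := runtime.NumCPU()
	if workers < 1 {
		workers = 1
	}
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for n := range inputs {
				outputs <- n * n
			}
		}()
	}
	// Close the output channel only after every worker has drained the input.
	go func() {
		wg.Wait()
		close(outputs)
	}()
	return outputs
}

func main() {
	in := make(chan int)
	go func() {
		for i := 1; i <= 5; i++ {
			in <- i
		}
		close(in)
	}()
	for v := range squares(in) {
		fmt.Println(v)
	}
}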
|
||||
|
||||
func (ct *ClaimTrie) MerklePath(name []byte, n *node.Node, bid int) []merkletrie.HashSidePair {
|
||||
pairs := ct.merkleTrie.MerklePath(name)
|
||||
// TODO: organize this code better
|
||||
// this is the 2nd half of the above merkle tree computation
|
||||
// it's done like this so we don't have to create the Node object multiple times
|
||||
claimHashes := node.ComputeClaimHashes(name, n)
|
||||
partials := node.ComputeMerklePath(claimHashes, bid)
|
||||
for i := len(partials) - 1; i >= 0; i-- {
|
||||
pairs = append(pairs, merkletrie.HashSidePair{Right: ((bid >> i) & 1) > 0, Hash: partials[i]})
|
||||
}
|
||||
|
||||
// reverse the list order:
|
||||
for i, j := 0, len(pairs)-1; i < j; i, j = i+1, j-1 {
|
||||
pairs[i], pairs[j] = pairs[j], pairs[i]
|
||||
}
|
||||
return pairs
|
||||
}
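A hedged sketch of how a caller could consume the HashSidePair list returned by MerklePath: fold upward from the leaf hash, hashing on the left or right according to the Right flag, and compare the result with the trie root. This mirrors the validatePairs helper in the claimtrie tests further down; verifyPath is a hypothetical name, not part of the changeset:

// Assumes the chainhash, merkletrie, and node packages are imported, as they
// already are in this file.
func verifyPath(leaf *chainhash.Hash, pairs []merkletrie.HashSidePair, root *chainhash.Hash) bool {
	h := leaf
	for _, p := range pairs {
		if p.Right {
			h = node.HashMerkleBranches(p.Hash, h)
		} else {
			h = node.HashMerkleBranches(h, p.Hash)
		}
	}
	return h.IsEqual(root)
}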
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"github.com/lbryio/lbcd/claimtrie/change"
|
||||
"github.com/lbryio/lbcd/claimtrie/config"
|
||||
"github.com/lbryio/lbcd/claimtrie/merkletrie"
|
||||
"github.com/lbryio/lbcd/claimtrie/node"
|
||||
"github.com/lbryio/lbcd/claimtrie/param"
|
||||
|
||||
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||
|
@ -81,7 +82,7 @@ func TestEmptyHashFork(t *testing.T) {
|
|||
defer ct.Close()
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
err := ct.AppendBlock(false)
|
||||
err := ct.AppendBlock()
|
||||
r.NoError(err)
|
||||
}
|
||||
}
|
||||
|
@ -378,7 +379,7 @@ func incrementBlock(r *require.Assertions, ct *ClaimTrie, c int32) {
|
|||
r.NoError(err)
|
||||
} else {
|
||||
for ; c > 0; c-- {
|
||||
err := ct.AppendBlock(false)
|
||||
err := ct.AppendBlock()
|
||||
r.NoError(err)
|
||||
}
|
||||
}
|
||||
|
@ -996,7 +997,7 @@ func TestBlock884431(t *testing.T) {
|
|||
o6 := add("testing", 20)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
err = ct.AppendBlock(false)
|
||||
err = ct.AppendBlock()
|
||||
r.NoError(err)
|
||||
}
|
||||
n, err := ct.NodeAt(ct.height, []byte("go"))
|
||||
|
@ -1025,3 +1026,66 @@ func TestBlock884431(t *testing.T) {
|
|||
r.NoError(err)
|
||||
r.Equal(o11.String(), n.BestClaim.OutPoint.String())
|
||||
}
|
||||
|
||||
func TestMerklePath(t *testing.T) {
|
||||
r := require.New(t)
|
||||
setup(t)
|
||||
param.ActiveParams.ActiveDelayFactor = 1
|
||||
param.ActiveParams.NormalizedNameForkHeight = 5
|
||||
param.ActiveParams.AllClaimsInMerkleForkHeight = 6
|
||||
param.ActiveParams.GrandForkHeight = 7
|
||||
|
||||
ct, err := New(cfg)
|
||||
r.NoError(err)
|
||||
r.NotNil(ct)
|
||||
defer ct.Close()
|
||||
|
||||
hash := chainhash.HashH([]byte{1, 2, 3})
|
||||
o1 := wire.OutPoint{Hash: hash, Index: 1}
|
||||
o2 := wire.OutPoint{Hash: hash, Index: 2}
|
||||
o3 := wire.OutPoint{Hash: hash, Index: 3}
|
||||
|
||||
err = ct.AddClaim([]byte("test"), o1, change.NewClaimID(o1), 1)
|
||||
r.NoError(err)
|
||||
|
||||
err = ct.AddClaim([]byte("test"), o2, change.NewClaimID(o2), 2)
|
||||
r.NoError(err)
|
||||
|
||||
err = ct.AddClaim([]byte("tester"), o3, change.NewClaimID(o3), 1)
|
||||
r.NoError(err)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
err = ct.AppendBlock()
|
||||
r.NoError(err)
|
||||
}
|
||||
|
||||
n, err := ct.NodeAt(ct.height, []byte("test"))
|
||||
r.NoError(err)
|
||||
pairs := ct.MerklePath([]byte("test"), n, 0)
|
||||
claimHash, err := node.ComputeBidSeqNameHash([]byte("test"), n.Claims[0], 0, n.TakenOverAt)
|
||||
r.NoError(err)
|
||||
validatePairs(r, pairs, ct.MerkleHash(), claimHash)
|
||||
|
||||
pairs = ct.MerklePath([]byte("test"), n, 1)
|
||||
claimHash, err = node.ComputeBidSeqNameHash([]byte("test"), n.Claims[1], 1, n.TakenOverAt)
|
||||
r.NoError(err)
|
||||
validatePairs(r, pairs, ct.MerkleHash(), claimHash)
|
||||
|
||||
n, err = ct.NodeAt(ct.height, []byte("tester"))
|
||||
r.NoError(err)
|
||||
pairs = ct.MerklePath([]byte("tester"), n, 0)
|
||||
claimHash, err = node.ComputeBidSeqNameHash([]byte("tester"), n.Claims[0], 0, n.TakenOverAt)
|
||||
r.NoError(err)
|
||||
validatePairs(r, pairs, ct.MerkleHash(), claimHash)
|
||||
}
|
||||
|
||||
func validatePairs(r *require.Assertions, pairs []merkletrie.HashSidePair, target *chainhash.Hash, claimHash *chainhash.Hash) {
|
||||
for i := range pairs {
|
||||
if pairs[i].Right {
|
||||
claimHash = node.HashMerkleBranches(pairs[i].Hash, claimHash)
|
||||
} else {
|
||||
claimHash = node.HashMerkleBranches(claimHash, pairs[i].Hash)
|
||||
}
|
||||
}
|
||||
r.True(claimHash.IsEqual(target))
|
||||
}
|
||||
|
|
|
@ -23,14 +23,12 @@ func NewBlocCommands() *cobra.Command {
|
|||
|
||||
return cmd
|
||||
}
|
||||
func NewBlockBestCommand() *cobra.Command {
|
||||
|
||||
var showHash bool
|
||||
var showHeight bool
|
||||
func NewBlockBestCommand() *cobra.Command {
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "best",
|
||||
Short: "Show the block hash and height of the best block",
|
||||
Short: "Show the height and hash of the best block",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
db, err := loadBlocksDB()
|
||||
|
@ -45,23 +43,12 @@ func NewBlockBestCommand() *cobra.Command {
|
|||
}
|
||||
|
||||
state := chain.BestSnapshot()
|
||||
|
||||
switch {
|
||||
case showHeight && showHash:
|
||||
fmt.Printf("%s:%d\n", state.Hash, state.Height)
|
||||
case !showHeight && showHash:
|
||||
fmt.Printf("%s\n", state.Hash)
|
||||
case showHeight && !showHash:
|
||||
fmt.Printf("%d\n", state.Height)
|
||||
}
|
||||
fmt.Printf("Block %7d: %s\n", state.Height, state.Hash.String())
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVar(&showHeight, "showheight", true, "Display block height")
|
||||
cmd.Flags().BoolVar(&showHash, "showhash", true, "Display block hash")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
@ -69,12 +56,10 @@ func NewBlockListCommand() *cobra.Command {
|
|||
|
||||
var fromHeight int32
|
||||
var toHeight int32
|
||||
var showHash bool
|
||||
var showHeight bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List block hash and height between blocks <from_height> <to_height>",
|
||||
Short: "List merkle hash of blocks between <from_height> <to_height>",
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
|
@ -98,14 +83,7 @@ func NewBlockListCommand() *cobra.Command {
|
|||
if err != nil {
|
||||
return errors.Wrapf(err, "load hash for %d", ht)
|
||||
}
|
||||
switch {
|
||||
case showHeight && showHash:
|
||||
fmt.Printf("%s:%d\n", hash, ht)
|
||||
case !showHeight && showHash:
|
||||
fmt.Printf("%s\n", hash)
|
||||
case showHeight && !showHash:
|
||||
fmt.Printf("%d\n", ht)
|
||||
}
|
||||
fmt.Printf("Block %7d: %s\n", ht, hash.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -114,8 +92,6 @@ func NewBlockListCommand() *cobra.Command {
|
|||
|
||||
cmd.Flags().Int32Var(&fromHeight, "from", 0, "From height (inclusive)")
|
||||
cmd.Flags().Int32Var(&toHeight, "to", 0, "To height (inclusive)")
|
||||
cmd.Flags().BoolVar(&showHeight, "showheight", true, "Display block height")
|
||||
cmd.Flags().BoolVar(&showHash, "showhash", true, "Display block hash")
|
||||
cmd.Flags().SortFlags = false
|
||||
|
||||
return cmd
|
||||
|
|
|
@ -193,7 +193,7 @@ func NewChainReplayCommand() *cobra.Command {
|
|||
|
||||
func appendBlock(ct *claimtrie.ClaimTrie, chain *blockchain.BlockChain) error {
|
||||
|
||||
err := ct.AppendBlock(false)
|
||||
err := ct.AppendBlock()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "append block: %w")
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ import (
|
|||
func loadBlocksDB() (database.DB, error) {
|
||||
|
||||
dbPath := filepath.Join(dataDir, netName, "blocks_ffldb")
|
||||
log.Debugf("Loading blocks database: %s", dbPath)
|
||||
log.Infof("Loading blocks database: %s", dbPath)
|
||||
db, err := database.Open("ffldb", dbPath, chainPramas().Net)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "open blocks database")
|
||||
|
@ -27,7 +27,7 @@ func loadBlocksDB() (database.DB, error) {
|
|||
func loadChain(db database.DB) (*blockchain.BlockChain, error) {
|
||||
paramsCopy := chaincfg.MainNetParams
|
||||
|
||||
log.Debugf("Loading chain from database")
|
||||
log.Infof("Loading chain from database")
|
||||
|
||||
startTime := time.Now()
|
||||
chain, err := blockchain.New(&blockchain.Config{
|
||||
|
@ -40,7 +40,7 @@ func loadChain(db database.DB) (*blockchain.BlockChain, error) {
|
|||
return nil, errors.Wrapf(err, "create blockchain")
|
||||
}
|
||||
|
||||
log.Debugf("Loaded chain from database (%s)", time.Since(startTime))
|
||||
log.Infof("Loaded chain from database (%s)", time.Since(startTime))
|
||||
|
||||
return chain, err
|
||||
|
||||
|
|
|
@ -3,21 +3,20 @@ package cmd
|
|||
import (
|
||||
"os"
|
||||
|
||||
"github.com/btcsuite/btclog"
|
||||
"github.com/lbryio/lbcd/claimtrie/config"
|
||||
"github.com/lbryio/lbcd/claimtrie/param"
|
||||
"github.com/lbryio/lbcd/limits"
|
||||
"github.com/lbryio/lbcd/wire"
|
||||
|
||||
"github.com/btcsuite/btclog"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
log = btclog.NewBackend(os.Stdout).Logger("CMDL")
|
||||
cfg = config.DefaultConfig
|
||||
netName string
|
||||
dataDir string
|
||||
debugLevel string
|
||||
log btclog.Logger
|
||||
cfg = config.DefaultConfig
|
||||
netName string
|
||||
dataDir string
|
||||
)
|
||||
|
||||
var rootCmd = NewRootCommand()
|
||||
|
@ -29,9 +28,6 @@ func NewRootCommand() *cobra.Command {
|
|||
Short: "ClaimTrie Command Line Interface",
|
||||
SilenceUsage: true,
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
level, _ := btclog.LevelFromString(debugLevel)
|
||||
log.SetLevel(level)
|
||||
|
||||
switch netName {
|
||||
case "mainnet":
|
||||
param.SetNetwork(wire.MainNet)
|
||||
|
@ -41,20 +37,21 @@ func NewRootCommand() *cobra.Command {
|
|||
param.SetNetwork(wire.TestNet)
|
||||
}
|
||||
},
|
||||
PersistentPostRun: func(cmd *cobra.Command, args []string) {
|
||||
os.Stdout.Sync()
|
||||
},
|
||||
}
|
||||
|
||||
cmd.PersistentFlags().StringVar(&netName, "netname", "mainnet", "Net name")
|
||||
cmd.PersistentFlags().StringVarP(&dataDir, "datadir", "b", cfg.DataDir, "Data dir")
|
||||
cmd.PersistentFlags().StringVarP(&debugLevel, "debuglevel", "d", cfg.DebugLevel, "Debug level")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
|
||||
backendLogger := btclog.NewBackend(os.Stdout)
|
||||
defer os.Stdout.Sync()
|
||||
log = backendLogger.Logger("CMDL")
|
||||
log.SetLevel(btclog.LevelDebug)
|
||||
|
||||
// Up some limits.
|
||||
if err := limits.SetLimits(); err != nil {
|
||||
log.Errorf("failed to set limits: %v\n", err)
|
||||
|
|
|
@ -8,10 +8,11 @@ import (
|
|||
)
|
||||
|
||||
var DefaultConfig = Config{
|
||||
Params: param.MainNet,
|
||||
RamTrie: true, // as it stands the other trie uses more RAM, more time, and 40GB+ of disk space
|
||||
DebugLevel: "info",
|
||||
DataDir: filepath.Join(btcutil.AppDataDir("lbcd", false), "data"),
|
||||
Params: param.MainNet,
|
||||
|
||||
RamTrie: true, // as it stands the other trie uses more RAM, more time, and 40GB+ of disk space
|
||||
|
||||
DataDir: filepath.Join(btcutil.AppDataDir("chain", false), "data"),
|
||||
|
||||
BlockRepoPebble: pebbleConfig{
|
||||
Path: "blocks_pebble_db",
|
||||
|
@ -29,10 +30,11 @@ var DefaultConfig = Config{
|
|||
|
||||
// Config is the container of all configurations.
|
||||
type Config struct {
|
||||
Params param.ClaimTrieParams
|
||||
RamTrie bool
|
||||
DataDir string
|
||||
DebugLevel string
|
||||
Params param.ClaimTrieParams
|
||||
|
||||
RamTrie bool
|
||||
|
||||
DataDir string
|
||||
|
||||
BlockRepoPebble pebbleConfig
|
||||
NodeRepoPebble pebbleConfig
|
||||
|
|
|
@ -1,73 +0,0 @@
|
|||
// Copyright (c) 2015-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package claimtrie
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btclog"
|
||||
)
|
||||
|
||||
// claimProgressLogger provides periodic logging for other services in order
|
||||
// to show users progress of certain "actions" involving some or all current
|
||||
// claim names. Ex: rebuilding claimtrie.
|
||||
type claimProgressLogger struct {
|
||||
totalLogName int64
|
||||
recentLogName int64
|
||||
lastLogNameTime time.Time
|
||||
|
||||
subsystemLogger btclog.Logger
|
||||
progressAction string
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// newClaimProgressLogger returns a new name progress logger.
|
||||
// The progress message is templated as follows:
|
||||
//
|
||||
// {progressAction} {numProcessed} {names|name} in the last {timePeriod} (total {totalProcessed})
|
||||
func newClaimProgressLogger(progressMessage string, logger btclog.Logger) *claimProgressLogger {
|
||||
return &claimProgressLogger{
|
||||
lastLogNameTime: time.Now(),
|
||||
progressAction: progressMessage,
|
||||
subsystemLogger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// LogName logs a new claim name as an information message to show progress
|
||||
// to the user. In order to prevent spam, it limits logging to one message
|
||||
// every 10 seconds with duration and totals included.
|
||||
func (n *claimProgressLogger) LogName(name []byte) {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
n.totalLogName++
|
||||
n.recentLogName++
|
||||
|
||||
now := time.Now()
|
||||
duration := now.Sub(n.lastLogNameTime)
|
||||
if duration < time.Second*10 {
|
||||
return
|
||||
}
|
||||
|
||||
// Truncate the duration to 10s of milliseconds.
|
||||
durationMillis := int64(duration / time.Millisecond)
|
||||
tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)
|
||||
|
||||
// Log information about progress.
|
||||
nameStr := "names"
|
||||
if n.recentLogName == 1 {
|
||||
nameStr = "name"
|
||||
}
|
||||
n.subsystemLogger.Infof("%s %d claim %s in the last %s (total %d)",
|
||||
n.progressAction, n.recentLogName, nameStr, tDuration, n.totalLogName)
|
||||
|
||||
n.recentLogName = 0
|
||||
n.lastLogNameTime = now
|
||||
}
|
||||
|
||||
func (n *claimProgressLogger) SetLastLogTime(time time.Time) {
|
||||
n.lastLogNameTime = time
|
||||
}
|
|
@ -253,3 +253,7 @@ func (t *PersistentTrie) Dump(s string) {
|
|||
func (t *PersistentTrie) Flush() error {
|
||||
return t.repo.Flush()
|
||||
}
|
||||
|
||||
func (t *PersistentTrie) MerklePath(name []byte) []HashSidePair {
|
||||
panic("MerklePath not implemented in PersistentTrie")
|
||||
}
|
||||
|
|
|
@ -16,6 +16,7 @@ type MerkleTrie interface {
|
|||
MerkleHash() *chainhash.Hash
|
||||
MerkleHashAllClaims() *chainhash.Hash
|
||||
Flush() error
|
||||
MerklePath(name []byte) []HashSidePair
|
||||
}
|
||||
|
||||
type RamTrie struct {
|
||||
|
@ -111,29 +112,80 @@ func (rt *RamTrie) merkleHashAllClaims(v *collapsedVertex) *chainhash.Hash {
|
|||
return v.merkleHash
|
||||
}
|
||||
|
||||
childHashes := make([]*chainhash.Hash, 0, len(v.children))
|
||||
for _, ch := range v.children {
|
||||
h := rt.merkleHashAllClaims(ch)
|
||||
childHashes = append(childHashes, h)
|
||||
}
|
||||
childHash, hasChildren := rt.computeChildHash(v)
|
||||
|
||||
claimHash := NoClaimsHash
|
||||
if v.claimHash != nil {
|
||||
claimHash = v.claimHash
|
||||
} else if len(childHashes) == 0 {
|
||||
} else if !hasChildren {
|
||||
return nil
|
||||
}
|
||||
|
||||
childHash := NoChildrenHash
|
||||
if len(childHashes) > 0 {
|
||||
// this shouldn't be referencing node; where else can we put this merkle root func?
|
||||
childHash = node.ComputeMerkleRoot(childHashes)
|
||||
}
|
||||
|
||||
v.merkleHash = node.HashMerkleBranches(childHash, claimHash)
|
||||
return v.merkleHash
|
||||
}
|
||||
|
||||
func (rt *RamTrie) computeChildHash(v *collapsedVertex) (*chainhash.Hash, bool) {
|
||||
childHashes := make([]*chainhash.Hash, 0, len(v.children))
|
||||
for _, ch := range v.children {
|
||||
h := rt.merkleHashAllClaims(ch)
|
||||
childHashes = append(childHashes, h)
|
||||
}
|
||||
childHash := NoChildrenHash
|
||||
if len(childHashes) > 0 {
|
||||
// this shouldn't be referencing node; where else can we put this merkle root func?
|
||||
childHash = node.ComputeMerkleRoot(childHashes)
|
||||
}
|
||||
return childHash, len(childHashes) > 0
|
||||
}
|
||||
|
||||
func (rt *RamTrie) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type HashSidePair struct {
|
||||
Right bool
|
||||
Hash *chainhash.Hash
|
||||
}
|
||||
|
||||
func (rt *RamTrie) MerklePath(name []byte) []HashSidePair {
|
||||
|
||||
// algorithm:
|
||||
// for each node in the path to key:
|
||||
// get all the childHashes for that node and the index of our path
|
||||
// get all the claimHashes for that node as well
|
||||
// if we're at the end of the path:
|
||||
// push(true, root(childHashes))
|
||||
// push all of merklePath(claimHashes, bid)
|
||||
// else
|
||||
// push(false, root(claimHashes)
|
||||
// push all of merklePath(childHashes, child index)
|
||||
|
||||
var results []HashSidePair
|
||||
|
||||
indexes, path := rt.FindPath(name)
|
||||
for i := 0; i < len(indexes); i++ {
|
||||
if i == len(indexes)-1 {
|
||||
childHash, _ := rt.computeChildHash(path[i])
|
||||
results = append(results, HashSidePair{Right: true, Hash: childHash})
|
||||
// letting the caller append the claim hashes at present (needs better code organization)
|
||||
} else {
|
||||
ch := path[i].claimHash
|
||||
if ch == nil {
|
||||
ch = NoClaimsHash
|
||||
}
|
||||
results = append(results, HashSidePair{Right: false, Hash: ch})
|
||||
childHashes := make([]*chainhash.Hash, 0, len(path[i].children))
|
||||
for j := range path[i].children {
|
||||
childHashes = append(childHashes, path[i].children[j].merkleHash)
|
||||
}
|
||||
if len(childHashes) > 0 {
|
||||
partials := node.ComputeMerklePath(childHashes, indexes[i+1])
|
||||
for i := len(partials) - 1; i >= 0; i-- {
|
||||
results = append(results, HashSidePair{Right: ((indexes[i+1] >> i) & 1) > 0, Hash: partials[i]})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
|
|
@ -17,11 +17,10 @@ func newVertex(hash *chainhash.Hash) *vertex {

// TODO: more professional to use msgpack here?

// nbuf decodes the on-disk format of a node, which has the following form:
//
//	ch(1B) hash(32B)
//	...
//	ch(1B) hash(32B)
//	vhash(32B)
type nbuf []byte

func (nb nbuf) entries() int {
|
||||
|
|
|
@ -1,85 +0,0 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
|
||||
"github.com/lbryio/lbcd/claimtrie/change"
|
||||
)
|
||||
|
||||
type cacheLeaf struct {
|
||||
node *Node
|
||||
element *list.Element
|
||||
changes []change.Change
|
||||
height int32
|
||||
}
|
||||
|
||||
type Cache struct {
|
||||
nodes map[string]*cacheLeaf
|
||||
order *list.List
|
||||
limit int
|
||||
}
|
||||
|
||||
func (nc *Cache) insert(name []byte, n *Node, height int32) {
|
||||
key := string(name)
|
||||
|
||||
existing := nc.nodes[key]
|
||||
if existing != nil {
|
||||
existing.node = n
|
||||
existing.height = height
|
||||
existing.changes = nil
|
||||
nc.order.MoveToFront(existing.element)
|
||||
return
|
||||
}
|
||||
|
||||
for nc.order.Len() >= nc.limit {
|
||||
// TODO: maybe ensure that we don't remove nodes that have a lot of changes?
|
||||
delete(nc.nodes, nc.order.Back().Value.(string))
|
||||
nc.order.Remove(nc.order.Back())
|
||||
}
|
||||
|
||||
element := nc.order.PushFront(key)
|
||||
nc.nodes[key] = &cacheLeaf{node: n, element: element, height: height}
|
||||
}
|
||||
|
||||
func (nc *Cache) fetch(name []byte, height int32) (*Node, []change.Change, int32) {
|
||||
key := string(name)
|
||||
|
||||
existing := nc.nodes[key]
|
||||
if existing != nil && existing.height <= height {
|
||||
nc.order.MoveToFront(existing.element)
|
||||
return existing.node, existing.changes, existing.height
|
||||
}
|
||||
return nil, nil, -1
|
||||
}
|
||||
|
||||
func (nc *Cache) addChanges(changes []change.Change, height int32) {
|
||||
for _, c := range changes {
|
||||
key := string(c.Name)
|
||||
existing := nc.nodes[key]
|
||||
if existing != nil && existing.height <= height {
|
||||
existing.changes = append(existing.changes, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (nc *Cache) drop(names [][]byte) {
|
||||
for _, name := range names {
|
||||
key := string(name)
|
||||
existing := nc.nodes[key]
|
||||
if existing != nil {
|
||||
// we can't roll it backwards because we don't know its previous height value; just toast it
|
||||
delete(nc.nodes, key)
|
||||
nc.order.Remove(existing.element)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (nc *Cache) clear() {
|
||||
nc.nodes = map[string]*cacheLeaf{}
|
||||
nc.order = list.New()
|
||||
// we'll let the GC sort out the remains...
|
||||
}
|
||||
|
||||
func NewCache(limit int) *Cache {
|
||||
return &Cache{limit: limit, nodes: map[string]*cacheLeaf{}, order: list.New()}
|
||||
}
|
|
@ -7,6 +7,7 @@ import (
|
|||
|
||||
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||
"github.com/lbryio/lbcd/claimtrie/change"
|
||||
"github.com/lbryio/lbcd/claimtrie/param"
|
||||
"github.com/lbryio/lbcd/wire"
|
||||
)
|
||||
|
||||
|
@ -57,6 +58,15 @@ func (c *Claim) setStatus(status Status) *Claim {
|
|||
return c
|
||||
}
|
||||
|
||||
func (c *Claim) ExpireAt() int32 {
|
||||
|
||||
if c.AcceptedAt+param.ActiveParams.OriginalClaimExpirationTime > param.ActiveParams.ExtendedClaimExpirationForkHeight {
|
||||
return c.AcceptedAt + param.ActiveParams.ExtendedClaimExpirationTime
|
||||
}
|
||||
|
||||
return c.AcceptedAt + param.ActiveParams.OriginalClaimExpirationTime
|
||||
}
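The new ExpireAt helper picks the extended expiration window only when the original window would already reach past the extension fork. A small worked illustration with made-up parameter values (not the real network constants):

// Hypothetical values, for illustration only.
const (
	originalExpiration = 100 // blocks
	extendedExpiration = 200 // blocks
	extensionFork      = 400 // fork height
)

// accepted at 250: 250+100 = 350, not past the fork -> expires at 350
// accepted at 350: 350+100 = 450, past the fork     -> expires at 350+200 = 550
func expireAt(acceptedAt int32) int32 {
	if acceptedAt+originalExpiration > extensionFork {
		return acceptedAt + extendedExpiration
	}
	return acceptedAt + originalExpiration
}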
|
||||
|
||||
func OutPointLess(a, b wire.OutPoint) bool {
|
||||
|
||||
switch cmp := bytes.Compare(a.Hash[:], b.Hash[:]); {
|
||||
|
|
|
@ -1,7 +1,11 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||
"github.com/lbryio/lbcd/claimtrie/change"
|
||||
"github.com/lbryio/lbcd/claimtrie/param"
|
||||
)
|
||||
|
||||
|
@ -9,6 +13,10 @@ type HashV2Manager struct {
|
|||
Manager
|
||||
}
|
||||
|
||||
type HashV3Manager struct {
|
||||
Manager
|
||||
}
|
||||
|
||||
func (nm *HashV2Manager) computeClaimHashes(name []byte) (*chainhash.Hash, int32) {
|
||||
|
||||
n, err := nm.NodeAt(nm.Height(), name)
|
||||
|
@ -24,9 +32,9 @@ func (nm *HashV2Manager) computeClaimHashes(name []byte) (*chainhash.Hash, int32
|
|||
}
|
||||
}
|
||||
if len(claimHashes) > 0 {
|
||||
return ComputeMerkleRoot(claimHashes), n.NextUpdate()
|
||||
return ComputeMerkleRoot(claimHashes), n.NextUpdate(nm.Height())
|
||||
}
|
||||
return nil, n.NextUpdate()
|
||||
return nil, n.NextUpdate(nm.Height())
|
||||
}
|
||||
|
||||
func (nm *HashV2Manager) Hash(name []byte) (*chainhash.Hash, int32) {
|
||||
|
@ -37,3 +45,72 @@ func (nm *HashV2Manager) Hash(name []byte) (*chainhash.Hash, int32) {
|
|||
|
||||
return nm.Manager.Hash(name)
|
||||
}
|
||||
|
||||
func (nm *HashV3Manager) AppendChange(chg change.Change) {
|
||||
if nm.Height() >= param.ActiveParams.GrandForkHeight && len(chg.Name) == 0 {
|
||||
return
|
||||
}
|
||||
nm.Manager.AppendChange(chg)
|
||||
}
|
||||
|
||||
func ComputeBidSeqNameHash(name []byte, c *Claim, bid, takeover int32) (*chainhash.Hash, error) {
|
||||
|
||||
s := sha256.New()
|
||||
|
||||
s.Write(c.OutPoint.Hash[:])
|
||||
|
||||
var temp [4]byte
|
||||
binary.BigEndian.PutUint32(temp[:], c.OutPoint.Index)
|
||||
s.Write(temp[:])
|
||||
|
||||
binary.BigEndian.PutUint32(temp[:], uint32(bid))
|
||||
s.Write(temp[:])
|
||||
|
||||
binary.BigEndian.PutUint32(temp[:], uint32(c.Sequence))
|
||||
s.Write(temp[:])
|
||||
|
||||
binary.BigEndian.PutUint32(temp[:], uint32(takeover))
|
||||
s.Write(temp[:])
|
||||
|
||||
s.Write(name)
|
||||
|
||||
var m [sha256.Size]byte
|
||||
return chainhash.NewHash(s.Sum(m[:0]))
|
||||
}
|
||||
|
||||
func (nm *HashV3Manager) bidSeqNameHash(name []byte) (*chainhash.Hash, int32) {
|
||||
n, err := nm.NodeAt(nm.Height(), name)
|
||||
if err != nil || n == nil {
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
claimHashes := ComputeClaimHashes(name, n)
|
||||
if len(claimHashes) > 0 {
|
||||
return ComputeMerkleRoot(claimHashes), n.NextUpdate(nm.Height())
|
||||
}
|
||||
return nil, n.NextUpdate(nm.Height())
|
||||
}
|
||||
|
||||
func ComputeClaimHashes(name []byte, n *Node) []*chainhash.Hash {
|
||||
n.SortClaimsByBid()
|
||||
claimHashes := make([]*chainhash.Hash, 0, len(n.Claims))
|
||||
for i, c := range n.Claims {
|
||||
if c.Status == Activated {
|
||||
h, _ := ComputeBidSeqNameHash(name, c, int32(i), n.TakenOverAt)
|
||||
claimHashes = append(claimHashes, h)
|
||||
}
|
||||
}
|
||||
return claimHashes
|
||||
}
|
||||
|
||||
func (nm *HashV3Manager) Hash(name []byte) (*chainhash.Hash, int32) {
|
||||
|
||||
if nm.Height() >= param.ActiveParams.GrandForkHeight {
|
||||
if len(name) == 0 {
|
||||
return nil, 0 // empty name's claims are not included in the hash
|
||||
}
|
||||
return nm.bidSeqNameHash(name)
|
||||
}
|
||||
|
||||
return nm.Manager.Hash(name)
|
||||
}
|
||||
|
|
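ComputeBidSeqNameHash above commits to the claim's outpoint, its bid position, its sequence number, the node's takeover height, and the name, all in a single SHA-256. The standalone sketch below reproduces that preimage layout with plain byte slices in place of the Claim and chainhash types; every value in it is made up:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// bidSeqNameHash mirrors the preimage layout used above:
// txid || output index || bid || sequence || takeover height || name.
func bidSeqNameHash(txid [32]byte, index uint32, bid, sequence, takeover int32, name []byte) [32]byte {
	s := sha256.New()
	s.Write(txid[:])

	var temp [4]byte
	binary.BigEndian.PutUint32(temp[:], index)
	s.Write(temp[:])
	binary.BigEndian.PutUint32(temp[:], uint32(bid))
	s.Write(temp[:])
	binary.BigEndian.PutUint32(temp[:], uint32(sequence))
	s.Write(temp[:])
	binary.BigEndian.PutUint32(temp[:], uint32(takeover))
	s.Write(temp[:])

	s.Write(name)

	var out [32]byte
	copy(out[:], s.Sum(nil))
	return out
}

func main() {
	var txid [32]byte // all zeros, purely illustrative
	h := bidSeqNameHash(txid, 1, 0, 7, 1200000, []byte("example"))
	fmt.Println(hex.EncodeToString(h[:]))
}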
|
@@ -55,3 +55,59 @@ func calculateNodeHash(op wire.OutPoint, takeover int32) *chainhash.Hash {

	return &hh
}

func ComputeMerklePath(hashes []*chainhash.Hash, idx int) []*chainhash.Hash {
	count := 0
	matchlevel := -1
	matchh := false
	var h *chainhash.Hash
	var res []*chainhash.Hash
	var inner [32]*chainhash.Hash // old code had 32; dunno if it's big enough for all scenarios

	iterateInner := func(level int) int {
		for ; (count & (1 << level)) == 0; level++ {
			ihash := inner[level]
			if matchh {
				res = append(res, ihash)
			} else if matchlevel == level {
				res = append(res, h)
				matchh = true
			}
			h = HashMerkleBranches(ihash, h)
		}
		return level
	}

	for count < len(hashes) {
		h = hashes[count]
		matchh = count == idx
		count++
		level := iterateInner(0)
		// Store the resulting hash at inner position level.
		inner[level] = h
		if matchh {
			matchlevel = level
		}
	}

	level := 0
	for (count & (1 << level)) == 0 {
		level++
	}

	h = inner[level]
	matchh = matchlevel == level

	for count != (1 << level) {
		// If we reach this point, h is an inner value that is not the top.
		if matchh {
			res = append(res, h)
		}
		h = HashMerkleBranches(h, h)
		// Increment count to the value it would have if two entries at this
		count += 1 << level
		level++
		level = iterateInner(level)
	}
	return res
}
|
|
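ComputeMerklePath fills inner[] the way a binary counter carries bits: one slot per set bit of the running leaf count, so the 32-entry array questioned in the comment covers any claim list of up to roughly 2^32 leaves. A quick sketch of that sizing argument:

package main

import (
	"fmt"
	"math/bits"
)

// innerSlotsNeeded reports how many inner[] levels the binary-counter
// accumulation can touch for a given number of leaves.
func innerSlotsNeeded(leaves uint) int {
	return bits.Len(leaves) // position of the highest set bit, plus one
}

func main() {
	for _, n := range []uint{1, 2, 3, 1000, 1 << 20} {
		fmt.Printf("%8d leaves -> %2d levels\n", n, innerSlotsNeeded(n))
	}
	// Anything below 2^32 leaves fits in the 32-entry array.
}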
|
@@ -29,10 +29,6 @@ func UseLogger(logger btclog.Logger) {
	log = logger
}

func GetLogger() btclog.Logger {
	return log
}

var loggedStrings = map[string]bool{} // is this gonna get too large?
var loggedStringsMutex sync.Mutex

@@ -46,10 +42,6 @@ func LogOnce(s string) {
	log.Info(s)
}

func Log(s string) {
	log.Info(s)
}

func Warn(s string) {
	log.Warn(s)
}
|
|
|
@@ -13,15 +13,14 @@ import (

type Manager interface {
	AppendChange(chg change.Change)
	IncrementHeightTo(height int32, temporary bool) ([][]byte, error)
	DecrementHeightTo(affectedNames [][]byte, height int32) ([][]byte, error)
	IncrementHeightTo(height int32) ([][]byte, error)
	DecrementHeightTo(affectedNames [][]byte, height int32) error
	Height() int32
	Close() error
	NodeAt(height int32, name []byte) (*Node, error)
	IterateNames(predicate func(name []byte) bool)
	Hash(name []byte) (*chainhash.Hash, int32)
	Flush() error
	ClearCache()
}

type BaseManager struct {
|
@ -29,82 +28,43 @@ type BaseManager struct {
|
|||
|
||||
height int32
|
||||
changes []change.Change
|
||||
|
||||
tempChanges map[string][]change.Change
|
||||
|
||||
cache *Cache
|
||||
}
|
||||
|
||||
func NewBaseManager(repo Repo) (*BaseManager, error) {
|
||||
|
||||
nm := &BaseManager{
|
||||
repo: repo,
|
||||
cache: NewCache(10000), // TODO: how many should we cache?
|
||||
repo: repo,
|
||||
}
|
||||
|
||||
return nm, nil
|
||||
}
|
||||
|
||||
func (nm *BaseManager) ClearCache() {
|
||||
nm.cache.clear()
|
||||
}
|
||||
|
||||
func (nm *BaseManager) NodeAt(height int32, name []byte) (*Node, error) {
|
||||
|
||||
n, changes, oldHeight := nm.cache.fetch(name, height)
|
||||
if n == nil {
|
||||
changes, err := nm.repo.LoadChanges(name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "in load changes")
|
||||
}
|
||||
changes, err := nm.repo.LoadChanges(name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "in load changes")
|
||||
}
|
||||
|
||||
if nm.tempChanges != nil { // making an assumption that we only ever have tempChanges for a single block
|
||||
changes = append(changes, nm.tempChanges[string(name)]...)
|
||||
}
|
||||
|
||||
n, err = nm.newNodeFromChanges(changes, height)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "in new node")
|
||||
}
|
||||
// TODO: how can we tell what needs to be cached?
|
||||
if nm.tempChanges == nil && height == nm.height && n != nil && (len(changes) > 4 || len(name) < 12) {
|
||||
nm.cache.insert(name, n, height)
|
||||
}
|
||||
} else {
|
||||
if nm.tempChanges != nil { // making an assumption that we only ever have tempChanges for a single block
|
||||
changes = append(changes, nm.tempChanges[string(name)]...)
|
||||
n = n.Clone()
|
||||
} else if height != nm.height {
|
||||
n = n.Clone()
|
||||
}
|
||||
updated, err := nm.updateFromChanges(n, changes, height)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "in update from changes")
|
||||
}
|
||||
if !updated {
|
||||
n.AdjustTo(oldHeight, height, name)
|
||||
}
|
||||
if nm.tempChanges == nil && height == nm.height {
|
||||
nm.cache.insert(name, n, height)
|
||||
}
|
||||
n, err := nm.newNodeFromChanges(changes, height)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "in new node")
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Node returns a node at the current height.
|
||||
// The returned node may have pending changes.
|
||||
func (nm *BaseManager) node(name []byte) (*Node, error) {
|
||||
return nm.NodeAt(nm.height, name)
|
||||
}
|
||||
// newNodeFromChanges returns a new Node constructed from the changes.
|
||||
// The changes must preserve their order received.
|
||||
func (nm *BaseManager) newNodeFromChanges(changes []change.Change, height int32) (*Node, error) {
|
||||
|
||||
func (nm *BaseManager) updateFromChanges(n *Node, changes []change.Change, height int32) (bool, error) {
|
||||
|
||||
count := len(changes)
|
||||
if count == 0 {
|
||||
return false, nil
|
||||
if len(changes) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
n := New()
|
||||
previous := changes[0].Height
|
||||
count := len(changes)
|
||||
|
||||
for i, chg := range changes {
|
||||
if chg.Height < previous {
|
||||
|
@ -123,37 +83,15 @@ func (nm *BaseManager) updateFromChanges(n *Node, changes []change.Change, heigh
|
|||
delay := nm.getDelayForName(n, chg)
|
||||
err := n.ApplyChange(chg, delay)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "in apply change")
|
||||
return nil, errors.Wrap(err, "in apply change")
|
||||
}
|
||||
}
|
||||
|
||||
if count <= 0 {
|
||||
// we applied no changes, which means we shouldn't exist if we had all the changes
|
||||
// or might mean nothing significant if we are applying a partial changeset
|
||||
return false, nil
|
||||
}
|
||||
lastChange := changes[count-1]
|
||||
n.AdjustTo(lastChange.Height, height, lastChange.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// newNodeFromChanges returns a new Node constructed from the changes.
|
||||
// The changes must preserve their order received.
|
||||
func (nm *BaseManager) newNodeFromChanges(changes []change.Change, height int32) (*Node, error) {
|
||||
|
||||
if len(changes) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
n := New()
|
||||
updated, err := nm.updateFromChanges(n, changes, height)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "in update from changes")
|
||||
}
|
||||
if updated {
|
||||
return n, nil
|
||||
}
|
||||
return nil, nil
|
||||
lastChange := changes[count-1]
|
||||
return n.AdjustTo(lastChange.Height, height, lastChange.Name), nil
|
||||
}
|
||||
|
||||
func (nm *BaseManager) AppendChange(chg change.Change) {
|
||||
|
@ -243,37 +181,24 @@ func collectChildNames(changes []change.Change) {
|
|||
// }
|
||||
//}
|
||||
|
||||
func (nm *BaseManager) IncrementHeightTo(height int32, temporary bool) ([][]byte, error) {
|
||||
func (nm *BaseManager) IncrementHeightTo(height int32) ([][]byte, error) {
|
||||
|
||||
if height <= nm.height {
|
||||
panic("invalid height")
|
||||
}
|
||||
|
||||
if height >= param.ActiveParams.MaxRemovalWorkaroundHeight {
|
||||
if height >= param.ActiveParams.MaxRemovalWorkaroundHeight && height < param.ActiveParams.GrandForkHeight {
|
||||
// not technically needed until block 884430, but to be true to the arbitrary rollback length...
|
||||
collectChildNames(nm.changes)
|
||||
}
|
||||
|
||||
if temporary {
|
||||
if nm.tempChanges != nil {
|
||||
return nil, errors.Errorf("expected nil temporary changes")
|
||||
}
|
||||
nm.tempChanges = map[string][]change.Change{}
|
||||
}
|
||||
names := make([][]byte, 0, len(nm.changes))
|
||||
for i := range nm.changes {
|
||||
names = append(names, nm.changes[i].Name)
|
||||
if temporary {
|
||||
name := string(nm.changes[i].Name)
|
||||
nm.tempChanges[name] = append(nm.tempChanges[name], nm.changes[i])
|
||||
}
|
||||
}
|
||||
|
||||
if !temporary {
|
||||
nm.cache.addChanges(nm.changes, height)
|
||||
if err := nm.repo.AppendChanges(nm.changes); err != nil { // destroys names
|
||||
return nil, errors.Wrap(err, "in append changes")
|
||||
}
|
||||
if err := nm.repo.AppendChanges(nm.changes); err != nil { // destroys names
|
||||
return nil, errors.Wrap(err, "in append changes")
|
||||
}
|
||||
|
||||
// Truncate the buffer size to zero.
|
||||
|
@ -287,31 +212,20 @@ func (nm *BaseManager) IncrementHeightTo(height int32, temporary bool) ([][]byte
|
|||
return names, nil
|
||||
}
|
||||
|
||||
func (nm *BaseManager) DecrementHeightTo(affectedNames [][]byte, height int32) ([][]byte, error) {
|
||||
func (nm *BaseManager) DecrementHeightTo(affectedNames [][]byte, height int32) error {
|
||||
if height >= nm.height {
|
||||
return affectedNames, errors.Errorf("invalid height of %d for %d", height, nm.height)
|
||||
return errors.Errorf("invalid height of %d for %d", height, nm.height)
|
||||
}
|
||||
|
||||
if nm.tempChanges != nil {
|
||||
if height != nm.height-1 {
|
||||
return affectedNames, errors.Errorf("invalid temporary rollback at %d to %d", height, nm.height)
|
||||
for _, name := range affectedNames {
|
||||
if err := nm.repo.DropChanges(name, height); err != nil {
|
||||
return errors.Wrap(err, "in drop changes")
|
||||
}
|
||||
for key := range nm.tempChanges {
|
||||
affectedNames = append(affectedNames, []byte(key))
|
||||
}
|
||||
nm.tempChanges = nil
|
||||
} else {
|
||||
for _, name := range affectedNames {
|
||||
if err := nm.repo.DropChanges(name, height); err != nil {
|
||||
return affectedNames, errors.Wrap(err, "in drop changes")
|
||||
}
|
||||
}
|
||||
|
||||
nm.cache.drop(affectedNames)
|
||||
}
|
||||
|
||||
nm.height = height
|
||||
|
||||
return affectedNames, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nm *BaseManager) getDelayForName(n *Node, chg change.Change) int32 {
|
||||
|
@ -355,6 +269,10 @@ func hasZeroActiveClaims(n *Node) bool {
|
|||
// aWorkaroundIsNeeded handles bugs that existed in previous versions
|
||||
func (nm *BaseManager) aWorkaroundIsNeeded(n *Node, chg change.Change) bool {
|
||||
|
||||
if chg.Height >= param.ActiveParams.GrandForkHeight {
|
||||
return false
|
||||
}
|
||||
|
||||
if chg.Type == change.SpendClaim || chg.Type == change.SpendSupport {
|
||||
return false
|
||||
}
|
||||
|
@ -436,17 +354,17 @@ func (nm *BaseManager) IterateNames(predicate func(name []byte) bool) {
|
|||
|
||||
func (nm *BaseManager) Hash(name []byte) (*chainhash.Hash, int32) {
|
||||
|
||||
n, err := nm.node(name)
|
||||
n, err := nm.NodeAt(nm.height, name)
|
||||
if err != nil || n == nil {
|
||||
return nil, 0
|
||||
}
|
||||
if len(n.Claims) > 0 {
|
||||
if n.BestClaim != nil && n.BestClaim.Status == Activated {
|
||||
h := calculateNodeHash(n.BestClaim.OutPoint, n.TakenOverAt)
|
||||
return h, n.NextUpdate()
|
||||
return h, n.NextUpdate(nm.height)
|
||||
}
|
||||
}
|
||||
return nil, n.NextUpdate()
|
||||
return nil, n.NextUpdate(nm.height)
|
||||
}
|
||||
|
||||
func (nm *BaseManager) Flush() error {
|
||||
|
|
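Taken together, these manager hunks change the block-processing contract: IncrementHeightTo loses its temporary flag and DecrementHeightTo now returns only an error. A hedged, self-contained sketch of the calling pattern implied by the new signatures (stubManager is a stand-in; the real BaseManager also persists changes and maintains the node cache):

package main

import (
	"errors"
	"fmt"
)

// A trimmed-down stand-in for the Manager interface above, keeping only the
// two methods whose signatures changed in this diff.
type heightManager interface {
	IncrementHeightTo(height int32) ([][]byte, error)
	DecrementHeightTo(affectedNames [][]byte, height int32) error
}

// stubManager only tracks the height; it exists to show the call flow.
type stubManager struct{ height int32 }

func (m *stubManager) IncrementHeightTo(height int32) ([][]byte, error) {
	if height <= m.height {
		return nil, errors.New("invalid height")
	}
	m.height = height
	return nil, nil // the real manager returns the names touched by this block
}

func (m *stubManager) DecrementHeightTo(affectedNames [][]byte, height int32) error {
	if height >= m.height {
		return fmt.Errorf("invalid height of %d for %d", height, m.height)
	}
	m.height = height
	return nil
}

func main() {
	var m heightManager = &stubManager{}
	names, _ := m.IncrementHeightTo(10) // connect a block
	if err := m.DecrementHeightTo(names, 9); err != nil { // roll it back
		fmt.Println(err)
	}
}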
|
@ -54,38 +54,38 @@ func TestSimpleAddClaim(t *testing.T) {
|
|||
r.NoError(err)
|
||||
defer m.Close()
|
||||
|
||||
_, err = m.IncrementHeightTo(10, false)
|
||||
_, err = m.IncrementHeightTo(10)
|
||||
r.NoError(err)
|
||||
|
||||
chg := change.NewChange(change.AddClaim).SetName(name1).SetOutPoint(out1).SetHeight(11)
|
||||
m.AppendChange(chg)
|
||||
_, err = m.IncrementHeightTo(11, false)
|
||||
_, err = m.IncrementHeightTo(11)
|
||||
r.NoError(err)
|
||||
|
||||
chg = chg.SetName(name2).SetOutPoint(out2).SetHeight(12)
|
||||
m.AppendChange(chg)
|
||||
_, err = m.IncrementHeightTo(12, false)
|
||||
_, err = m.IncrementHeightTo(12)
|
||||
r.NoError(err)
|
||||
|
||||
n1, err := m.node(name1)
|
||||
n1, err := m.NodeAt(m.height, name1)
|
||||
r.NoError(err)
|
||||
r.Equal(1, len(n1.Claims))
|
||||
r.NotNil(n1.Claims.find(byOut(*out1)))
|
||||
|
||||
n2, err := m.node(name2)
|
||||
n2, err := m.NodeAt(m.height, name2)
|
||||
r.NoError(err)
|
||||
r.Equal(1, len(n2.Claims))
|
||||
r.NotNil(n2.Claims.find(byOut(*out2)))
|
||||
|
||||
_, err = m.DecrementHeightTo([][]byte{name2}, 11)
|
||||
err = m.DecrementHeightTo([][]byte{name2}, 11)
|
||||
r.NoError(err)
|
||||
n2, err = m.node(name2)
|
||||
n2, err = m.NodeAt(m.height, name2)
|
||||
r.NoError(err)
|
||||
r.Nil(n2)
|
||||
|
||||
_, err = m.DecrementHeightTo([][]byte{name1}, 1)
|
||||
err = m.DecrementHeightTo([][]byte{name1}, 1)
|
||||
r.NoError(err)
|
||||
n2, err = m.node(name1)
|
||||
n2, err = m.NodeAt(m.height, name1)
|
||||
r.NoError(err)
|
||||
r.Nil(n2)
|
||||
}
|
||||
|
@ -102,7 +102,7 @@ func TestSupportAmounts(t *testing.T) {
|
|||
r.NoError(err)
|
||||
defer m.Close()
|
||||
|
||||
_, err = m.IncrementHeightTo(10, false)
|
||||
_, err = m.IncrementHeightTo(10)
|
||||
r.NoError(err)
|
||||
|
||||
chg := change.NewChange(change.AddClaim).SetName(name1).SetOutPoint(out1).SetHeight(11).SetAmount(3)
|
||||
|
@ -113,7 +113,7 @@ func TestSupportAmounts(t *testing.T) {
|
|||
chg.ClaimID = change.NewClaimID(*out2)
|
||||
m.AppendChange(chg)
|
||||
|
||||
_, err = m.IncrementHeightTo(11, false)
|
||||
_, err = m.IncrementHeightTo(11)
|
||||
r.NoError(err)
|
||||
|
||||
chg = change.NewChange(change.AddSupport).SetName(name1).SetOutPoint(out3).SetHeight(12).SetAmount(2)
|
||||
|
@ -128,10 +128,10 @@ func TestSupportAmounts(t *testing.T) {
|
|||
chg.ClaimID = change.NewClaimID(*out2)
|
||||
m.AppendChange(chg)
|
||||
|
||||
_, err = m.IncrementHeightTo(20, false)
|
||||
_, err = m.IncrementHeightTo(20)
|
||||
r.NoError(err)
|
||||
|
||||
n1, err := m.node(name1)
|
||||
n1, err := m.NodeAt(m.height, name1)
|
||||
r.NoError(err)
|
||||
r.Equal(2, len(n1.Claims))
|
||||
r.Equal(int64(5), n1.BestClaim.Amount+n1.SupportSums[n1.BestClaim.ClaimID.Key()])
|
||||
|
@ -194,14 +194,14 @@ func TestHasChildren(t *testing.T) {
|
|||
chg := change.NewChange(change.AddClaim).SetName([]byte("a")).SetOutPoint(out1).SetHeight(1).SetAmount(2)
|
||||
chg.ClaimID = change.NewClaimID(*out1)
|
||||
m.AppendChange(chg)
|
||||
_, err = m.IncrementHeightTo(1, false)
|
||||
_, err = m.IncrementHeightTo(1)
|
||||
r.NoError(err)
|
||||
r.False(m.hasChildren([]byte("a"), 1, nil, 1))
|
||||
|
||||
chg = change.NewChange(change.AddClaim).SetName([]byte("ab")).SetOutPoint(out2).SetHeight(2).SetAmount(2)
|
||||
chg.ClaimID = change.NewClaimID(*out2)
|
||||
m.AppendChange(chg)
|
||||
_, err = m.IncrementHeightTo(2, false)
|
||||
_, err = m.IncrementHeightTo(2)
|
||||
r.NoError(err)
|
||||
r.False(m.hasChildren([]byte("a"), 2, nil, 2))
|
||||
r.True(m.hasChildren([]byte("a"), 2, nil, 1))
|
||||
|
@ -209,14 +209,14 @@ func TestHasChildren(t *testing.T) {
|
|||
chg = change.NewChange(change.AddClaim).SetName([]byte("abc")).SetOutPoint(out3).SetHeight(3).SetAmount(2)
|
||||
chg.ClaimID = change.NewClaimID(*out3)
|
||||
m.AppendChange(chg)
|
||||
_, err = m.IncrementHeightTo(3, false)
|
||||
_, err = m.IncrementHeightTo(3)
|
||||
r.NoError(err)
|
||||
r.False(m.hasChildren([]byte("a"), 3, nil, 2))
|
||||
|
||||
chg = change.NewChange(change.AddClaim).SetName([]byte("ac")).SetOutPoint(out1).SetHeight(4).SetAmount(2)
|
||||
chg.ClaimID = change.NewClaimID(*out4)
|
||||
m.AppendChange(chg)
|
||||
_, err = m.IncrementHeightTo(4, false)
|
||||
_, err = m.IncrementHeightTo(4)
|
||||
r.NoError(err)
|
||||
r.True(m.hasChildren([]byte("a"), 4, nil, 2))
|
||||
}
|
||||
|
@ -249,8 +249,7 @@ func TestCollectChildren(t *testing.T) {
|
|||
r.Len(c[7].SpentChildren, 0)
|
||||
}
|
||||
|
||||
func TestTemporaryAddClaim(t *testing.T) {
|
||||
|
||||
func TestEndOfExpiration(t *testing.T) {
|
||||
r := require.New(t)
|
||||
|
||||
param.SetNetwork(wire.TestNet)
|
||||
|
@ -261,39 +260,23 @@ func TestTemporaryAddClaim(t *testing.T) {
|
|||
r.NoError(err)
|
||||
defer m.Close()
|
||||
|
||||
_, err = m.IncrementHeightTo(10, false)
|
||||
r.NoError(err)
|
||||
et := param.ActiveParams.ExtendedClaimExpirationForkHeight
|
||||
gf := param.ActiveParams.GrandForkHeight
|
||||
|
||||
chg := change.NewChange(change.AddClaim).SetName(name1).SetOutPoint(out1).SetHeight(11)
|
||||
chg := change.NewChange(change.AddClaim).SetName(name1).SetOutPoint(out1).SetHeight(et).SetAmount(2)
|
||||
chg.ClaimID = change.NewClaimID(*out1)
|
||||
m.AppendChange(chg)
|
||||
_, err = m.IncrementHeightTo(11, false)
|
||||
|
||||
_, err = m.IncrementHeightTo(et)
|
||||
r.NoError(err)
|
||||
n, err := m.NodeAt(m.height, name1)
|
||||
r.NoError(err)
|
||||
r.Equal(m.height+param.ActiveParams.ExtendedClaimExpirationTime, n.NextUpdate(m.height))
|
||||
|
||||
_, err = m.IncrementHeightTo(gf)
|
||||
r.NoError(err)
|
||||
|
||||
chg = chg.SetName(name2).SetOutPoint(out2).SetHeight(12)
|
||||
m.AppendChange(chg)
|
||||
_, err = m.IncrementHeightTo(12, true)
|
||||
n, err = m.NodeAt(m.height, name1)
|
||||
r.NoError(err)
|
||||
|
||||
n1, err := m.node(name1)
|
||||
r.NoError(err)
|
||||
r.Equal(1, len(n1.Claims))
|
||||
r.NotNil(n1.Claims.find(byOut(*out1)))
|
||||
|
||||
n2, err := m.node(name2)
|
||||
r.NoError(err)
|
||||
r.Equal(1, len(n2.Claims))
|
||||
r.NotNil(n2.Claims.find(byOut(*out2)))
|
||||
|
||||
names, err := m.DecrementHeightTo([][]byte{name2}, 11)
|
||||
r.Equal(names[0], name2)
|
||||
r.NoError(err)
|
||||
n2, err = m.node(name2)
|
||||
r.NoError(err)
|
||||
r.Nil(n2)
|
||||
|
||||
_, err = m.DecrementHeightTo([][]byte{name1}, 1)
|
||||
r.NoError(err)
|
||||
n2, err = m.node(name1)
|
||||
r.NoError(err)
|
||||
r.Nil(n2)
|
||||
r.Equal(int32(0), n.NextUpdate(m.height))
|
||||
}
|
||||
|
|
|
@ -110,16 +110,20 @@ func (n *Node) ApplyChange(chg change.Change, delay int32) error {
|
|||
}
|
||||
|
||||
// AdjustTo activates claims and computes takeovers until it reaches the specified height.
|
||||
func (n *Node) AdjustTo(height, maxHeight int32, name []byte) {
|
||||
func (n *Node) AdjustTo(height, maxHeight int32, name []byte) *Node {
|
||||
changed := n.handleExpiredAndActivated(height) > 0
|
||||
n.updateTakeoverHeight(height, name, changed)
|
||||
if maxHeight > height {
|
||||
for h := n.NextUpdate(); h <= maxHeight; h = n.NextUpdate() {
|
||||
for h := n.NextUpdate(height); h <= maxHeight; h = n.NextUpdate(height) {
|
||||
if h <= 0 {
|
||||
break
|
||||
}
|
||||
changed = n.handleExpiredAndActivated(h) > 0
|
||||
n.updateTakeoverHeight(h, name, changed)
|
||||
height = h
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (n *Node) updateTakeoverHeight(height int32, name []byte, refindBest bool) {
|
||||
|
@ -155,16 +159,6 @@ func (n *Node) updateTakeoverHeight(height int32, name []byte, refindBest bool)
|
|||
|
||||
func (n *Node) handleExpiredAndActivated(height int32) int {
|
||||
|
||||
ot := param.ActiveParams.OriginalClaimExpirationTime
|
||||
et := param.ActiveParams.ExtendedClaimExpirationTime
|
||||
fk := param.ActiveParams.ExtendedClaimExpirationForkHeight
|
||||
expiresAt := func(c *Claim) int32 {
|
||||
if c.AcceptedAt+ot > fk {
|
||||
return c.AcceptedAt + et
|
||||
}
|
||||
return c.AcceptedAt + ot
|
||||
}
|
||||
|
||||
changes := 0
|
||||
update := func(items ClaimList, sums map[string]int64) ClaimList {
|
||||
for i := 0; i < len(items); i++ {
|
||||
|
@ -176,7 +170,7 @@ func (n *Node) handleExpiredAndActivated(height int32) int {
|
|||
sums[c.ClaimID.Key()] += c.Amount
|
||||
}
|
||||
}
|
||||
if c.Status == Deactivated || expiresAt(c) <= height {
|
||||
if c.Status == Deactivated || (height < param.ActiveParams.GrandForkHeight && c.ExpireAt() <= height) {
|
||||
if i < len(items)-1 {
|
||||
items[i] = items[len(items)-1]
|
||||
i--
|
||||
|
@ -197,24 +191,13 @@ func (n *Node) handleExpiredAndActivated(height int32) int {
|
|||
|
||||
// NextUpdate returns the nearest height in the future that the node should
|
||||
// be refreshed due to changes of claims or supports.
|
||||
func (n Node) NextUpdate() int32 {
|
||||
|
||||
ot := param.ActiveParams.OriginalClaimExpirationTime
|
||||
et := param.ActiveParams.ExtendedClaimExpirationTime
|
||||
fk := param.ActiveParams.ExtendedClaimExpirationForkHeight
|
||||
expiresAt := func(c *Claim) int32 {
|
||||
if c.AcceptedAt+ot > fk {
|
||||
return c.AcceptedAt + et
|
||||
}
|
||||
return c.AcceptedAt + ot
|
||||
}
|
||||
func (n Node) NextUpdate(height int32) int32 {
|
||||
|
||||
next := int32(math.MaxInt32)
|
||||
|
||||
for _, c := range n.Claims {
|
||||
ea := expiresAt(c)
|
||||
if ea < next {
|
||||
next = ea
|
||||
if height < param.ActiveParams.GrandForkHeight && c.ExpireAt() < next {
|
||||
next = c.ExpireAt()
|
||||
}
|
||||
// if we're not active, we need to go to activeAt unless we're still invisible there
|
||||
if c.Status == Accepted {
|
||||
|
@ -229,9 +212,8 @@ func (n Node) NextUpdate() int32 {
|
|||
}
|
||||
|
||||
for _, s := range n.Supports {
|
||||
es := expiresAt(s)
|
||||
if es < next {
|
||||
next = es
|
||||
if height < param.ActiveParams.GrandForkHeight && s.ExpireAt() < next {
|
||||
next = s.ExpireAt()
|
||||
}
|
||||
if s.Status == Accepted {
|
||||
min := s.ActiveAt
|
||||
|
@ -244,6 +226,10 @@ func (n Node) NextUpdate() int32 {
|
|||
}
|
||||
}
|
||||
|
||||
if next == int32(math.MaxInt32) || next <= height {
|
||||
return 0
|
||||
}
|
||||
|
||||
return next
|
||||
}
|
||||
|
||||
|
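NextUpdate(height) now takes the current height and returns 0 once nothing about the node can change in the future, otherwise the earliest height at which an activation or expiration will alter it; callers such as Hash use that value to know when a cached hash goes stale. A tiny sketch of the same "smallest event, or zero if it is not in the future" contract over a plain slice of event heights:

package main

import (
	"fmt"
	"math"
)

// nextUpdate mirrors the final check in NextUpdate above: find the smallest
// event height, and report 0 unless it lies strictly after the given height.
func nextUpdate(events []int32, height int32) int32 {
	next := int32(math.MaxInt32)
	for _, e := range events {
		if e < next {
			next = e
		}
	}
	if next == int32(math.MaxInt32) || next <= height {
		return 0
	}
	return next
}

func main() {
	events := []int32{120, 150} // e.g. an activation at 120 and an expiration at 150
	fmt.Println(nextUpdate(events, 100)) // 120: recompute this name's hash at 120
	fmt.Println(nextUpdate(events, 200)) // 0: nothing left to wait for
}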
@ -339,28 +325,3 @@ func (n *Node) SortClaimsByBid() {
|
|||
return OutPointLess(n.Claims[j].OutPoint, n.Claims[i].OutPoint)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) Clone() *Node {
|
||||
clone := New()
|
||||
if n.SupportSums != nil {
|
||||
clone.SupportSums = map[string]int64{}
|
||||
for key, value := range n.SupportSums {
|
||||
clone.SupportSums[key] = value
|
||||
}
|
||||
}
|
||||
clone.Supports = make(ClaimList, len(n.Supports))
|
||||
for i, support := range n.Supports {
|
||||
clone.Supports[i] = &Claim{}
|
||||
*clone.Supports[i] = *support
|
||||
}
|
||||
clone.Claims = make(ClaimList, len(n.Claims))
|
||||
for i, claim := range n.Claims {
|
||||
clone.Claims[i] = &Claim{}
|
||||
*clone.Claims[i] = *claim
|
||||
}
|
||||
clone.TakenOverAt = n.TakenOverAt
|
||||
if n.BestClaim != nil {
|
||||
clone.BestClaim = clone.Claims.find(byID(n.BestClaim.ClaimID))
|
||||
}
|
||||
return clone
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@ package noderepo
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/cockroachdb/pebble"
|
||||
"github.com/lbryio/lbcd/claimtrie/change"
|
||||
|
@ -15,81 +13,15 @@ type Pebble struct {
|
|||
db *pebble.DB
|
||||
}
|
||||
|
||||
type pooledMerger struct {
|
||||
values [][]byte
|
||||
index []int
|
||||
pool *sync.Pool
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
func (a *pooledMerger) Len() int { return len(a.index) }
|
||||
func (a *pooledMerger) Less(i, j int) bool { return a.index[i] < a.index[j] }
|
||||
func (a *pooledMerger) Swap(i, j int) {
|
||||
a.index[i], a.index[j] = a.index[j], a.index[i]
|
||||
a.values[i], a.values[j] = a.values[j], a.values[i]
|
||||
}
|
||||
|
||||
func (a *pooledMerger) MergeNewer(value []byte) error {
|
||||
vc := a.pool.Get().([]byte)[:0]
|
||||
vc = append(vc, value...)
|
||||
a.values = append(a.values, vc)
|
||||
a.index = append(a.index, len(a.values))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *pooledMerger) MergeOlder(value []byte) error {
|
||||
vc := a.pool.Get().([]byte)[:0]
|
||||
vc = append(vc, value...)
|
||||
a.values = append(a.values, vc)
|
||||
a.index = append(a.index, -len(a.values))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *pooledMerger) Finish(includesBase bool) ([]byte, io.Closer, error) {
|
||||
sort.Sort(a)
|
||||
|
||||
a.buffer = a.pool.Get().([]byte)[:0]
|
||||
for i := range a.values {
|
||||
a.buffer = append(a.buffer, a.values[i]...)
|
||||
}
|
||||
|
||||
return a.buffer, a, nil
|
||||
}
|
||||
|
||||
func (a *pooledMerger) Close() error {
|
||||
for i := range a.values {
|
||||
a.pool.Put(a.values[i])
|
||||
}
|
||||
a.pool.Put(a.buffer)
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewPebble(path string) (*Pebble, error) {
|
||||
|
||||
mp := &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return make([]byte, 0, 256)
|
||||
},
|
||||
}
|
||||
|
||||
db, err := pebble.Open(path, &pebble.Options{
|
||||
Merger: &pebble.Merger{
|
||||
Merge: func(key, value []byte) (pebble.ValueMerger, error) {
|
||||
p := &pooledMerger{pool: mp}
|
||||
return p, p.MergeNewer(value)
|
||||
},
|
||||
Name: pebble.DefaultMerger.Name, // yes, it's a lie
|
||||
},
|
||||
Cache: pebble.NewCache(64 << 20),
|
||||
BytesPerSync: 8 << 20,
|
||||
MaxOpenFiles: 2000,
|
||||
})
|
||||
|
||||
db, err := pebble.Open(path, &pebble.Options{Cache: pebble.NewCache(64 << 20), BytesPerSync: 8 << 20, MaxOpenFiles: 2000})
|
||||
repo := &Pebble{db: db}
|
||||
|
||||
return repo, errors.Wrapf(err, "unable to open %s", path)
|
||||
}
|
||||
|
||||
// AppendChanges makes an assumption that anything you pass to it is newer than what was saved before.
|
||||
func (repo *Pebble) AppendChanges(changes []change.Change) error {
|
||||
|
||||
batch := repo.db.NewBatch()
|
||||
|
@ -130,7 +62,6 @@ func unmarshalChanges(name, data []byte) ([]change.Change, error) {
|
|||
changes := make([]change.Change, 0, len(data)/84+1) // average is 5.1 changes
|
||||
|
||||
buffer := bytes.NewBuffer(data)
|
||||
sortNeeded := false
|
||||
for buffer.Len() > 0 {
|
||||
var chg change.Change
|
||||
err := chg.Unmarshal(buffer)
|
||||
|
@ -138,18 +69,14 @@ func unmarshalChanges(name, data []byte) ([]change.Change, error) {
|
|||
return nil, errors.Wrap(err, "in decode")
|
||||
}
|
||||
chg.Name = name
|
||||
if len(changes) > 0 && chg.Height < changes[len(changes)-1].Height {
|
||||
sortNeeded = true // alternately: sortNeeded || chg.Height != chg.VisibleHeight
|
||||
}
|
||||
changes = append(changes, chg)
|
||||
}
|
||||
|
||||
if sortNeeded {
|
||||
// this was required for the normalization stuff:
|
||||
sort.SliceStable(changes, func(i, j int) bool {
|
||||
return changes[i].Height < changes[j].Height
|
||||
})
|
||||
}
|
||||
// this was required for the normalization stuff:
|
||||
sort.SliceStable(changes, func(i, j int) bool {
|
||||
return changes[i].Height < changes[j].Height
|
||||
})
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
|
@ -158,24 +85,22 @@ func (repo *Pebble) DropChanges(name []byte, finalHeight int32) error {
|
|||
if err != nil {
|
||||
return errors.Wrapf(err, "in load changes for %s", name)
|
||||
}
|
||||
buffer := bytes.NewBuffer(nil)
|
||||
for i := 0; i < len(changes); i++ { // assuming changes are ordered by height
|
||||
i := 0
|
||||
for ; i < len(changes); i++ { // assuming changes are ordered by height
|
||||
if changes[i].Height > finalHeight {
|
||||
break
|
||||
}
|
||||
if changes[i].VisibleHeight > finalHeight { // created after this height has to be skipped
|
||||
continue
|
||||
}
|
||||
// having to sort the changes really messes up performance here. It would be better to not remarshal
|
||||
err := changes[i].Marshal(buffer)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "in marshaller")
|
||||
if changes[i].VisibleHeight > finalHeight { // created after this height has to be deleted
|
||||
changes = append(changes[:i], changes[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
|
||||
// making a performance assumption that DropChanges won't happen often:
|
||||
err = repo.db.Set(name, buffer.Bytes(), pebble.NoSync)
|
||||
return errors.Wrapf(err, "in set at %s", name)
|
||||
err = repo.db.Set(name, []byte{}, pebble.NoSync)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "in set at %s", name)
|
||||
}
|
||||
return repo.AppendChanges(changes[:i])
|
||||
}
|
||||
|
||||
func (repo *Pebble) IterateChildren(name []byte, f func(changes []change.Change) bool) error {
|
||||
|
|
|
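The rewritten DropChanges keeps only the changes whose Height is at or below finalHeight and discards any earlier change whose VisibleHeight postdates it, then rewrites the key from scratch. The same truncate-and-filter idiom on a plain slice, as a sketch:

package main

import "fmt"

type chg struct{ Height, VisibleHeight int32 }

// keepThrough mirrors the loop in DropChanges: stop at the first change past
// finalHeight, and drop any earlier change that only became visible after it.
func keepThrough(changes []chg, finalHeight int32) []chg {
	i := 0
	for ; i < len(changes); i++ {
		if changes[i].Height > finalHeight {
			break
		}
		if changes[i].VisibleHeight > finalHeight {
			changes = append(changes[:i], changes[i+1:]...)
			i--
		}
	}
	return changes[:i]
}

func main() {
	cs := []chg{{5, 5}, {8, 12}, {9, 9}, {15, 15}}
	fmt.Println(keepThrough(cs, 10)) // [{5 5} {9 9}]
}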
@ -26,15 +26,14 @@ func (nm *NormalizingManager) AppendChange(chg change.Change) {
|
|||
nm.Manager.AppendChange(chg)
|
||||
}
|
||||
|
||||
func (nm *NormalizingManager) IncrementHeightTo(height int32, temporary bool) ([][]byte, error) {
|
||||
func (nm *NormalizingManager) IncrementHeightTo(height int32) ([][]byte, error) {
|
||||
nm.addNormalizationForkChangesIfNecessary(height)
|
||||
return nm.Manager.IncrementHeightTo(height, temporary)
|
||||
return nm.Manager.IncrementHeightTo(height)
|
||||
}
|
||||
|
||||
func (nm *NormalizingManager) DecrementHeightTo(affectedNames [][]byte, height int32) ([][]byte, error) {
|
||||
func (nm *NormalizingManager) DecrementHeightTo(affectedNames [][]byte, height int32) error {
|
||||
if nm.normalizedAt > height {
|
||||
nm.normalizedAt = -1
|
||||
nm.ClearCache()
|
||||
}
|
||||
return nm.Manager.DecrementHeightTo(affectedNames, height)
|
||||
}
|
||||
|
@ -111,7 +110,5 @@ func (nm *NormalizingManager) addNormalizationForkChangesIfNecessary(height int3
|
|||
|
||||
return true
|
||||
}
|
||||
|
||||
nm.Manager.ClearCache()
|
||||
nm.Manager.IterateNames(predicate)
|
||||
}
|
||||
|
|
1584
claimtrie/normalization/CaseFolding_v13.txt
Normal file
File diff suppressed because it is too large
|
@@ -38,7 +38,7 @@ func init() {
	}
}

func caseFold(name []byte) []byte {
func CaseFold(name []byte) []byte {
	var b bytes.Buffer
	b.Grow(len(name))
	for i := 0; i < len(name); {
||||
|
|
|
@ -1,177 +0,0 @@
|
|||
package normalization
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
_ "embed"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
//go:embed NFC_v11.txt
|
||||
var decompositions string // the data file that came from ICU 63.2
|
||||
|
||||
var nfdMap map[rune][]rune
|
||||
var nfdOrder map[rune]int32
|
||||
|
||||
func init() {
|
||||
nfdMap = map[rune][]rune{}
|
||||
nfdOrder = map[rune]int32{}
|
||||
scanner := bufio.NewScanner(strings.NewReader(decompositions))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if len(line) <= 0 || line[0] == '#' || line[0] == '*' {
|
||||
continue
|
||||
}
|
||||
if strings.ContainsAny(line, ":") {
|
||||
// it's a ordering def:
|
||||
addOrdering(line)
|
||||
continue
|
||||
}
|
||||
splits := strings.Split(line, "=")
|
||||
if len(splits) <= 1 {
|
||||
splits = strings.Split(line, ">")
|
||||
if len(splits) <= 1 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
key, err := strconv.ParseUint(splits[0], 16, len(splits[0])*4)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
splits = strings.Split(splits[1], " ")
|
||||
values := make([]rune, 0, len(splits))
|
||||
for j := range splits {
|
||||
value, err := strconv.ParseUint(splits[j], 16, len(splits[j])*4)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
existing := nfdMap[rune(value)]
|
||||
if len(existing) > 0 {
|
||||
values = append(values, existing...)
|
||||
} else {
|
||||
values = append(values, rune(value))
|
||||
}
|
||||
}
|
||||
nfdMap[rune(key)] = values
|
||||
}
|
||||
|
||||
// run one more expansion pass to catch stragglers
|
||||
for key, values := range nfdMap {
|
||||
for i, value := range values {
|
||||
other := nfdMap[value]
|
||||
if len(other) > 0 {
|
||||
newValues := make([]rune, len(values)+len(other)-1)
|
||||
copy(newValues, values[:i])
|
||||
copy(newValues[i:i+len(other)], other)
|
||||
copy(newValues[i+len(other):], values[i+1:])
|
||||
nfdMap[key] = newValues
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// assert no more expansions are necessary:
|
||||
for _, values := range nfdMap {
|
||||
for _, value := range values {
|
||||
other := nfdMap[value]
|
||||
if len(other) > 0 {
|
||||
panic("Failed in NFD expansion")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addOrdering(line string) {
|
||||
splits := strings.Split(line, ":")
|
||||
ranges := strings.Split(splits[0], "..")
|
||||
|
||||
value, err := strconv.ParseUint(splits[1], 16, len(splits[1])*4)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
start, err := strconv.ParseUint(ranges[0], 16, len(ranges[0])*4)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
end := start
|
||||
if len(ranges) > 1 {
|
||||
end, err = strconv.ParseUint(ranges[1], 16, len(ranges[0])*4)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
for i := start; i <= end; i++ {
|
||||
nfdOrder[rune(i)] = int32(value)
|
||||
}
|
||||
}
|
||||
|
||||
func decompose(name []byte) []byte {
|
||||
// see https://unicode.org/reports/tr15/ section 1.3
|
||||
runes := make([]rune, 0, len(name)) // we typically use ascii don't increase the length
|
||||
for i := 0; i < len(name); {
|
||||
r, w := utf8.DecodeRune(name[i:])
|
||||
if r == utf8.RuneError && w < 2 {
|
||||
// HACK: their RuneError is actually a valid character if coming from a width of 2 or more
|
||||
return name
|
||||
}
|
||||
replacements := nfdMap[r]
|
||||
if len(replacements) > 0 {
|
||||
runes = append(runes, replacements...)
|
||||
} else {
|
||||
hanguls := decomposeHangul(r)
|
||||
if len(hanguls) > 0 {
|
||||
runes = append(runes, hanguls...)
|
||||
} else {
|
||||
runes = append(runes, r)
|
||||
}
|
||||
}
|
||||
i += w
|
||||
}
|
||||
repairOrdering(runes)
|
||||
return []byte(string(runes))
|
||||
}
|
||||
|
||||
func decomposeHangul(s rune) []rune {
|
||||
// see https://www.unicode.org/versions/Unicode11.0.0/ch03.pdf
|
||||
|
||||
const SBase int32 = 0xAC00
|
||||
const LBase int32 = 0x1100
|
||||
const VBase int32 = 0x1161
|
||||
const TBase int32 = 0x11A7
|
||||
const LCount int32 = 19
|
||||
const VCount int32 = 21
|
||||
const TCount int32 = 28
|
||||
const NCount = VCount * TCount // 588
|
||||
const SCount = LCount * NCount // 11172
|
||||
|
||||
SIndex := s - SBase
|
||||
if SIndex < 0 || SIndex >= SCount {
|
||||
return nil
|
||||
}
|
||||
L := LBase + SIndex/NCount
|
||||
V := VBase + (SIndex%NCount)/TCount
|
||||
T := TBase + SIndex%TCount
|
||||
result := []rune{L, V}
|
||||
if T != TBase {
|
||||
result = append(result, T)
|
||||
}
|
||||
return result
|
||||
}
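decomposeHangul is the algorithmic decomposition from chapter 3 of the Unicode core spec: a precomposed syllable splits into a leading consonant, a vowel, and, when present, a trailing consonant (the T == TBase case yields only two runes). A worked example for U+AC01 using the same constants:

package main

import "fmt"

func main() {
	const (
		SBase, LBase, VBase, TBase = 0xAC00, 0x1100, 0x1161, 0x11A7
		VCount, TCount             = 21, 28
		NCount                     = VCount * TCount // 588
	)

	s := rune(0xAC01) // a precomposed Hangul syllable
	SIndex := int32(s) - SBase

	L := LBase + SIndex/NCount          // 0x1100, the leading consonant
	V := VBase + (SIndex%NCount)/TCount // 0x1161, the vowel
	T := TBase + SIndex%TCount          // 0x11A8, the trailing consonant (present since T != TBase)

	fmt.Printf("%U -> %U %U %U\n", s, rune(L), rune(V), rune(T))
	// U+AC01 -> U+1100 U+1161 U+11A8
}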
|
||||
|
||||
func repairOrdering(runes []rune) {
|
||||
for i := 1; i < len(runes); i++ {
|
||||
a := runes[i-1]
|
||||
b := runes[i]
|
||||
oa := nfdOrder[a]
|
||||
ob := nfdOrder[b]
|
||||
if oa > ob && ob > 0 {
|
||||
runes[i-1], runes[i] = b, a
|
||||
if i >= 2 {
|
||||
i -= 2
|
||||
} else {
|
||||
i = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -2,10 +2,11 @@ package normalization

import (
	"github.com/lbryio/lbcd/claimtrie/param"
	"golang.org/x/text/unicode/norm"
)

var Normalize = normalizeGo
var NormalizeTitle = "Normalizing strings via Go. Casefold and NFD table versions: 11.0.0 (from ICU 63.2)"
var NormalizeTitle = "Normalizing strings via Go. Casefold table version = 11.0.0, NFD version = " + norm.Version

func NormalizeIfNecessary(name []byte, height int32) []byte {
	if height < param.ActiveParams.NormalizedNameForkHeight {

@@ -16,7 +17,7 @@ func NormalizeIfNecessary(name []byte, height int32) []byte {

func normalizeGo(value []byte) []byte {

	normalized := decompose(value) // may need to hard-code the version on this
	normalized := norm.NFD.Bytes(value) // may need to hard-code the version on this
	// not using x/text/cases because it does too good of a job; it seems to use v14 tables even when it claims v13
	return caseFold(normalized)
	return CaseFold(normalized)
}
||||
|
|
|
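normalizeGo now decomposes with x/text's NFD form and then applies the local CaseFold table. The sketch below shows the shape of that pipeline using only x/text plus strings.ToLower as a rough stand-in for CaseFold (full case folding differs for some characters, so this is illustrative only):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// NFD first, then a fold. strings.ToLower is only an approximation of the
	// table-driven CaseFold above, but it shows the two-stage pipeline.
	name := "Åmelie"
	decomposed := norm.NFD.Bytes([]byte(name))
	normalized := strings.ToLower(string(decomposed))
	fmt.Printf("%q -> % x\n", name, normalized)
	// "Å" decomposes to "A" + U+030A (combining ring above), which then folds to "a" + U+030A.
}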
@ -31,8 +31,6 @@ package normalization
|
|||
// }
|
||||
import "C"
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
@ -49,29 +47,21 @@ func IcuVersion() string {
|
|||
}
|
||||
|
||||
func normalizeICU(value []byte) []byte {
|
||||
original := value
|
||||
if len(value) <= 0 {
|
||||
return value
|
||||
}
|
||||
|
||||
other := normalizeGo(value)
|
||||
|
||||
name := (*C.char)(unsafe.Pointer(&value[0]))
|
||||
length := C.int(len(value))
|
||||
|
||||
// hopefully this is a stack alloc (but it may be a bit large for that):
|
||||
var resultName [512]byte // inputs are restricted to 255 chars; it shouldn't expand too much past that
|
||||
pointer := unsafe.Pointer(&resultName[0])
|
||||
result := unsafe.Pointer(&resultName[0])
|
||||
|
||||
resultLength := C.normalize(name, length, (*C.char)(pointer))
|
||||
if resultLength > 0 {
|
||||
value = C.GoBytes(pointer, resultLength)
|
||||
resultLength := C.normalize(name, length, (*C.char)(result))
|
||||
if resultLength == 0 {
|
||||
return value
|
||||
}
|
||||
|
||||
// return resultName[0:resultLength] -- we want to shrink the pointer (not use a slice on 1024)
|
||||
if !bytes.Equal(other, value) {
|
||||
fmt.Printf("Failed with %s, %s != %s,\n\t%s, %s != %s,\n", original, value, other,
|
||||
hex.EncodeToString(original), hex.EncodeToString(value), hex.EncodeToString(other))
|
||||
}
|
||||
return value
|
||||
// return resultName[0:resultLength] -- we want to shrink the result (not use a slice on 1024)
|
||||
return C.GoBytes(result, resultLength)
|
||||
}
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
package normalization
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
|
@ -64,11 +63,3 @@ func TestBlock760150_1020105(t *testing.T) {
|
|||
// t.Logf("%s -> %s", s, string(b))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlock1085612(t *testing.T) {
|
||||
s, err := hex.DecodeString("6eccb7cd9dcc92cd90cc86cc80cc80cd91cd9dcd8acd80cd92cc94cc85cc8fccbdcda0ccbdcd80cda0cd84cc94cc8ccc9acd84cc94cd9bcda0cca7cc99ccaccd99cca9cca7")
|
||||
assert.NoError(t, err)
|
||||
a := normalizeICU(s)
|
||||
b := normalizeGo(s)
|
||||
assert.Equal(t, a, b, "%s != %s, %v", string(a), string(b), bytes.Equal(b, s))
|
||||
}
|
||||
|
|
|
@ -1,12 +1,7 @@
|
|||
package normalization
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -57,33 +52,3 @@ func benchmarkNormalize(b *testing.B, normalize func(value []byte) []byte) {
|
|||
require.True(b, len(s) >= 8)
|
||||
}
|
||||
}
|
||||
|
||||
//go:embed NormalizationTest_v11.txt
|
||||
var nfdTests string
|
||||
|
||||
func TestDecomposition(t *testing.T) {
|
||||
r := require.New(t)
|
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(nfdTests))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if len(line) <= 0 || line[0] == '@' || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
splits := strings.Split(line, ";")
|
||||
source := convertToBytes(splits[0])
|
||||
targetNFD := convertToBytes(splits[2])
|
||||
fixed := decompose(source)
|
||||
r.True(bytes.Equal(targetNFD, fixed), "Failed on %s -> %s. Got %U, not %U", splits[0], splits[2], fixed, targetNFD)
|
||||
}
|
||||
}
|
||||
|
||||
func convertToBytes(s string) []byte {
|
||||
splits := strings.Split(s, " ")
|
||||
var b bytes.Buffer
|
||||
for i := range splits {
|
||||
value, _ := strconv.ParseUint(splits[i], 16, len(splits[i])*4)
|
||||
b.WriteRune(rune(value))
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
|
|
@ -16,6 +16,7 @@ type ClaimTrieParams struct {
|
|||
|
||||
NormalizedNameForkHeight int32
|
||||
AllClaimsInMerkleForkHeight int32
|
||||
GrandForkHeight int32
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -32,6 +33,7 @@ var (
|
|||
MaxRemovalWorkaroundHeight: 658300,
|
||||
NormalizedNameForkHeight: 539940, // targeting 21 March 2019}, https://lbry.com/news/hf1903
|
||||
AllClaimsInMerkleForkHeight: 658309, // targeting 30 Oct 2019}, https://lbry.com/news/hf1910
|
||||
GrandForkHeight: 1200000,
|
||||
}
|
||||
|
||||
TestNet = ClaimTrieParams{
|
||||
|
@ -45,6 +47,7 @@ var (
|
|||
MaxRemovalWorkaroundHeight: 1, // if you get a hash mismatch, come back to this
|
||||
NormalizedNameForkHeight: 993380,
|
||||
AllClaimsInMerkleForkHeight: 1198559,
|
||||
GrandForkHeight: 1200000,
|
||||
}
|
||||
|
||||
Regtest = ClaimTrieParams{
|
||||
|
@ -58,6 +61,7 @@ var (
|
|||
MaxRemovalWorkaroundHeight: -1,
|
||||
NormalizedNameForkHeight: 250,
|
||||
AllClaimsInMerkleForkHeight: 349,
|
||||
GrandForkHeight: 850,
|
||||
}
|
||||
)
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@ import (
|
|||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/lbryio/lbcd/btcjson"
|
||||
"github.com/lbryio/lbcd/chaincfg"
|
||||
"github.com/lbryio/lbcd/version"
|
||||
btcutil "github.com/lbryio/lbcutil"
|
||||
)
|
||||
|
||||
|
@ -111,8 +110,6 @@ type config struct {
|
|||
SigNet bool `long:"signet" description:"Connect to signet (default RPC server: localhost:49245)"`
|
||||
Wallet bool `long:"wallet" description:"Connect to wallet RPC server instead (default: localhost:9244, testnet: localhost:19244, regtest: localhost:29244)"`
|
||||
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
|
||||
Timed bool `short:"t" long:"timed" description:"Display RPC response time"`
|
||||
Quiet bool `short:"q" long:"quiet" description:"Do not output results to stdout"`
|
||||
}
|
||||
|
||||
// normalizeAddress returns addr with the passed default port appended if
|
||||
|
@ -177,10 +174,10 @@ func cleanAndExpandPath(path string) string {
|
|||
// line options.
|
||||
//
|
||||
// The configuration proceeds as follows:
|
||||
// 1. Start with a default config with sane settings
|
||||
// 2. Pre-parse the command line to check for an alternative config file
|
||||
// 3. Load configuration file overwriting defaults with any specified options
|
||||
// 4. Parse CLI options and overwrite/add any specified options
|
||||
// 1) Start with a default config with sane settings
|
||||
// 2) Pre-parse the command line to check for an alternative config file
|
||||
// 3) Load configuration file overwriting defaults with any specified options
|
||||
// 4) Parse CLI options and overwrite/add any specified options
|
||||
//
|
||||
// The above results in functioning properly without any config settings
|
||||
// while still allowing the user to override settings with config files and
|
||||
|
@ -217,7 +214,7 @@ func loadConfig() (*config, []string, error) {
|
|||
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
|
||||
usageMessage := fmt.Sprintf("Use %s -h to show options", appName)
|
||||
if preCfg.ShowVersion {
|
||||
fmt.Println(appName, "version", version.Full())
|
||||
fmt.Println(appName, "version", version())
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/lbryio/lbcd/btcjson"
|
||||
)
|
||||
|
@ -134,8 +133,6 @@ func main() {
|
|||
os.Exit(1)
|
||||
}
|
||||
|
||||
started := time.Now()
|
||||
|
||||
// Send the JSON-RPC request to the server using the user-specified
|
||||
// connection configuration.
|
||||
result, err := sendPostRequest(marshalledJSON, cfg)
|
||||
|
@ -144,16 +141,6 @@ func main() {
|
|||
os.Exit(1)
|
||||
}
|
||||
|
||||
if cfg.Timed {
|
||||
elapsed := time.Since(started)
|
||||
defer fmt.Fprintf(os.Stderr, "%s\n", elapsed)
|
||||
}
|
||||
|
||||
var output io.Writer = os.Stdout
|
||||
if cfg.Quiet {
|
||||
output = io.Discard
|
||||
}
|
||||
|
||||
// Choose how to display the result based on its type.
|
||||
strResult := string(result)
|
||||
if strings.HasPrefix(strResult, "{") || strings.HasPrefix(strResult, "[") {
|
||||
|
@ -163,7 +150,7 @@ func main() {
|
|||
err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Fprintln(output, dst.String())
|
||||
fmt.Println(dst.String())
|
||||
|
||||
} else if strings.HasPrefix(strResult, `"`) {
|
||||
var str string
|
||||
|
@ -172,9 +159,9 @@ func main() {
|
|||
err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Fprintln(output, str)
|
||||
fmt.Println(str)
|
||||
|
||||
} else if strResult != "null" {
|
||||
fmt.Fprintln(output, strResult)
|
||||
fmt.Println(strResult)
|
||||
}
|
||||
}
|
||||
|
|
75
cmd/lbcctl/version.go
Normal file
|
@ -0,0 +1,75 @@
|
|||
// Copyright (c) 2013 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// semanticAlphabet
|
||||
const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
|
||||
|
||||
// These constants define the application version and follow the semantic
|
||||
// versioning 2.0.0 spec (http://semver.org/).
|
||||
const (
|
||||
appMajor uint = 0
|
||||
appMinor uint = 22
|
||||
appPatch uint = 100
|
||||
|
||||
// appPreRelease MUST only contain characters from semanticAlphabet
|
||||
// per the semantic versioning spec.
|
||||
appPreRelease = "beta"
|
||||
)
|
||||
|
||||
// appBuild is defined as a variable so it can be overridden during the build
|
||||
// process with '-ldflags "-X main.appBuild foo' if needed. It MUST only
|
||||
// contain characters from semanticAlphabet per the semantic versioning spec.
|
||||
var appBuild string
|
||||
|
||||
// version returns the application version as a properly formed string per the
|
||||
// semantic versioning 2.0.0 spec (http://semver.org/).
|
||||
func version() string {
|
||||
// Start with the major, minor, and patch versions.
|
||||
version := fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
|
||||
|
||||
// Append pre-release version if there is one. The hyphen called for
|
||||
// by the semantic versioning spec is automatically appended and should
|
||||
// not be contained in the pre-release string. The pre-release version
|
||||
// is not appended if it contains invalid characters.
|
||||
preRelease := normalizeVerString(appPreRelease)
|
||||
if preRelease != "" {
|
||||
version = fmt.Sprintf("%s-%s", version, preRelease)
|
||||
}
|
||||
|
||||
// Append build metadata if there is any. The plus called for
|
||||
// by the semantic versioning spec is automatically appended and should
|
||||
// not be contained in the build metadata string. The build metadata
|
||||
// string is not appended if it contains invalid characters.
|
||||
build := normalizeVerString(appBuild)
|
||||
if build != "" {
|
||||
version = fmt.Sprintf("%s+%s", version, build)
|
||||
}
|
||||
|
||||
return version
|
||||
}
|
||||
|
||||
// normalizeVerString returns the passed string stripped of all characters which
|
||||
// are not valid according to the semantic versioning guidelines for pre-release
|
||||
// version and build metadata strings. In particular they MUST only contain
|
||||
// characters in semanticAlphabet.
|
||||
func normalizeVerString(str string) string {
|
||||
var result bytes.Buffer
|
||||
for _, r := range str {
|
||||
if strings.ContainsRune(semanticAlphabet, r) {
|
||||
// Ignoring the error here since it can only fail if
|
||||
// the the system is out of memory and there are much
|
||||
// bigger issues at that point.
|
||||
_, _ = result.WriteRune(r)
|
||||
}
|
||||
}
|
||||
return result.String()
|
||||
}
|
49
config.go
|
@ -7,7 +7,6 @@ package main
|
|||
import (
|
||||
"bufio"
|
||||
"crypto/rand"
|
||||
_ "embed"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
|
@ -32,7 +31,6 @@ import (
|
|||
_ "github.com/lbryio/lbcd/database/ffldb"
|
||||
"github.com/lbryio/lbcd/mempool"
|
||||
"github.com/lbryio/lbcd/peer"
|
||||
"github.com/lbryio/lbcd/version"
|
||||
"github.com/lbryio/lbcd/wire"
|
||||
btcutil "github.com/lbryio/lbcutil"
|
||||
)
|
||||
|
@ -66,9 +64,8 @@ const (
|
|||
defaultMaxOrphanTxSize = 100000
|
||||
defaultSigCacheMaxSize = 100000
|
||||
sampleConfigFilename = "sample-lbcd.conf"
|
||||
defaultTxIndex = true
|
||||
defaultTxIndex = false
|
||||
defaultAddrIndex = false
|
||||
defaultUpnp = true
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -81,9 +78,6 @@ var (
|
|||
defaultLogDir = filepath.Join(defaultHomeDir, defaultLogDirname)
|
||||
)
|
||||
|
||||
//go:embed sample-lbcd.conf
|
||||
var sampleConfig string
|
||||
|
||||
// runServiceCommand is only set to a real function on Windows. It is used
|
||||
// to parse and execute service commands specified via the -s flag.
|
||||
var runServiceCommand func(string) error
|
||||
|
@ -105,7 +99,7 @@ type config struct {
|
|||
AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"`
|
||||
AddrIndex bool `long:"addrindex" description:"Maintain a full address-based transaction index which makes the searchrawtransactions RPC available"`
|
||||
AgentBlacklist []string `long:"agentblacklist" description:"A comma separated list of user-agent substrings which will cause lbcd to reject any peers whose user-agent contains any of the blacklisted substrings."`
|
||||
AgentWhitelist []string `long:"agentwhitelist" description:"A comma separated list of user-agent substrings which will cause lbcd to require all peers' user-agents to contain one of the whitelisted substrings. The blacklist is applied before the whitelist, and an empty whitelist will allow all agents that do not fail the blacklist."`
|
||||
AgentWhitelist []string `long:"agentwhitelist" description:"A comma separated list of user-agent substrings which will cause lbcd to require all peers' user-agents to contain one of the whitelisted substrings. The blacklist is applied before the blacklist, and an empty whitelist will allow all agents that do not fail the blacklist."`
|
||||
BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"`
|
||||
BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."`
|
||||
BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"`
|
||||
|
@ -117,7 +111,6 @@ type config struct {
|
|||
ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"`
|
||||
ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"`
|
||||
CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`
|
||||
MemProfile string `long:"memprofile" description:"Write memory profile to the specified file"`
|
||||
DataDir string `short:"b" long:"datadir" description:"Directory to store data"`
|
||||
DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"`
|
||||
DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set the log level for individual subsystems -- Use show to list available subsystems"`
|
||||
|
@ -177,7 +170,7 @@ type config struct {
|
|||
TrickleInterval time.Duration `long:"trickleinterval" description:"Minimum time between attempts to send new inventory to a connected peer"`
|
||||
TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
|
||||
UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."`
|
||||
Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"`
|
||||
NoUpnp bool `long:"noupnp" description:"Don't use UPnP to map our listening port outside of NAT"`
|
||||
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
|
||||
Whitelists []string `long:"whitelist" description:"Add an IP network or IP that will not be banned. (eg. 192.168.1.0/24 or ::1)"`
|
||||
lookup func(string) ([]net.IP, error)
|
||||
|
@ -409,10 +402,10 @@ func newConfigParser(cfg *config, so *serviceOptions, options flags.Options) *fl
|
|||
// line options.
|
||||
//
|
||||
// The configuration proceeds as follows:
|
||||
// 1. Start with a default config with sane settings
|
||||
// 2. Pre-parse the command line to check for an alternative config file
|
||||
// 3. Load configuration file overwriting defaults with any specified options
|
||||
// 4. Parse CLI options and overwrite/add any specified options
|
||||
// 1) Start with a default config with sane settings
|
||||
// 2) Pre-parse the command line to check for an alternative config file
|
||||
// 3) Load configuration file overwriting defaults with any specified options
|
||||
// 4) Parse CLI options and overwrite/add any specified options
|
||||
//
|
||||
// The above results in lbcd functioning properly without any config settings
|
||||
// while still allowing the user to override settings with config files and
|
||||
|
@ -446,7 +439,6 @@ func loadConfig() (*config, []string, error) {
|
|||
Generate: defaultGenerate,
|
||||
TxIndex: defaultTxIndex,
|
||||
AddrIndex: defaultAddrIndex,
|
||||
Upnp: defaultUpnp,
|
||||
}
|
||||
|
||||
// Service options which are only added on Windows.
|
||||
|
@ -471,7 +463,7 @@ func loadConfig() (*config, []string, error) {
|
|||
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
|
||||
usageMessage := fmt.Sprintf("Use %s -h to show usage", appName)
|
||||
if preCfg.ShowVersion {
|
||||
fmt.Println(appName, "version", version.Full())
|
||||
fmt.Println(appName, "version", version())
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
|
@ -977,8 +969,13 @@ func loadConfig() (*config, []string, error) {
|
|||
// Only allow TLS to be disabled if the RPC is bound to localhost
|
||||
// addresses.
|
||||
if !cfg.DisableRPC && cfg.DisableTLS {
|
||||
allowedTLSListeners := map[string]struct{}{
|
||||
"localhost": {},
|
||||
"127.0.0.1": {},
|
||||
"::1": {},
|
||||
}
|
||||
for _, addr := range cfg.RPCListeners {
|
||||
_, _, err := net.SplitHostPort(addr)
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
str := "%s: RPC listen interface '%s' is " +
|
||||
"invalid: %v"
|
||||
|
@ -987,6 +984,15 @@ func loadConfig() (*config, []string, error) {
|
|||
fmt.Fprintln(os.Stderr, usageMessage)
|
||||
return nil, nil, err
|
||||
}
|
||||
if _, ok := allowedTLSListeners[host]; !ok {
|
||||
str := "%s: the --notls option may not be used " +
|
||||
"when binding RPC to non localhost " +
|
||||
"addresses: %s"
|
||||
err := fmt.Errorf(str, funcName, addr)
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
fmt.Fprintln(os.Stderr, usageMessage)
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
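For quick reference, the rule this new check enforces can be isolated into a small helper. The function below is a hedged sketch rather than code from the patch; tlsMayBeDisabled is an invented name and it assumes the fmt and net imports already present in config.go.

    // tlsMayBeDisabled reports (as an error) whether --notls is acceptable:
    // every RPC listener must bind to a loopback name or address, so RPC
    // credentials are never sent in the clear over a real network interface.
    func tlsMayBeDisabled(rpcListeners []string) error {
        allowed := map[string]struct{}{
            "localhost": {},
            "127.0.0.1": {},
            "::1":       {},
        }
        for _, addr := range rpcListeners {
            host, _, err := net.SplitHostPort(addr)
            if err != nil {
                return fmt.Errorf("RPC listen interface '%s' is invalid: %v", addr, err)
            }
            if _, ok := allowed[host]; !ok {
                return fmt.Errorf("--notls may not be used when binding RPC to non localhost address %s", addr)
            }
        }
        return nil
    }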
|
||||
|
@@ -1170,15 +1176,11 @@ func createDefaultConfigFile(destinationPath string) error {
|
|||
}
|
||||
generatedRPCPass := base64.StdEncoding.EncodeToString(randomBytes)
|
||||
|
||||
var reader *bufio.Reader
|
||||
src, err := os.Open(sampleConfigPath)
|
||||
if err != nil {
|
||||
// Fall back to sample config embedded at build time.
|
||||
reader = bufio.NewReader(strings.NewReader(sampleConfig))
|
||||
} else {
|
||||
reader = bufio.NewReader(src)
|
||||
defer src.Close()
|
||||
return err
|
||||
}
|
||||
defer src.Close()
|
||||
|
||||
dest, err := os.OpenFile(destinationPath,
|
||||
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
|
@@ -1189,6 +1191,7 @@ func createDefaultConfigFile(destinationPath string) error {
|
|||
|
||||
// We copy every line from the sample config file to the destination,
|
||||
// only replacing the two lines for rpcuser and rpcpass
|
||||
reader := bufio.NewReader(src)
|
||||
for err != io.EOF {
|
||||
var line string
|
||||
line, err = reader.ReadString('\n')
|
||||
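The copy loop the comment above describes can be sketched as a standalone helper. The name writeConfigWithCreds and the strings.Contains matching below are illustrative assumptions rather than the patch's exact code, and the helper expects the bufio, io, and strings imports.

    // writeConfigWithCreds copies the sample config line by line, substituting
    // freshly generated credentials for the rpcuser and rpcpass assignments.
    func writeConfigWithCreds(reader *bufio.Reader, dest io.Writer, user, pass string) error {
        for {
            line, err := reader.ReadString('\n')
            if err != nil && err != io.EOF {
                return err
            }
            if strings.Contains(line, "rpcuser=") {
                line = "rpcuser=" + user + "\n"
            } else if strings.Contains(line, "rpcpass=") {
                line = "rpcpass=" + pass + "\n"
            }
            if _, werr := dest.Write([]byte(line)); werr != nil {
                return werr
            }
            if err == io.EOF {
                return nil
            }
        }
    }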
|
|
103
config_test.go
|
@@ -1,11 +1,9 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
@@ -16,107 +14,6 @@ var (
|
|||
rpcpassRegexp = regexp.MustCompile("(?m)^rpcpass=.+$")
|
||||
)
|
||||
|
||||
// Define a struct "configCmdLineOnly" containing a subset of configuration
|
||||
// parameters which are command-line only. These fields are copied line-by-line
|
||||
// from "config" struct in "config.go", and the field names, types, and tags must
|
||||
// match for the test to work.
|
||||
type configCmdLineOnly struct {
|
||||
ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"`
|
||||
DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"`
|
||||
DropCfIndex bool `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."`
|
||||
DropTxIndex bool `long:"droptxindex" description:"Deletes the hash-based transaction index from the database on start up and then exits."`
|
||||
DisableCheckpoints bool `long:"nocheckpoints" description:"Disable built-in checkpoints. Don't do this unless you know what you're doing."`
|
||||
NoWinService bool `long:"nowinservice" description:"Do not start as a background service on Windows -- NOTE: This flag only works on the command line, not in the config file"`
|
||||
DisableStallHandler bool `long:"nostalldetect" description:"Disables the stall handler system for each peer, useful in simnet/regtest integration tests frameworks"`
|
||||
RegressionTest bool `long:"regtest" description:"Use the regression test network"`
|
||||
SimNet bool `long:"simnet" description:"Use the simulation test network"`
|
||||
SigNet bool `long:"signet" description:"Use the signet test network"`
|
||||
SigNetChallenge string `long:"signetchallenge" description:"Connect to a custom signet network defined by this challenge instead of using the global default signet test network -- Can be specified multiple times"`
|
||||
SigNetSeedNode []string `long:"signetseednode" description:"Specify a seed node for the signet network instead of using the global default signet network seed nodes"`
|
||||
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
|
||||
}
|
||||
|
||||
func fieldEq(f1, f2 reflect.StructField) bool {
|
||||
return (f1.Name == f2.Name) && (f1.Type == f2.Type) && (f1.Tag == f2.Tag)
|
||||
}
|
||||
|
||||
func TestSampleConfigFileComplete(t *testing.T) {
|
||||
// find out where the sample config lives
|
||||
_, path, _, ok := runtime.Caller(0)
|
||||
if !ok {
|
||||
t.Fatalf("Failed finding config file path")
|
||||
}
|
||||
sampleConfigFile := filepath.Join(filepath.Dir(path), sampleConfigFilename)
|
||||
|
||||
// Read the sample config file
|
||||
content, err := ioutil.ReadFile(sampleConfigFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed reading sample config file: %v", err)
|
||||
}
|
||||
|
||||
allFields := reflect.VisibleFields(reflect.TypeOf(config{}))
|
||||
cmdlineFields := reflect.VisibleFields(reflect.TypeOf(configCmdLineOnly{}))
|
||||
|
||||
// Verify cmdlineFields is a subset of allFields.
|
||||
for _, cf := range cmdlineFields {
|
||||
// Check for presence of field "cf" in config struct.
|
||||
var field *reflect.StructField
|
||||
for _, f := range allFields {
|
||||
f := f // new instance of loop var for return
|
||||
if fieldEq(cf, f) {
|
||||
field = &f
|
||||
break
|
||||
}
|
||||
}
|
||||
if field == nil {
|
||||
t.Errorf("cmdline field: %s type: %s is not present in type %s",
|
||||
cf.Name, cf.Type, reflect.TypeOf(config{}))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify sample config covers all parameters.
|
||||
for _, f := range allFields {
|
||||
longname, ok := f.Tag.Lookup("long")
|
||||
if !ok {
|
||||
// Field has no long-form name, so not eligible for
|
||||
// inclusion in sample config.
|
||||
continue
|
||||
}
|
||||
|
||||
// Check for presence of field "f" in our configCmdLineOnly struct.
|
||||
var cmdline *reflect.StructField
|
||||
for _, cf := range cmdlineFields {
|
||||
cf := cf // new instance of loop var for return
|
||||
if fieldEq(cf, f) {
|
||||
cmdline = &cf
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Look for assignment (<longname>="), or commented assignment ("; <longname>=").
|
||||
pattern := fmt.Sprintf("(?m)^(;\\s*)?%s=.*$", longname)
|
||||
assignment, err := regexp.Compile(pattern)
|
||||
if err != nil {
|
||||
t.Errorf("config field: %s longname: %s failed compiling regexp (%s): %v",
|
||||
f.Name, longname, pattern, err)
|
||||
continue
|
||||
}
|
||||
|
||||
assigned := assignment.Match(content)
|
||||
|
||||
// Field "f" must be present in either the sample config (<longname>=X),
|
||||
// or it must be one of the command line only fields, but not both.
|
||||
if !assigned && (cmdline == nil) {
|
||||
t.Errorf("config field: %s longname: %s assignment (%s) should be present in %s",
|
||||
f.Name, longname, assignment, sampleConfigFilename)
|
||||
}
|
||||
if assigned && (cmdline != nil) {
|
||||
t.Errorf("config field: %s longname: %s should not be present in both %s and type %s",
|
||||
f.Name, longname, sampleConfigFilename, reflect.TypeOf(configCmdLineOnly{}).Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateDefaultConfigFile(t *testing.T) {
|
||||
// find out where the sample config lives
|
||||
_, path, _, ok := runtime.Caller(0)
|
||||
|
|
|
@@ -5,7 +5,7 @@
|
|||
/*
|
||||
Package connmgr implements a generic Bitcoin network connection manager.
|
||||
|
||||
# Connection Manager Overview
|
||||
Connection Manager Overview
|
||||
|
||||
Connection Manager handles all the general connection concerns such as
|
||||
maintaining a set number of outbound connections, sourcing peers, banning,
|
||||
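For orientation, the snippet below shows how a caller typically drives the connection manager. It is a hedged sketch based on the upstream btcd connmgr API that lbcd inherits (connmgr.New, Config.TargetOutbound, Config.Dial, Config.OnConnection, Start, Stop); the peer count and logging are placeholders, and a real caller also supplies GetNewAddress so the manager can source peer addresses.

    package main

    import (
        "log"
        "net"

        "github.com/lbryio/lbcd/connmgr"
    )

    func main() {
        cmgr, err := connmgr.New(&connmgr.Config{
            // Keep roughly eight outbound connections alive at all times.
            TargetOutbound: 8,
            Dial: func(addr net.Addr) (net.Conn, error) {
                return net.Dial(addr.Network(), addr.String())
            },
            OnConnection: func(req *connmgr.ConnReq, conn net.Conn) {
                log.Printf("connected to %v", conn.RemoteAddr())
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        cmgr.Start()
        // ... run the node until shutdown is requested ...
        cmgr.Stop()
    }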
|
|
|
@@ -1,42 +0,0 @@
|
|||
#! /bin/bash
|
||||
|
||||
read -r -d '' help << EOM
|
||||
$0 - helper script for displaying miner of a mined block.
|
||||
|
||||
Options:
|
||||
|
||||
-h Display this message.
|
||||
|
||||
--height Specify blockheight.
|
||||
--hash Specify blockhash.
|
||||
EOM
|
||||
|
||||
while getopts ":h-:" optchar; do
|
||||
case "${optchar}" in
|
||||
-)
|
||||
case "${OPTARG}" in
|
||||
hash)
|
||||
blockhash="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 ))
|
||||
;;
|
||||
height)
|
||||
blockheight="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 ))
|
||||
blockhash=$(lbcctl getblockhash ${blockheight})
|
||||
;;
|
||||
*) echo "Unknown long option --${OPTARG}" >&2; exit -2 ;;
|
||||
esac
|
||||
;;
|
||||
h) printf "${help}\n\n"; exit 2;;
|
||||
*) echo "Unknown option -${OPTARG}" >&2; exit -2;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
||||
block=$(lbcctl getblock $blockhash)
|
||||
blockheight=$(lbcctl getblock $blockhash | jq -r .height)
|
||||
|
||||
coinbase_txid=$(echo ${block} | jq -r '.tx[0]')
|
||||
coinbase_raw=$(lbcctl getrawtransaction ${coinbase_txid} 1)
|
||||
coinbase=$(echo ${coinbase_raw} | jq '.vin[0].coinbase')
|
||||
miner=$(echo ${coinbase} | grep -o '2f.*2f' | xxd -r -p | strings)
|
||||
|
||||
echo ${blockheight}: ${blockhash}: ${miner}
|
|
@@ -1,63 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
read -r -d '' help << EOM
|
||||
snapshot.sh - helper script for generating snapshot from lbcd's app dir.
|
||||
|
||||
The default output name is "lbcd_snapshot_<height>_<lbcd_ver>_<date>.tar.zst"
|
||||
|
||||
To extract the snapshot (data/mainnet/):
|
||||
|
||||
zstd -d lbcd_snapshot_<height>_<lbcd_ver>_<date>.tar.zst | tar xf - -C <appdir>
|
||||
|
||||
Default appdir of lbcd on different OSes:
|
||||
|
||||
Darwin) "\${HOME}/Library/Application Support/Lbcd"
|
||||
Linux) "\${HOME}/.lbcd"
|
||||
Windows) "%%LOCALAPPDATA%%/lbcd"
|
||||
|
||||
Options:
|
||||
|
||||
-h Display this message.
|
||||
-d Specify APPDIR to copy the snapshot from.
|
||||
|
||||
-o Specify the output filename of snapshot.
|
||||
-b Specify the best block height of the snapshot. (ignored if -o is specified)
|
||||
-l Specify git tag of the running lbcd. (ignored if -o is specified)
|
||||
-t Specify the date when the snapshot is generated. (ignored if -o is specified)
|
||||
EOM
|
||||
|
||||
while getopts o:d:b:l:t:h flag
|
||||
do
|
||||
case "${flag}" in
|
||||
h) printf "${help}\n\n"; exit 0;;
|
||||
d) appdir=${OPTARG};;
|
||||
|
||||
o) snapshot=${OPTARG};;
|
||||
b) height=${OPTARG};;
|
||||
l) lbcd_ver=${OPTARG};;
|
||||
t) date=${OPTARG};;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ -z "$appdir" ]; then
|
||||
case $(uname) in
|
||||
Darwin) appdir="${HOME}/Library/Application Support/Lbcd" ;;
|
||||
Linux) appdir="${HOME}/.lbcd" ;;
|
||||
Windows) appdir="%LOCALAPPDATA%/lbcd" ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
|
||||
if [ -z ${snapshot} ]; then
|
||||
git_repo=$(git rev-parse --show-toplevel)
|
||||
[ -z "${height}" ] && height=$(go run ${git_repo}/claimtrie/cmd block best --showhash=false)
|
||||
[ -z "${lbcd_ver}" ] && lbcd_ver=$(git describe --tags)
|
||||
[ -z "${date}" ] && date=$(date +"%Y-%m-%d")
|
||||
snapshot="lbcd_snapshot_${height}_${lbcd_ver}_${date}.tar.zst"
|
||||
fi
|
||||
|
||||
|
||||
echo "Generating $snapshot ..."
|
||||
|
||||
tar c -C "${appdir}" data/mainnet | zstd -9 --no-progress -o "${snapshot}"
|
||||
|
|
@@ -5,7 +5,7 @@
|
|||
/*
|
||||
Package database provides a block and metadata storage database.
|
||||
|
||||
# Overview
|
||||
Overview
|
||||
|
||||
As of Feb 2016, there are over 400,000 blocks in the Bitcoin block chain and
|
||||
and over 112 million transactions (which turns out to be over 60GB of data).
|
||||
|
@@ -18,15 +18,15 @@ storage, and strict checksums in key areas to ensure data integrity.
|
|||
|
||||
A quick overview of the features database provides are as follows:
|
||||
|
||||
- Key/value metadata store
|
||||
- Bitcoin block storage
|
||||
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
|
||||
- Read-only and read-write transactions with both manual and managed modes
|
||||
- Nested buckets
|
||||
- Supports registration of backend databases
|
||||
- Comprehensive test coverage
|
||||
- Key/value metadata store
|
||||
- Bitcoin block storage
|
||||
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
|
||||
- Read-only and read-write transactions with both manual and managed modes
|
||||
- Nested buckets
|
||||
- Supports registration of backend databases
|
||||
- Comprehensive test coverage
|
||||
|
||||
# Database
|
||||
Database
|
||||
|
||||
The main entry point is the DB interface. It exposes functionality for
|
||||
transactional-based access and storage of metadata and block data. It is
|
||||
|
@@ -43,14 +43,14 @@ The Begin function provides an unmanaged transaction while the View and Update
|
|||
functions provide a managed transaction. These are described in more detail
|
||||
below.
|
||||
|
||||
# Transactions
|
||||
Transactions
|
||||
|
||||
The Tx interface provides facilities for rolling back or committing changes that
|
||||
took place while the transaction was active. It also provides the root metadata
|
||||
bucket under which all keys, values, and nested buckets are stored. A
|
||||
transaction can either be read-only or read-write and managed or unmanaged.
|
||||
|
||||
# Managed versus Unmanaged Transactions
|
||||
Managed versus Unmanaged Transactions
|
||||
|
||||
A managed transaction is one where the caller provides a function to execute
|
||||
within the context of the transaction and the commit or rollback is handled
|
||||
|
@@ -63,7 +63,7 @@ call Commit or Rollback when they are finished with it. Leaving transactions
|
|||
open for long periods of time can have several adverse effects, so it is
|
||||
recommended that managed transactions are used instead.
|
||||
|
||||
# Buckets
|
||||
Buckets
|
||||
|
||||
The Bucket interface provides the ability to manipulate key/value pairs and
|
||||
nested buckets as well as iterate through them.
|
||||
|
@@ -73,7 +73,7 @@ CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with
|
|||
buckets. The ForEach function allows the caller to provide a function to be
|
||||
called with each key/value pair and nested bucket in the current bucket.
|
||||
|
||||
# Metadata Bucket
|
||||
Metadata Bucket
|
||||
|
||||
As discussed above, all of the functions which are used to manipulate key/value
|
||||
pairs and nested buckets exist on the Bucket interface. The root metadata
|
||||
|
@@ -81,7 +81,7 @@ bucket is the upper-most bucket in which data is stored and is created at the
|
|||
same time as the database. Use the Metadata function on the Tx interface
|
||||
to retrieve it.
|
||||
|
||||
# Nested Buckets
|
||||
Nested Buckets
|
||||
|
||||
The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface
|
||||
provide the ability to create an arbitrary number of nested buckets. It is
|
||||
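To make the Tx and Bucket relationships described in this file concrete, here is a hedged sketch of a managed read-write transaction that creates a nested bucket under the root metadata bucket. The function and key names are invented for illustration; the interfaces used (Update, Metadata, CreateBucketIfNotExists, Put, Get) are the ones this package documents.

    package dbexample

    import (
        "fmt"

        "github.com/lbryio/lbcd/database"
    )

    // storeGreeting shows the managed read-write pattern: Update handles the
    // commit (or the rollback on error) automatically.
    func storeGreeting(db database.DB) error {
        return db.Update(func(tx database.Tx) error {
            // The root metadata bucket is always reachable from the transaction.
            meta := tx.Metadata()

            // Nested buckets are created on demand.
            bucket, err := meta.CreateBucketIfNotExists([]byte("example"))
            if err != nil {
                return err
            }
            if err := bucket.Put([]byte("greeting"), []byte("hello")); err != nil {
                return err
            }
            fmt.Printf("stored %q\n", bucket.Get([]byte("greeting")))
            return nil
        })
    }

Because Update manages the transaction, the changes are committed when the closure returns nil and rolled back when it returns an error.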
|
|
|
@@ -622,8 +622,8 @@ func (s *blockStore) syncBlocks() error {
|
|||
// were partially written.
|
||||
//
|
||||
// There are effectively two scenarios to consider here:
|
||||
// 1. Transient write failures from which recovery is possible
|
||||
// 2. More permanent failures such as hard disk death and/or removal
|
||||
// 1) Transient write failures from which recovery is possible
|
||||
// 2) More permanent failures such as hard disk death and/or removal
|
||||
//
|
||||
// In either case, the write cursor will be repositioned to the old block file
|
||||
// offset regardless of any other errors that occur while attempting to undo
|
||||
|
|
|
@@ -10,7 +10,7 @@ This driver is the recommended driver for use with lbcd. It makes use leveldb
|
|||
for the metadata, flat files for block storage, and checksums in key areas to
|
||||
ensure data integrity.
|
||||
|
||||
# Usage
|
||||
Usage
|
||||
|
||||
This package is a driver to the database package and provides the database type
|
||||
of "ffldb". The parameters the Open and Create functions take are the
|
||||
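A hedged usage sketch for this driver follows; the path is a placeholder and the blank driver import is only needed if the driver is not already registered elsewhere in the program.

    package main

    import (
        "log"

        "github.com/lbryio/lbcd/database"
        _ "github.com/lbryio/lbcd/database/ffldb" // registers the "ffldb" driver
        "github.com/lbryio/lbcd/wire"
    )

    func main() {
        // database.Open works the same way for an existing store.
        db, err := database.Create("ffldb", "/tmp/example-ffldb", wire.MainNet)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
    }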
|
|
|
@@ -318,14 +318,13 @@ func (iter *Iterator) ForceReseek() {
|
|||
// unexpected keys and/or values.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// iter := t.Iterator(nil, nil)
|
||||
// for iter.Next() {
|
||||
// if someCondition {
|
||||
// t.Delete(iter.Key())
|
||||
// iter.ForceReseek()
|
||||
// }
|
||||
// }
|
||||
// iter := t.Iterator(nil, nil)
|
||||
// for iter.Next() {
|
||||
// if someCondition {
|
||||
// t.Delete(iter.Key())
|
||||
// iter.ForceReseek()
|
||||
// }
|
||||
// }
|
||||
func (t *Mutable) Iterator(startKey, limitKey []byte) *Iterator {
|
||||
iter := &Iterator{
|
||||
t: t,
|
||||
|
|
267
doc.go
|
@@ -18,144 +18,143 @@ on Windows. The -C (--configfile) flag, as shown below, can be used to override
|
|||
this location.
|
||||
|
||||
Usage:
|
||||
|
||||
lbcd [OPTIONS]
|
||||
lbcd [OPTIONS]
|
||||
|
||||
Application Options:
|
||||
|
||||
--addcheckpoint= Add a custom checkpoint. Format:
|
||||
'<height>:<hash>'
|
||||
-a, --addpeer= Add a peer to connect with at startup
|
||||
--addrindex Maintain a full address-based transaction index
|
||||
which makes the searchrawtransactions RPC
|
||||
available
|
||||
--banduration= How long to ban misbehaving peers. Valid time
|
||||
units are {s, m, h}. Minimum 1 second (default:
|
||||
24h0m0s)
|
||||
--banthreshold= Maximum allowed ban score before disconnecting
|
||||
and banning misbehaving peers. (default: 100)
|
||||
--blockmaxsize= Maximum block size in bytes to be used when
|
||||
creating a block (default: 750000)
|
||||
--blockminsize= Minimum block size in bytes to be used when
|
||||
creating a block
|
||||
--blockmaxweight= Maximum block weight to be used when creating a
|
||||
block (default: 3000000)
|
||||
--blockminweight= Minimum block weight to be used when creating a
|
||||
block
|
||||
--blockprioritysize= Size in bytes for high-priority/low-fee
|
||||
transactions when creating a block (default:
|
||||
50000)
|
||||
--blocksonly Do not accept transactions from remote peers.
|
||||
-C, --configfile= Path to configuration file
|
||||
--connect= Connect only to the specified peers at startup
|
||||
--cpuprofile= Write CPU profile to the specified file
|
||||
-b, --datadir= Directory to store data
|
||||
--dbtype= Database backend to use for the Block Chain
|
||||
(default: ffldb)
|
||||
-d, --debuglevel= Logging level for all subsystems {trace, debug,
|
||||
info, warn, error, critical} -- You may also
|
||||
specify
|
||||
<subsystem>=<level>,<subsystem2>=<level>,... to
|
||||
set the log level for individual subsystems --
|
||||
Use show to list available subsystems (default:
|
||||
info)
|
||||
--dropaddrindex Deletes the address-based transaction index from
|
||||
the database on start up and then exits.
|
||||
--dropcfindex Deletes the index used for committed filtering
|
||||
(CF) support from the database on start up and
|
||||
then exits.
|
||||
--droptxindex Deletes the hash-based transaction index from the
|
||||
database on start up and then exits.
|
||||
--externalip= Add an ip to the list of local addresses we claim
|
||||
to listen on to peers
|
||||
--generate Generate (mine) bitcoins using the CPU
|
||||
--limitfreerelay= Limit relay of transactions with no transaction
|
||||
fee to the given amount in thousands of bytes per
|
||||
minute (default: 15)
|
||||
--listen= Add an interface/port to listen for connections
|
||||
(default all interfaces port: 9246, testnet:
|
||||
19246, regtest: 29246, signet: 39246)
|
||||
--logdir= Directory to log output
|
||||
--maxorphantx= Max number of orphan transactions to keep in
|
||||
memory (default: 100)
|
||||
--maxpeers= Max number of inbound and outbound peers
|
||||
(default: 125)
|
||||
--memprofile= Write memory profile to the specified file
|
||||
--miningaddr= Add the specified payment address to the list of
|
||||
addresses to use for generated blocks -- At least
|
||||
one address is required if the generate option is
|
||||
set
|
||||
--minrelaytxfee= The minimum transaction fee in BTC/kB to be
|
||||
considered a non-zero fee. (default: 1e-05)
|
||||
--nobanning Disable banning of misbehaving peers
|
||||
--nocfilters Disable committed filtering (CF) support
|
||||
--nocheckpoints Disable built-in checkpoints. Don't do this
|
||||
unless you know what you're doing.
|
||||
--nodnsseed Disable DNS seeding for peers
|
||||
--nolisten Disable listening for incoming connections --
|
||||
NOTE: Listening is automatically disabled if the
|
||||
--connect or --proxy options are used without
|
||||
also specifying listen interfaces via --listen
|
||||
--noonion Disable connecting to tor hidden services
|
||||
--nopeerbloomfilters Disable bloom filtering support
|
||||
--norelaypriority Do not require free or low-fee transactions to
|
||||
have high priority for relaying
|
||||
--norpc Disable built-in RPC server -- NOTE: The RPC
|
||||
server is disabled by default if no
|
||||
rpcuser/rpcpass or rpclimituser/rpclimitpass is
|
||||
specified
|
||||
--notls Disable TLS for the RPC server
|
||||
--onion= Connect to tor hidden services via SOCKS5 proxy
|
||||
(eg. 127.0.0.1:9050)
|
||||
--onionpass= Password for onion proxy server
|
||||
--onionuser= Username for onion proxy server
|
||||
--profile= Enable HTTP profiling on given port -- NOTE port
|
||||
must be between 1024 and 65536
|
||||
--proxy= Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)
|
||||
--proxypass= Password for proxy server
|
||||
--proxyuser= Username for proxy server
|
||||
--regtest Use the regression test network
|
||||
--rejectnonstd Reject non-standard transactions regardless of
|
||||
the default settings for the active network.
|
||||
--relaynonstd Relay non-standard transactions regardless of the
|
||||
default settings for the active network.
|
||||
--rpccert= File containing the certificate file
|
||||
--rpckey= File containing the certificate key
|
||||
--rpclimitpass= Password for limited RPC connections
|
||||
--rpclimituser= Username for limited RPC connections
|
||||
--rpclisten= Add an interface/port to listen for RPC
|
||||
connections (default port: 9245, testnet: 19245, regtest: 29245)
|
||||
--rpcmaxclients= Max number of RPC clients for standard
|
||||
connections (default: 10)
|
||||
--rpcmaxconcurrentreqs= Max number of concurrent RPC requests that may be
|
||||
processed concurrently (default: 20)
|
||||
--rpcmaxwebsockets= Max number of RPC websocket connections (default:
|
||||
25)
|
||||
--rpcquirks Mirror some JSON-RPC quirks of Bitcoin Core --
|
||||
NOTE: Discouraged unless interoperability issues
|
||||
need to be worked around
|
||||
-P, --rpcpass= Password for RPC connections
|
||||
-u, --rpcuser= Username for RPC connections
|
||||
--sigcachemaxsize= The maximum number of entries in the signature
|
||||
verification cache (default: 100000)
|
||||
--simnet Use the simulation test network
|
||||
--testnet Use the test network
|
||||
--torisolation Enable Tor stream isolation by randomizing user
|
||||
credentials for each connection.
|
||||
--trickleinterval= Minimum time between attempts to send new
|
||||
inventory to a connected peer (default: 10s)
|
||||
--txindex Maintain a full hash-based transaction index
|
||||
which makes all transactions available via the
|
||||
getrawtransaction RPC
|
||||
--uacomment= Comment to add to the user agent -- See BIP 14
|
||||
for more information.
|
||||
--upnp Use UPnP to map our listening port outside of NAT
|
||||
-V, --version Display version information and exit
|
||||
--whitelist= Add an IP network or IP that will not be banned.
|
||||
(eg. 192.168.1.0/24 or ::1)
|
||||
--addcheckpoint= Add a custom checkpoint. Format:
|
||||
'<height>:<hash>'
|
||||
-a, --addpeer= Add a peer to connect with at startup
|
||||
--addrindex Maintain a full address-based transaction index
|
||||
which makes the searchrawtransactions RPC
|
||||
available
|
||||
--banduration= How long to ban misbehaving peers. Valid time
|
||||
units are {s, m, h}. Minimum 1 second (default:
|
||||
24h0m0s)
|
||||
--banthreshold= Maximum allowed ban score before disconnecting
|
||||
and banning misbehaving peers. (default: 100)
|
||||
--blockmaxsize= Maximum block size in bytes to be used when
|
||||
creating a block (default: 750000)
|
||||
--blockminsize= Minimum block size in bytes to be used when
|
||||
creating a block
|
||||
--blockmaxweight= Maximum block weight to be used when creating a
|
||||
block (default: 3000000)
|
||||
--blockminweight= Minimum block weight to be used when creating a
|
||||
block
|
||||
--blockprioritysize= Size in bytes for high-priority/low-fee
|
||||
transactions when creating a block (default:
|
||||
50000)
|
||||
--blocksonly Do not accept transactions from remote peers.
|
||||
-C, --configfile= Path to configuration file
|
||||
--connect= Connect only to the specified peers at startup
|
||||
--cpuprofile= Write CPU profile to the specified file
|
||||
-b, --datadir= Directory to store data
|
||||
--dbtype= Database backend to use for the Block Chain
|
||||
(default: ffldb)
|
||||
-d, --debuglevel= Logging level for all subsystems {trace, debug,
|
||||
info, warn, error, critical} -- You may also
|
||||
specify
|
||||
<subsystem>=<level>,<subsystem2>=<level>,... to
|
||||
set the log level for individual subsystems --
|
||||
Use show to list available subsystems (default:
|
||||
info)
|
||||
--dropaddrindex Deletes the address-based transaction index from
|
||||
the database on start up and then exits.
|
||||
--dropcfindex Deletes the index used for committed filtering
|
||||
(CF) support from the database on start up and
|
||||
then exits.
|
||||
--droptxindex Deletes the hash-based transaction index from the
|
||||
database on start up and then exits.
|
||||
--externalip= Add an ip to the list of local addresses we claim
|
||||
to listen on to peers
|
||||
--generate Generate (mine) bitcoins using the CPU
|
||||
--limitfreerelay= Limit relay of transactions with no transaction
|
||||
fee to the given amount in thousands of bytes per
|
||||
minute (default: 15)
|
||||
--listen= Add an interface/port to listen for connections
|
||||
(default all interfaces port: 9246, testnet:
|
||||
19246, regtest: 29246, signet: 39246)
|
||||
--logdir= Directory to log output
|
||||
--maxorphantx= Max number of orphan transactions to keep in
|
||||
memory (default: 100)
|
||||
--maxpeers= Max number of inbound and outbound peers
|
||||
(default: 125)
|
||||
--miningaddr= Add the specified payment address to the list of
|
||||
addresses to use for generated blocks -- At least
|
||||
one address is required if the generate option is
|
||||
set
|
||||
--minrelaytxfee= The minimum transaction fee in BTC/kB to be
|
||||
considered a non-zero fee. (default: 1e-05)
|
||||
--nobanning Disable banning of misbehaving peers
|
||||
--nocfilters Disable committed filtering (CF) support
|
||||
--nocheckpoints Disable built-in checkpoints. Don't do this
|
||||
unless you know what you're doing.
|
||||
--nodnsseed Disable DNS seeding for peers
|
||||
--nolisten Disable listening for incoming connections --
|
||||
NOTE: Listening is automatically disabled if the
|
||||
--connect or --proxy options are used without
|
||||
also specifying listen interfaces via --listen
|
||||
--noonion Disable connecting to tor hidden services
|
||||
--nopeerbloomfilters Disable bloom filtering support
|
||||
--norelaypriority Do not require free or low-fee transactions to
|
||||
have high priority for relaying
|
||||
--norpc Disable built-in RPC server -- NOTE: The RPC
|
||||
server is disabled by default if no
|
||||
rpcuser/rpcpass or rpclimituser/rpclimitpass is
|
||||
specified
|
||||
--notls Disable TLS for the RPC server -- NOTE: This is
|
||||
only allowed if the RPC server is bound to
|
||||
localhost
|
||||
--onion= Connect to tor hidden services via SOCKS5 proxy
|
||||
(eg. 127.0.0.1:9050)
|
||||
--onionpass= Password for onion proxy server
|
||||
--onionuser= Username for onion proxy server
|
||||
--profile= Enable HTTP profiling on given port -- NOTE port
|
||||
must be between 1024 and 65536
|
||||
--proxy= Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)
|
||||
--proxypass= Password for proxy server
|
||||
--proxyuser= Username for proxy server
|
||||
--regtest Use the regression test network
|
||||
--rejectnonstd Reject non-standard transactions regardless of
|
||||
the default settings for the active network.
|
||||
--relaynonstd Relay non-standard transactions regardless of the
|
||||
default settings for the active network.
|
||||
--rpccert= File containing the certificate file
|
||||
--rpckey= File containing the certificate key
|
||||
--rpclimitpass= Password for limited RPC connections
|
||||
--rpclimituser= Username for limited RPC connections
|
||||
--rpclisten= Add an interface/port to listen for RPC
|
||||
connections (default port: 9245, testnet: 19245, regtest: 29245)
|
||||
--rpcmaxclients= Max number of RPC clients for standard
|
||||
connections (default: 10)
|
||||
--rpcmaxconcurrentreqs= Max number of concurrent RPC requests that may be
|
||||
processed concurrently (default: 20)
|
||||
--rpcmaxwebsockets= Max number of RPC websocket connections (default:
|
||||
25)
|
||||
--rpcquirks Mirror some JSON-RPC quirks of Bitcoin Core --
|
||||
NOTE: Discouraged unless interoperability issues
|
||||
need to be worked around
|
||||
-P, --rpcpass= Password for RPC connections
|
||||
-u, --rpcuser= Username for RPC connections
|
||||
--sigcachemaxsize= The maximum number of entries in the signature
|
||||
verification cache (default: 100000)
|
||||
--simnet Use the simulation test network
|
||||
--testnet Use the test network
|
||||
--torisolation Enable Tor stream isolation by randomizing user
|
||||
credentials for each connection.
|
||||
--trickleinterval= Minimum time between attempts to send new
|
||||
inventory to a connected peer (default: 10s)
|
||||
--txindex Maintain a full hash-based transaction index
|
||||
which makes all transactions available via the
|
||||
getrawtransaction RPC
|
||||
--uacomment= Comment to add to the user agent -- See BIP 14
|
||||
for more information.
|
||||
--upnp Use UPnP to map our listening port outside of NAT
|
||||
-V, --version Display version information and exit
|
||||
--whitelist= Add an IP network or IP that will not be banned.
|
||||
(eg. 192.168.1.0/24 or ::1)
|
||||
|
||||
Help Options:
|
||||
-h, --help Show this help message
|
||||
|
||||
-h, --help Show this help message
|
||||
*/
|
||||
package main
|
||||
|
|
|
@@ -436,7 +436,7 @@ the method name for further details such as parameter and return information.
|
|||
| | |
|
||||
| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Method | getrawtransaction |
|
||||
| Parameters | 1. transaction hash (string, required) - the hash of the transaction<br />2. verbose (bool, optional, default=false) - specifies the transaction is returned as a JSON object instead of hex-encoded string |
|
||||
| Parameters | 1. transaction hash (string, required) - the hash of the transaction<br />2. verbose (int, optional, default=0) - specifies the transaction is returned as a JSON object instead of hex-encoded string |
|
||||
| Description | Returns information about a transaction given its hash. |
|
||||
| Returns (verbose=0) | `"data" (string) hex-encoded bytes of the serialized transaction` |
|
||||
| Returns (verbose=1) | `{ (json object)`<br /> `"hex": "data", (string) hex-encoded transaction`<br /> `"txid": "hash", (string) the hash of the transaction`<br /> `"version": n, (numeric) the transaction version`<br /> `"locktime": n, (numeric) the transaction lock time`<br /> `"vin": [ (array of json objects) the transaction inputs as json objects`<br /> <font color="orange">For coinbase transactions:</font><br /> `{ (json object)`<br /> `"coinbase": "data", (string) the hex-encoded bytes of the signature script`<br /> `"sequence": n, (numeric) the script sequence number`<br /> `"txinwitness": “data", (string) the witness stack for the input`<br /> `}`<br /> <font color="orange">For non-coinbase transactions:</font><br /> `{ (json object)`<br /> `"txid": "hash", (string) the hash of the origin transaction`<br /> `"vout": n, (numeric) the index of the output being redeemed from the origin transaction`<br /> `"scriptSig": { (json object) the signature script used to redeem the origin transaction`<br /> `"asm": "asm", (string) disassembly of the script`<br /> `"hex": "data", (string) hex-encoded bytes of the script`<br /> `}`<br /> `"sequence": n, (numeric) the script sequence number`<br /> `"txinwitness": “data", (string) the witness stack for the input`<br /> `}, ...`<br /> `]`<br /> `"vout": [ (array of json objects) the transaction outputs as json objects`<br /> `{ (json object)`<br /> `"value": n, (numeric) the value in BTC`<br /> `"n": n, (numeric) the index of this transaction output`<br /> `"scriptPubKey": { (json object) the public key script used to pay coins`<br /> `"asm": "asm", (string) disassembly of the script`<br /> `"hex": "data", (string) hex-encoded bytes of the script`<br /> `"reqSigs": n, (numeric) the number of required signatures`<br /> `"type": "scripttype" (string) the type of the script (e.g. 'pubkeyhash')`<br /> `"addresses": [ (json array of string) the bitcoin addresses associated with this output`<br /> `"bitcoinaddress", (string) the bitcoin address`<br /> `...`<br /> `]`<br /> `}`<br /> `}, ...`<br /> `]`<br />`}` |
|
||||
|
|
|
@@ -1,23 +0,0 @@
|
|||
fees
|
||||
====
|
||||
|
||||
|
||||
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
|
||||
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
|
||||
[![Doc](https://img.shields.io/badge/doc-reference-blue.svg)](https://pkg.go.dev/github.com/decred/dcrd/internal/fees)
|
||||
|
||||
Package fees provides decred-specific methods for tracking and estimating fee
|
||||
rates for new transactions to be mined into the network. Fee rate estimation has
|
||||
two main goals:
|
||||
|
||||
- Ensuring transactions are mined within a target _confirmation range_
|
||||
(expressed in blocks);
|
||||
- Attempting to minimize fees while maintaining the above restriction.
|
||||
|
||||
This package was started in order to resolve issue decred/dcrd#1412 and related.
|
||||
See that issue for discussion of the selected approach.
|
||||
|
||||
## License
|
||||
|
||||
Package fees is licensed under the [copyfree](http://copyfree.org) ISC
|
||||
License.
|
|
@@ -1,56 +0,0 @@
|
|||
// Copyright (c) 2018-2020 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Tool dumpfeedb can be used to dump the internal state of the buckets of an
|
||||
// estimator's feedb so that it can be externally analyzed.
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/btcsuite/btclog"
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/lbryio/lbcd/fees"
|
||||
"github.com/lbryio/lbcutil"
|
||||
)
|
||||
|
||||
type config struct {
|
||||
DB string `short:"b" long:"db" description:"Path to fee database"`
|
||||
}
|
||||
|
||||
var feesLog = btclog.NewBackend(os.Stdout).Logger("FEES")
|
||||
|
||||
func main() {
|
||||
cfg := config{
|
||||
DB: path.Join(lbcutil.AppDataDir("lbcd", false), "data", "mainnet", "feesdb"),
|
||||
}
|
||||
|
||||
fees.UseLogger(feesLog)
|
||||
parser := flags.NewParser(&cfg, flags.Default)
|
||||
_, err := parser.Parse()
|
||||
if err != nil {
|
||||
var e *flags.Error
|
||||
if !errors.As(err, &e) || e.Type != flags.ErrHelp {
|
||||
parser.WriteHelp(os.Stderr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
ecfg := fees.EstimatorConfig{
|
||||
DatabaseFile: cfg.DB,
|
||||
ReplaceBucketsOnLoad: true,
|
||||
MinBucketFee: 1,
|
||||
MaxBucketFee: 2,
|
||||
FeeRateStep: fees.DefaultFeeRateStep,
|
||||
}
|
||||
est, err := fees.NewEstimator(&ecfg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Println(est.DumpBuckets())
|
||||
}
|
107
fees/doc.go
|
@@ -1,107 +0,0 @@
|
|||
// Copyright (c) 2018-2020 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package fees provides decred-specific methods for tracking and estimating fee
|
||||
rates for new transactions to be mined into the network. Fee rate estimation has
|
||||
two main goals:
|
||||
|
||||
- Ensuring transactions are mined within a target _confirmation range_
|
||||
(expressed in blocks);
|
||||
- Attempting to minimize fees while maintaining the above restriction.
|
||||
|
||||
# Preliminaries
|
||||
|
||||
There are two main regimes against which fee estimation needs to be evaluated
|
||||
according to how full blocks being mined are (and consequently how important fee
|
||||
rates are): _low contention_ and _high contention_:
|
||||
|
||||
In a low contention regime, the mempool sits mostly empty, transactions are
|
||||
usually mined very soon after being published and transaction fees are mostly
|
||||
sent using the minimum relay fee.
|
||||
|
||||
In a high contention regime, the mempool is usually filled with unmined
|
||||
transactions, there is active dispute for space in a block (by transactions
|
||||
using higher fees) and blocks are usually full.
|
||||
|
||||
The exact point of where these two regimes intersect is arbitrary, but it should
|
||||
be clear in the examples and simulations which of these is being discussed.
|
||||
|
||||
Note: a very high contention scenario (> 90% of blocks being full and
|
||||
transactions remaining in the mempool indefinitely) is one in which stakeholders
|
||||
should be discussing alternative solutions (increase block size, provide other
|
||||
second layer alternatives, etc). Also, the current fill rate of blocks in decred
|
||||
is low, so while we try to account for this regime, I personally expect that the
|
||||
implementation will need more tweaks as it approaches this.
|
||||
|
||||
The current approach to implement this estimation is based on bitcoin core's
|
||||
algorithm. References [1] and [2] provide a high level description of how it
|
||||
works there. Actual code is linked in references [3] and [4].
|
||||
|
||||
# Outline of the Algorithm
|
||||
|
||||
The algorithm is currently based in fee estimation as used in v0.14 of bitcoin
|
||||
core (which is also the basis for the v0.15+ method). A more comprehensive
|
||||
overview is available in reference [1].
|
||||
|
||||
This particular version was chosen because it's simpler to implement and should
|
||||
be sufficient for low contention regimes. It probably overestimates fees in
|
||||
higher contention regimes and longer target confirmation windows, but as pointed
|
||||
out earlier should be sufficient for current fill rate of decred's network.
|
||||
|
||||
The basic algorithm is as follows (as executed by a single full node):
|
||||
|
||||
Stats building stage:
|
||||
|
||||
- For each transaction observed entering mempool, record the block at which it
|
||||
was first seen
|
||||
- For each mined transaction which was previously observed to enter the mempool,
|
||||
record how long (in blocks) it took to be mined and its fee rate
|
||||
- Group mined transactions into fee rate _buckets_ and _confirmation ranges_,
|
||||
creating a table of how many transactions were mined at each confirmation
|
||||
range and fee rate bucket and their total committed fee
|
||||
- Whenever a new block is mined, decay older transactions to account for a
|
||||
dynamic fee environment
|
||||
|
||||
Estimation stage:
|
||||
|
||||
- Input a target confirmation range (how many blocks to wait for the tx to be
|
||||
mined)
|
||||
- Starting at the highest fee bucket, look for buckets where the chance of
|
||||
confirmation within the desired confirmation window is > 95%
|
||||
- Average all such buckets to get the estimated fee rate
|
||||
|
||||
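The estimation stage listed above boils down to a single scan over the per-bucket statistics. The following toy sketch uses assumed types and names (bucketStat, estimateFee) and an errors import; it is not the estimator's real code, only an illustration of the bucket scan.

    // bucketStat is a stand-in for one fee-rate bucket's tallies within the
    // target confirmation window.
    type bucketStat struct {
        feeRate   float64 // bucket fee rate upper bound, in atoms/kB
        confirmed float64 // txs confirmed within the target window
        observed  float64 // all txs observed entering this bucket
    }

    // estimateFee averages the fee rates of all buckets whose observed
    // confirmation rate inside the target window exceeds 95%, scanning from
    // the highest-fee bucket downwards.
    func estimateFee(buckets []bucketStat) (float64, error) {
        const minSuccessPct = 0.95
        var sum float64
        var count int
        for i := len(buckets) - 1; i >= 0; i-- {
            b := buckets[i]
            if b.observed == 0 {
                continue
            }
            if b.confirmed/b.observed > minSuccessPct {
                sum += b.feeRate
                count++
            }
        }
        if count == 0 {
            return 0, errors.New("no bucket with the minimum required success percentage found")
        }
        return sum / float64(count), nil
    }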
# Simulation
|
||||
|
||||
Development of the estimator was originally performed and simulated using the
|
||||
code in [5]. Simulation of the current code can be performed by using the
|
||||
dcrfeesim tool available in [6].
|
||||
|
||||
# Acknowledgements
|
||||
|
||||
Thanks to @davecgh for providing the initial review of the results and the
|
||||
original developers of the bitcoin core code (the brunt of which seems to have
|
||||
been made by @morcos).
|
||||
|
||||
## References
|
||||
|
||||
[1] Introduction to Bitcoin Core Estimation:
|
||||
https://bitcointechtalk.com/an-introduction-to-bitcoin-core-fee-estimation-27920880ad0
|
||||
|
||||
[2] Proposed Changes to Fee Estimation in version 0.15:
|
||||
https://gist.github.com/morcos/d3637f015bc4e607e1fd10d8351e9f41
|
||||
|
||||
[3] Source for fee estimation in v0.14:
|
||||
https://github.com/bitcoin/bitcoin/blob/v0.14.2/src/policy/fees.cpp
|
||||
|
||||
[4] Source for fee estimation in version 0.16.2:
|
||||
https://github.com/bitcoin/bitcoin/blob/v0.16.2/src/policy/fees.cpp
|
||||
|
||||
[5] Source for the original dcrfeesim and estimator work:
|
||||
https://github.com/matheusd/dcrfeesim_dev
|
||||
|
||||
[6] Source for the current dcrfeesim, using this module:
|
||||
https://github.com/matheusd/dcrfeesim
|
||||
*/
|
||||
package fees
|
|
@@ -1,908 +0,0 @@
|
|||
// Copyright (c) 2018-2020 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fees
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||
"github.com/lbryio/lbcutil"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
ldbutil "github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultMaxBucketFeeMultiplier is the default multiplier used to find the
|
||||
// largest fee bucket, starting at the minimum fee.
|
||||
DefaultMaxBucketFeeMultiplier int = 100
|
||||
|
||||
// DefaultMaxConfirmations is the default number of confirmation ranges to
|
||||
// track in the estimator.
|
||||
DefaultMaxConfirmations uint32 = 42
|
||||
|
||||
// DefaultFeeRateStep is the default multiplier between two consecutive fee
|
||||
// rate buckets.
|
||||
DefaultFeeRateStep float64 = 1.05
|
||||
|
||||
// defaultDecay is the default value used to decay old transactions from the
|
||||
// estimator.
|
||||
defaultDecay float64 = 0.998
|
||||
|
||||
// maxAllowedBucketFees is an upper bound of how many bucket fees can be
|
||||
// used in the estimator. This is verified during estimator initialization
|
||||
// and database loading.
|
||||
maxAllowedBucketFees = 2000
|
||||
|
||||
// maxAllowedConfirms is an upper bound of how many confirmation ranges can
|
||||
// be used in the estimator. This is verified during estimator
|
||||
// initialization and database loading.
|
||||
maxAllowedConfirms = 788
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoSuccessPctBucketFound is the error returned when no bucket has been
|
||||
// found with the minimum required percentage success.
|
||||
ErrNoSuccessPctBucketFound = errors.New("no bucket with the minimum " +
|
||||
"required success percentage found")
|
||||
|
||||
// ErrNotEnoughTxsForEstimate is the error returned when not enough
|
||||
// transactions have been seen by the fee generator to give an estimate.
|
||||
ErrNotEnoughTxsForEstimate = errors.New("not enough transactions seen for " +
|
||||
"estimation")
|
||||
|
||||
dbByteOrder = binary.BigEndian
|
||||
|
||||
dbKeyVersion = []byte("version")
|
||||
dbKeyBucketFees = []byte("bucketFeeBounds")
|
||||
dbKeyMaxConfirms = []byte("maxConfirms")
|
||||
dbKeyBestHeight = []byte("bestHeight")
|
||||
dbKeyBucketPrefix = []byte{0x01, 0x70, 0x1d, 0x00}
|
||||
)
|
||||
|
||||
// ErrTargetConfTooLarge is the type of error returned when a user of the
|
||||
// estimator requested a confirmation range higher than tracked by the estimator.
|
||||
type ErrTargetConfTooLarge struct {
|
||||
MaxConfirms int32
|
||||
ReqConfirms int32
|
||||
}
|
||||
|
||||
func (e ErrTargetConfTooLarge) Error() string {
|
||||
return fmt.Sprintf("target confirmation requested (%d) higher than "+
|
||||
"maximum confirmation range tracked by estimator (%d)", e.ReqConfirms,
|
||||
e.MaxConfirms)
|
||||
}
|
||||
|
||||
type feeRate float64
|
||||
|
||||
type txConfirmStatBucketCount struct {
|
||||
txCount float64
|
||||
feeSum float64
|
||||
}
|
||||
|
||||
type txConfirmStatBucket struct {
|
||||
confirmed []txConfirmStatBucketCount
|
||||
confirmCount float64
|
||||
feeSum float64
|
||||
}
|
||||
|
||||
// EstimatorConfig stores the configuration parameters for a given fee
|
||||
// estimator. It is used to initialize an empty fee estimator.
|
||||
type EstimatorConfig struct {
|
||||
// MaxConfirms is the maximum number of confirmation ranges to check.
|
||||
MaxConfirms uint32
|
||||
|
||||
// MinBucketFee is the value of the fee rate of the lowest bucket for which
|
||||
// estimation is tracked.
|
||||
MinBucketFee lbcutil.Amount
|
||||
|
||||
// MaxBucketFee is the value of the fee for the highest bucket for which
|
||||
// estimation is tracked.
|
||||
//
|
||||
// It MUST be higher than MinBucketFee.
|
||||
MaxBucketFee lbcutil.Amount
|
||||
|
||||
// ExtraBucketFee is an additional bucket fee rate to include in the
|
||||
// database for tracking transactions. Specifying this can be useful when
|
||||
// the default relay fee of the network is undergoing change (due to a new
|
||||
// release of the software for example), so that the older fee can be
|
||||
// tracked exactly.
|
||||
//
|
||||
// It MUST have a value between MinBucketFee and MaxBucketFee, otherwise
|
||||
// it's ignored.
|
||||
ExtraBucketFee lbcutil.Amount
|
||||
|
||||
// FeeRateStep is the multiplier to generate the fee rate buckets (each
|
||||
// bucket is higher than the previous one by this factor).
|
||||
//
|
||||
// It MUST have a value > 1.0.
|
||||
FeeRateStep float64
|
||||
|
||||
// DatabaseFile is the location of the estimator database file. If empty,
|
||||
// updates to the estimator state are not backed by the filesystem.
|
||||
DatabaseFile string
|
||||
|
||||
// ReplaceBucketsOnLoad indicates whether to replace the buckets in the
|
||||
// current estimator by those stored in the feesdb file instead of
|
||||
// validating that they are both using the same set of fees.
|
||||
ReplaceBucketsOnLoad bool
|
||||
}
|
||||
|
||||
// memPoolTxDesc is an aux structure used to track the local estimator mempool.
|
||||
type memPoolTxDesc struct {
|
||||
addedHeight int32
|
||||
bucketIndex int32
|
||||
fees feeRate
|
||||
}
|
||||
|
||||
// Estimator tracks historical data for published and mined transactions in
|
||||
// order to estimate fees to be used in new transactions for confirmation
|
||||
// within a target block window.
|
||||
type Estimator struct {
|
||||
// bucketFeeBounds are the upper bounds for each individual fee bucket.
|
||||
bucketFeeBounds []feeRate
|
||||
|
||||
// buckets are the confirmed tx count and fee sum by bucket fee.
|
||||
buckets []txConfirmStatBucket
|
||||
|
||||
// memPool are the mempool transaction count and fee sum by bucket fee.
|
||||
memPool []txConfirmStatBucket
|
||||
|
||||
// memPoolTxs is the map of transaction hashes and data of known mempool txs.
|
||||
memPoolTxs map[chainhash.Hash]memPoolTxDesc
|
||||
|
||||
maxConfirms int32
|
||||
decay float64
|
||||
bestHeight int32
|
||||
db *leveldb.DB
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewEstimator returns an empty estimator given a config. This estimator
|
||||
// then needs to be fed data for published and mined transactions before it can
|
||||
// be used to estimate fees for new transactions.
|
||||
func NewEstimator(cfg *EstimatorConfig) (*Estimator, error) {
|
||||
// Sanity check the config.
|
||||
if cfg.MaxBucketFee <= cfg.MinBucketFee {
|
||||
return nil, errors.New("maximum bucket fee should not be lower than " +
|
||||
"minimum bucket fee")
|
||||
}
|
||||
if cfg.FeeRateStep <= 1.0 {
|
||||
return nil, errors.New("fee rate step should not be <= 1.0")
|
||||
}
|
||||
if cfg.MinBucketFee <= 0 {
|
||||
return nil, errors.New("minimum bucket fee rate cannot be <= 0")
|
||||
}
|
||||
if cfg.MaxConfirms > maxAllowedConfirms {
|
||||
return nil, fmt.Errorf("confirmation count requested (%d) larger than "+
|
||||
"maximum allowed (%d)", cfg.MaxConfirms, maxAllowedConfirms)
|
||||
}
|
||||
|
||||
decay := defaultDecay
|
||||
maxConfirms := cfg.MaxConfirms
|
||||
max := float64(cfg.MaxBucketFee)
|
||||
var bucketFees []feeRate
|
||||
prevF := 0.0
|
||||
extraBucketFee := float64(cfg.ExtraBucketFee)
|
||||
for f := float64(cfg.MinBucketFee); f < max; f *= cfg.FeeRateStep {
|
||||
if (f > extraBucketFee) && (prevF < extraBucketFee) {
|
||||
// Add the extra bucket fee for tracking.
|
||||
bucketFees = append(bucketFees, feeRate(extraBucketFee))
|
||||
}
|
||||
bucketFees = append(bucketFees, feeRate(f))
|
||||
prevF = f
|
||||
}
|
||||
|
||||
// The last bucket catches everything else, so it uses an upper bound of
|
||||
// +inf which any rate must be lower than.
|
||||
bucketFees = append(bucketFees, feeRate(math.Inf(1)))
|
||||
|
||||
nbBuckets := len(bucketFees)
|
||||
res := &Estimator{
|
||||
bucketFeeBounds: bucketFees,
|
||||
buckets: make([]txConfirmStatBucket, nbBuckets),
|
||||
memPool: make([]txConfirmStatBucket, nbBuckets),
|
||||
maxConfirms: int32(maxConfirms),
|
||||
decay: decay,
|
||||
memPoolTxs: make(map[chainhash.Hash]memPoolTxDesc),
|
||||
bestHeight: -1,
|
||||
}
|
||||
|
||||
for i := range bucketFees {
|
||||
res.buckets[i] = txConfirmStatBucket{
|
||||
confirmed: make([]txConfirmStatBucketCount, maxConfirms),
|
||||
}
|
||||
res.memPool[i] = txConfirmStatBucket{
|
||||
confirmed: make([]txConfirmStatBucketCount, maxConfirms),
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.DatabaseFile != "" {
|
||||
db, err := leveldb.OpenFile(cfg.DatabaseFile, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening estimator database: %v", err)
|
||||
}
|
||||
res.db = db
|
||||
|
||||
err = res.loadFromDatabase(cfg.ReplaceBucketsOnLoad)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading estimator data from db: %v",
|
||||
err)
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// DumpBuckets returns the internal estimator state as a string.
|
||||
func (stats *Estimator) DumpBuckets() string {
|
||||
res := " |"
|
||||
    for c := 0; c < int(stats.maxConfirms); c++ {
        if c == int(stats.maxConfirms)-1 {
            res += fmt.Sprintf(" %15s", "+Inf")
        } else {
            res += fmt.Sprintf(" %15d|", c+1)
        }
    }
    res += "\n"

    l := len(stats.bucketFeeBounds)
    for i := 0; i < l; i++ {
        res += fmt.Sprintf("%10.8f", stats.bucketFeeBounds[i]/1e8)
        for c := 0; c < int(stats.maxConfirms); c++ {
            avg := float64(0)
            count := stats.buckets[i].confirmed[c].txCount
            if stats.buckets[i].confirmed[c].txCount > 0 {
                avg = stats.buckets[i].confirmed[c].feeSum /
                    stats.buckets[i].confirmed[c].txCount / 1e8
            }

            res += fmt.Sprintf("| %.8f %6.1f", avg, count)
        }
        res += "\n"
    }

    return res
}

// loadFromDatabase loads the estimator data from the currently opened database
// and performs any db upgrades if required. After loading, it updates the db
// with the current estimator configuration.
//
// Argument replaceBuckets indicates if the buckets in the current stats should
// be completely replaced by what is stored in the database or if the data
// should be validated against what is current in the estimator.
//
// The database should *not* be used while loading is taking place.
//
// The current code does not support loading from a database created with a
// different set of configuration parameters (fee rate buckets, max confirmation
// range, etc) than the current estimator is configured with. If an incompatible
// file is detected during loading, an error is returned and the user must
// either reconfigure the estimator to use the same parameters to allow the
// database to be loaded or they must ignore the database file (possibly by
// deleting it) so that the new parameters are used. In the future it might be
// possible to load from a different set of configuration parameters.
//
// The current code does not currently save mempool information, since saving
// information in the estimator without saving the corresponding data in the
// mempool itself could result in transactions lingering in the mempool
// estimator forever.
func (stats *Estimator) loadFromDatabase(replaceBuckets bool) error {
    if stats.db == nil {
        return errors.New("estimator database is not open")
    }

    // Database version is currently hardcoded here as this is the only
    // place that uses it.
    currentDbVersion := []byte{1}

    version, err := stats.db.Get(dbKeyVersion, nil)
    if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
        return fmt.Errorf("error reading version from db: %v", err)
    }
    if len(version) < 1 {
        // No data in the file. Fill with the current config.
        batch := new(leveldb.Batch)
        b := bytes.NewBuffer(nil)
        var maxConfirmsBytes [4]byte
        var bestHeightBytes [8]byte

        batch.Put(dbKeyVersion, currentDbVersion)

        dbByteOrder.PutUint32(maxConfirmsBytes[:], uint32(stats.maxConfirms))
        batch.Put(dbKeyMaxConfirms, maxConfirmsBytes[:])

        dbByteOrder.PutUint64(bestHeightBytes[:], uint64(stats.bestHeight))
        batch.Put(dbKeyBestHeight, bestHeightBytes[:])

        err = binary.Write(b, dbByteOrder, stats.bucketFeeBounds)
        if err != nil {
            return fmt.Errorf("error writing bucket fees to db: %v", err)
        }
        batch.Put(dbKeyBucketFees, b.Bytes())

        err = stats.db.Write(batch, nil)
        if err != nil {
            return fmt.Errorf("error writing initial estimator db file: %v",
                err)
        }

        err = stats.updateDatabase()
        if err != nil {
            return fmt.Errorf("error adding initial estimator data to db: %v",
                err)
        }

        log.Debug("Initialized fee estimator database")

        return nil
    }

    if !bytes.Equal(currentDbVersion, version) {
        return fmt.Errorf("incompatible database version: %d", version)
    }

    maxConfirmsBytes, err := stats.db.Get(dbKeyMaxConfirms, nil)
    if err != nil {
        return fmt.Errorf("error reading max confirmation range from db file: "+
            "%v", err)
    }
    if len(maxConfirmsBytes) != 4 {
        return errors.New("wrong number of bytes in stored maxConfirms")
    }
    fileMaxConfirms := int32(dbByteOrder.Uint32(maxConfirmsBytes))
    if fileMaxConfirms > maxAllowedConfirms {
        return fmt.Errorf("confirmation count stored in database (%d) larger "+
            "than maximum allowed (%d)", fileMaxConfirms, maxAllowedConfirms)
    }

    feesBytes, err := stats.db.Get(dbKeyBucketFees, nil)
    if err != nil {
        return fmt.Errorf("error reading fee bounds from db file: %v", err)
    }
    if feesBytes == nil {
        return errors.New("fee bounds not found in database file")
    }
    fileNbBucketFees := len(feesBytes) / 8
    if fileNbBucketFees > maxAllowedBucketFees {
        return fmt.Errorf("more fee buckets stored in file (%d) than allowed "+
            "(%d)", fileNbBucketFees, maxAllowedBucketFees)
    }
    fileBucketFees := make([]feeRate, fileNbBucketFees)
    err = binary.Read(bytes.NewReader(feesBytes), dbByteOrder,
        &fileBucketFees)
    if err != nil {
        return fmt.Errorf("error decoding file bucket fees: %v", err)
    }

    if !replaceBuckets {
        if stats.maxConfirms != fileMaxConfirms {
            return errors.New("max confirmation range in database file different " +
                "than currently configured max confirmation")
        }

        if len(stats.bucketFeeBounds) != len(fileBucketFees) {
            return errors.New("number of bucket fees stored in database file " +
                "different than currently configured bucket fees")
        }

        for i, f := range fileBucketFees {
            if stats.bucketFeeBounds[i] != f {
                return errors.New("bucket fee rates stored in database file " +
                    "different than currently configured fees")
            }
        }
    }

    fileBuckets := make([]txConfirmStatBucket, fileNbBucketFees)

    iter := stats.db.NewIterator(ldbutil.BytesPrefix(dbKeyBucketPrefix), nil)
    err = nil
    var fbytes [8]byte
    for iter.Next() {
        key := iter.Key()
        if len(key) != 8 {
            err = fmt.Errorf("bucket key read from db has wrong length (%d)",
                len(key))
            break
        }
        idx := int(int32(dbByteOrder.Uint32(key[4:])))
        if (idx >= len(fileBuckets)) || (idx < 0) {
            err = fmt.Errorf("wrong bucket index read from db (%d vs %d)",
                idx, len(fileBuckets))
            break
        }
        value := iter.Value()
        if len(value) != 8+8+int(fileMaxConfirms)*16 {
            err = errors.New("wrong size of data in bucket read from db")
            break
        }

        b := bytes.NewBuffer(value)
        readf := func() float64 {
            // We ignore the error here because the only possible one is EOF and
            // we already previously checked the length of the source byte array
            // for consistency.
            b.Read(fbytes[:])
            return math.Float64frombits(dbByteOrder.Uint64(fbytes[:]))
        }

        fileBuckets[idx].confirmCount = readf()
        fileBuckets[idx].feeSum = readf()
        fileBuckets[idx].confirmed = make([]txConfirmStatBucketCount, fileMaxConfirms)
        for i := range fileBuckets[idx].confirmed {
            fileBuckets[idx].confirmed[i].txCount = readf()
            fileBuckets[idx].confirmed[i].feeSum = readf()
        }
    }
    iter.Release()
    if err != nil {
        return err
    }
    err = iter.Error()
    if err != nil {
        return fmt.Errorf("error on bucket iterator: %v", err)
    }

    stats.bucketFeeBounds = fileBucketFees
    stats.buckets = fileBuckets
    stats.maxConfirms = fileMaxConfirms
    log.Debug("Loaded fee estimator database")

    return nil
}

// updateDatabase updates the current database file with the current bucket
// data. This is called during normal operation after processing mined
// transactions, so it only updates data that might have changed.
func (stats *Estimator) updateDatabase() error {
    if stats.db == nil {
        return errors.New("estimator database is closed")
    }

    batch := new(leveldb.Batch)
    buf := bytes.NewBuffer(nil)

    var key [8]byte
    copy(key[:], dbKeyBucketPrefix)
    var fbytes [8]byte
    writef := func(f float64) {
        dbByteOrder.PutUint64(fbytes[:], math.Float64bits(f))
        _, err := buf.Write(fbytes[:])
        if err != nil {
            panic(err) // only possible error is ErrTooLarge
        }
    }

    for i, b := range stats.buckets {
        dbByteOrder.PutUint32(key[4:], uint32(i))
        buf.Reset()
        writef(b.confirmCount)
        writef(b.feeSum)
        for _, c := range b.confirmed {
            writef(c.txCount)
            writef(c.feeSum)
        }
        batch.Put(key[:], buf.Bytes())
    }

    var bestHeightBytes [8]byte

    dbByteOrder.PutUint64(bestHeightBytes[:], uint64(stats.bestHeight))
    batch.Put(dbKeyBestHeight, bestHeightBytes[:])

    err := stats.db.Write(batch, nil)
    if err != nil {
        return fmt.Errorf("error writing update to estimator db file: %v",
            err)
    }

    return nil
}
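
// Illustrative sketch (not part of the original source): the on-disk record
// layout implied by updateDatabase and loadFromDatabase, assuming a 4-byte
// dbKeyBucketPrefix and the package's dbByteOrder:
//
//	key   = dbKeyBucketPrefix (4 bytes) || uint32 bucket index (4 bytes)
//	value = float64 confirmCount || float64 feeSum ||
//	        maxConfirms * (float64 txCount || float64 feeSum)
//
// which is exactly what the len(value) != 8+8+int(fileMaxConfirms)*16 check
// verifies while loading.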

// lowerBucket returns the bucket that has the highest upperBound such that it
// is still lower than rate.
func (stats *Estimator) lowerBucket(rate feeRate) int32 {
    res := sort.Search(len(stats.bucketFeeBounds), func(i int) bool {
        return stats.bucketFeeBounds[i] >= rate
    })
    return int32(res)
}
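
// Worked example (not in the original source): with bucket fee bounds of,
// say, []feeRate{1000, 2000, 4000}, sort.Search returns the first index whose
// bound is >= the given rate:
//
//	lowerBucket(500)  == 0
//	lowerBucket(1500) == 1
//	lowerBucket(2000) == 1
//
// A rate above every bound would yield len(stats.bucketFeeBounds).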

// confirmRange returns the confirmation range index to be used for the given
// number of blocks to confirm. The last confirmation range has an upper bound
// of +inf to mean that it represents all confirmations higher than the second
// to last bucket.
func (stats *Estimator) confirmRange(blocksToConfirm int32) int32 {
    idx := blocksToConfirm - 1
    if idx >= stats.maxConfirms {
        return stats.maxConfirms - 1
    }
    return idx
}
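
// Worked example (not in the original source), assuming maxConfirms == 32:
//
//	confirmRange(1)  == 0  // mined in the very next block
//	confirmRange(5)  == 4
//	confirmRange(40) == 31 // clamped into the final +Inf catch-all range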

// updateMovingAverages updates the moving averages for the existing confirmed
// statistics and increases the confirmation ranges for mempool txs. This is
// meant to be called when a new block is mined, so that we discount older
// information.
func (stats *Estimator) updateMovingAverages(newHeight int32) {
    log.Debugf("Updated moving averages into block %d", newHeight)

    // decay the existing stats so that, over time, we rely on more up to date
    // information regarding fees.
    for b := 0; b < len(stats.buckets); b++ {
        bucket := &stats.buckets[b]
        bucket.feeSum *= stats.decay
        bucket.confirmCount *= stats.decay
        for c := 0; c < len(bucket.confirmed); c++ {
            conf := &bucket.confirmed[c]
            conf.feeSum *= stats.decay
            conf.txCount *= stats.decay
        }
    }

    // For unconfirmed (mempool) transactions, every transaction will now take
    // at least one additional block to confirm. So for every fee bucket, we
    // move the stats up one confirmation range.
    for b := 0; b < len(stats.memPool); b++ {
        bucket := &stats.memPool[b]

        // The last confirmation range represents all txs confirmed at >= than
        // the initial maxConfirms, so we *add* the second to last range into
        // the last range.
        c := len(bucket.confirmed) - 1
        bucket.confirmed[c].txCount += bucket.confirmed[c-1].txCount
        bucket.confirmed[c].feeSum += bucket.confirmed[c-1].feeSum

        // For the other ranges, just move up the stats.
        for c--; c > 0; c-- {
            bucket.confirmed[c] = bucket.confirmed[c-1]
        }

        // and finally, the very first confirmation range (ie, what will enter
        // the mempool now that a new block has been mined) is zeroed so we can
        // start tracking brand new txs.
        bucket.confirmed[0].txCount = 0
        bucket.confirmed[0].feeSum = 0
    }

    stats.bestHeight = newHeight
}
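
// Worked example (not in the original source), assuming a decay factor of
// 0.998: a bucket currently holding confirmCount == 100 and feeSum == 5e7
// becomes 99.8 and 4.99e7 after one new block, so an observation made n
// blocks ago contributes only decay^n of its original weight. A rough
// half-life for that assumed factor:
//
//	halfLife := math.Log(0.5) / math.Log(0.998) // roughly 346 blocks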

// newMemPoolTx records a new memPool transaction into the stats. A brand new
// mempool transaction has a minimum confirmation range of 1, so it is inserted
// into the very first confirmation range bucket of the appropriate fee rate
// bucket.
func (stats *Estimator) newMemPoolTx(bucketIdx int32, fees feeRate) {
    conf := &stats.memPool[bucketIdx].confirmed[0]
    conf.feeSum += float64(fees)
    conf.txCount++
}

// newMinedTx moves a mined tx from the mempool into the confirmed statistics.
// Note that this should only be called if the transaction had been seen and
// previously tracked by calling newMemPoolTx for it. Failing to observe that
// will result in undefined statistical results.
func (stats *Estimator) newMinedTx(blocksToConfirm int32, rate feeRate) {
    bucketIdx := stats.lowerBucket(rate)
    confirmIdx := stats.confirmRange(blocksToConfirm)
    bucket := &stats.buckets[bucketIdx]

    // increase the counts for all confirmation ranges starting at the first
    // confirmIdx because it took at least `blocksToConfirm` for this tx to be
    // mined. This is used to simplify the bucket selection during estimation,
    // so that we only need to check a single confirmation range (instead of
    // iterating to sum all confirmations with <= `minConfs`).
    for c := int(confirmIdx); c < len(bucket.confirmed); c++ {
        conf := &bucket.confirmed[c]
        conf.feeSum += float64(rate)
        conf.txCount++
    }
    bucket.confirmCount++
    bucket.feeSum += float64(rate)
}

func (stats *Estimator) removeFromMemPool(blocksInMemPool int32, rate feeRate) {
    bucketIdx := stats.lowerBucket(rate)
    confirmIdx := stats.confirmRange(blocksInMemPool + 1)
    bucket := &stats.memPool[bucketIdx]
    conf := &bucket.confirmed[confirmIdx]
    conf.feeSum -= float64(rate)
    conf.txCount--
    if conf.txCount < 0 {
        // If this happens, it means a transaction has been called on this
        // function but not on a previous newMemPoolTx. This leaves the fee db
        // in an undefined state and should never happen in regular use. If this
        // happens, then there is a logic or coding error somewhere, either in
        // the estimator itself or on its hooking to the mempool/network sync
        // manager. Either way, the easiest way to fix this is to completely
        // delete the database and start again. During development, you can use
        // a panic() here and we might return it after being confident that the
        // estimator is completely bug free.
        log.Errorf("Transaction count in bucket index %d and confirmation "+
            "index %d became < 0", bucketIdx, confirmIdx)
    }
}

// estimateMedianFee estimates the median fee rate for the current recorded
// statistics such that at least successPct transactions have been mined on all
// tracked fee rate buckets with fee >= to the median.
// In other words, this is the median fee of the lowest bucket such that it and
// all higher fee buckets have >= successPct transactions confirmed in at most
// `targetConfs` confirmations.
// Note that sometimes the requested combination of targetConfs and successPct is
// not achievable (hypothetical example: 99% of txs confirmed within 1 block)
// or there are not enough recorded statistics to derive a successful estimate
// (eg: confirmation tracking has only started or there was a period of very few
// transactions). In those situations, the appropriate error is returned.
func (stats *Estimator) estimateMedianFee(targetConfs int32, successPct float64) (feeRate, error) {
    if targetConfs <= 0 {
        return 0, errors.New("target confirmation range cannot be <= 0")
    }

    const minTxCount float64 = 1

    if (targetConfs - 1) >= stats.maxConfirms {
        // We might want to add support to use a targetConf at +infinity to
        // allow us to make estimates at confirmation interval higher than what
        // we currently track.
        return 0, ErrTargetConfTooLarge{MaxConfirms: stats.maxConfirms,
            ReqConfirms: targetConfs}
    }

    startIdx := len(stats.buckets) - 1
    confirmRangeIdx := stats.confirmRange(targetConfs)

    var totalTxs, confirmedTxs float64
    bestBucketsStt := startIdx
    bestBucketsEnd := startIdx
    curBucketsEnd := startIdx

    for b := startIdx; b >= 0; b-- {
        totalTxs += stats.buckets[b].confirmCount
        confirmedTxs += stats.buckets[b].confirmed[confirmRangeIdx].txCount

        // Add the mempool (unconfirmed) transactions to the total tx count
        // since a very large mempool for the given bucket might mean that
        // miners are reluctant to include these in their mined blocks.
        totalTxs += stats.memPool[b].confirmed[confirmRangeIdx].txCount

        if totalTxs > minTxCount {
            if confirmedTxs/totalTxs < successPct {
                if curBucketsEnd == startIdx {
                    return 0, ErrNoSuccessPctBucketFound
                }
                break
            }

            bestBucketsStt = b
            bestBucketsEnd = curBucketsEnd
            curBucketsEnd = b - 1
            totalTxs = 0
            confirmedTxs = 0
        }
    }

    txCount := float64(0)
    for b := bestBucketsStt; b <= bestBucketsEnd; b++ {
        txCount += stats.buckets[b].confirmCount
    }
    if txCount <= 0 {
        return 0, ErrNotEnoughTxsForEstimate
    }
    txCount /= 2
    for b := bestBucketsStt; b <= bestBucketsEnd; b++ {
        if stats.buckets[b].confirmCount < txCount {
            txCount -= stats.buckets[b].confirmCount
        } else {
            median := stats.buckets[b].feeSum / stats.buckets[b].confirmCount
            return feeRate(median), nil
        }
    }

    return 0, errors.New("this isn't supposed to be reached")
}
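
// Worked example (not in the original source), assuming successPct == 0.95,
// empty mempool buckets and per-bucket confirmed stats for the requested
// confirmation range (highest fee bucket first):
//
//	bucket 2: confirmCount 50, confirmed in range 49 -> 98%, window passes
//	bucket 1: confirmCount 40, confirmed in range 38 -> 95%, window passes
//	bucket 0: confirmCount 30, confirmed in range 12 -> 40%, scan stops
//
// The last window that met the success ratio is bucket 1, so the median is
// taken over that range and the returned estimate is its average rate,
// feeSum/confirmCount of bucket 1.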

// EstimateFee is the public version of estimateMedianFee. It calculates the
// suggested fee for a transaction to be confirmed in at most `targetConf`
// blocks after publishing with a high degree of certainty.
//
// This function is safe to be called from multiple goroutines but might block
// until concurrent modifications to the internal database state are complete.
func (stats *Estimator) EstimateFee(targetConfs int32) (lbcutil.Amount, error) {
    stats.lock.RLock()
    rate, err := stats.estimateMedianFee(targetConfs, 0.95)
    stats.lock.RUnlock()

    if err != nil {
        return 0, err
    }

    rate = feeRate(math.Round(float64(rate)))
    if rate < stats.bucketFeeBounds[0] {
        // Prevent our public facing api to ever return something lower than the
        // minimum fee
        rate = stats.bucketFeeBounds[0]
    }

    return lbcutil.Amount(rate), nil
}
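
// Illustrative usage (not part of the original source), assuming an
// *Estimator value named estimator; fallbackFeeRate is hypothetical and up to
// the caller:
//
//	fee, err := estimator.EstimateFee(6) // confirm within at most 6 blocks
//	if err != nil {
//		// e.g. ErrNoSuccessPctBucketFound or ErrNotEnoughTxsForEstimate.
//		fee = fallbackFeeRate
//	}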

// Enable establishes the current best height of the blockchain after
// initializing the chain. All new mempool transactions will be added at this
// block height.
func (stats *Estimator) Enable(bestHeight int32) {
    log.Debugf("Setting best height as %d", bestHeight)
    stats.lock.Lock()
    stats.bestHeight = bestHeight
    stats.lock.Unlock()
}

// IsEnabled returns whether the fee estimator is ready to accept new mined and
// mempool transactions.
func (stats *Estimator) IsEnabled() bool {
    stats.lock.RLock()
    enabled := stats.bestHeight > -1
    stats.lock.RUnlock()
    return enabled
}

// AddMemPoolTransaction adds a mempool transaction to the estimator in order to
// account for it in the estimations. It assumes that this transaction is
// entering the mempool at the currently recorded best chain hash, using the
// total fee amount (in atoms) and with the provided size (in bytes).
//
// This is safe to be called from multiple goroutines.
func (stats *Estimator) AddMemPoolTransaction(txHash *chainhash.Hash, fee, size int64) {
    stats.lock.Lock()
    defer stats.lock.Unlock()

    if stats.bestHeight < 0 {
        return
    }

    if _, exists := stats.memPoolTxs[*txHash]; exists {
        // we should not double count transactions
        return
    }

    // Note that we use this less exact version instead of fee * 1000 / size
    // (using ints) because it naturally "downsamples" the fee rates towards the
    // minimum at values less than 0.001 DCR/KB. This is needed because due to
    // how the wallet estimates the final fee given an input rate and the final
    // tx size, there's usually a small discrepancy towards a higher effective
    // rate in the published tx.
    rate := feeRate(fee / size * 1000)

    if rate < stats.bucketFeeBounds[0] {
        // Transactions paying less than the current relaying fee can only
        // possibly be included in the high priority/zero fee area of blocks,
        // which are usually of limited size, so we explicitly don't track
        // those.
        // This also naturally handles votes (SSGen transactions) which don't
        // carry a tx fee and are required for inclusion in blocks. Note that
        // the test is explicitly < instead of <= so that we *can* track
        // transactions that pay *exactly* the minimum fee.
        return
    }

    log.Debugf("Adding mempool tx %s using fee rate %.8f", txHash, rate/1e8)

    tx := memPoolTxDesc{
        addedHeight: stats.bestHeight,
        bucketIndex: stats.lowerBucket(rate),
        fees:        rate,
    }
    stats.memPoolTxs[*txHash] = tx
    stats.newMemPoolTx(tx.bucketIndex, rate)
}
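
// Worked example (not in the original source) of the integer fee rate above:
// a tx paying 250 atoms of fee over 300 bytes gives
//
//	fee/size*1000 == 250/300*1000 == 0*1000 == 0
//
// whereas fee*1000/size would give 833. Rates truncated below
// bucketFeeBounds[0] are simply not tracked, which is the intended
// downsampling toward the minimum.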

// RemoveMemPoolTransaction removes a mempool transaction from statistics
// tracking.
//
// This is safe to be called from multiple goroutines.
func (stats *Estimator) RemoveMemPoolTransaction(txHash *chainhash.Hash) {
    stats.lock.Lock()
    defer stats.lock.Unlock()

    desc, exists := stats.memPoolTxs[*txHash]
    if !exists {
        return
    }

    log.Debugf("Removing tx %s from mempool", txHash)

    stats.removeFromMemPool(stats.bestHeight-desc.addedHeight, desc.fees)
    delete(stats.memPoolTxs, *txHash)
}

// processMinedTransaction moves the transaction that exist in the currently
// tracked mempool into a mined state.
//
// This function is *not* safe to be called from multiple goroutines.
func (stats *Estimator) processMinedTransaction(blockHeight int32, txh *chainhash.Hash) {
    desc, exists := stats.memPoolTxs[*txh]
    if !exists {
        // We cannot use transactions that we didn't know about to estimate
        // because that opens up the possibility of miners introducing dummy,
        // high fee transactions which would tend to then increase the average
        // fee estimate.
        // Tracking only previously known transactions forces miners trying to
        // pull off this attack to broadcast their transactions and possibly
        // forfeit their coins by having the transaction mined by a competitor.
        log.Tracef("Processing previously unknown mined tx %s", txh)
        return
    }

    stats.removeFromMemPool(blockHeight-desc.addedHeight, desc.fees)
    delete(stats.memPoolTxs, *txh)

    if blockHeight <= desc.addedHeight {
        // This shouldn't usually happen but we need to explicitly test for
        // because we can't account for non positive confirmation ranges in
        // mined transactions.
        log.Errorf("Mined transaction %s (%d) that was known from "+
            "mempool at a higher block height (%d)", txh, blockHeight,
            desc.addedHeight)
        return
    }

    mineDelay := blockHeight - desc.addedHeight
    log.Debugf("Processing mined tx %s (rate %.8f, delay %d)", txh,
        desc.fees/1e8, mineDelay)
    stats.newMinedTx(mineDelay, desc.fees)
}

// ProcessBlock processes all mined transactions in the provided block.
//
// This function is safe to be called from multiple goroutines.
func (stats *Estimator) ProcessBlock(block *lbcutil.Block) error {
    stats.lock.Lock()
    defer stats.lock.Unlock()

    if stats.bestHeight < 0 {
        return nil
    }

    blockHeight := block.Height()
    if blockHeight <= stats.bestHeight {
        // we don't explicitly track reorgs right now
        log.Warnf("Trying to process mined transactions at block %d when "+
            "previous best block was at height %d", blockHeight,
            stats.bestHeight)
        return nil
    }

    stats.updateMovingAverages(blockHeight)

    for _, tx := range block.Transactions() {
        stats.processMinedTransaction(blockHeight, tx.Hash())
    }

    if stats.db != nil {
        return stats.updateDatabase()
    }

    return nil
}
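
// Illustrative wiring sketch (not part of the original source); the caller
// side and variable names are assumed:
//
//	estimator.Enable(bestHeight)                       // once the chain is synced
//	estimator.AddMemPoolTransaction(txHash, fee, size) // on mempool acceptance
//	estimator.RemoveMemPoolTransaction(txHash)         // on mempool removal
//	if err := estimator.ProcessBlock(block); err != nil {
//		log.Errorf("fee estimator: %v", err)
//	}
//
// so that transactions previously seen in the mempool feed the confirmed
// statistics when they are mined.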

// Close closes the database (if it is currently opened).
func (stats *Estimator) Close() {
    stats.lock.Lock()

    if stats.db != nil {
        log.Trace("Closing fee estimator database")
        stats.db.Close()
        stats.db = nil
    }

    stats.lock.Unlock()
}
27
fees/log.go
@@ -1,27 +0,0 @@
// Copyright (c) 2018-2019 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package fees

import (
    "github.com/btcsuite/btclog"
)

// log is a logger that is initialized with no output filters. This means the
// package will not perform any logging by default until the caller requests it.
// The default amount of logging is none.
var log btclog.Logger

// DisableLog disables all library log output. Logging output is disabled
// by default until either UseLogger or SetLogWriter are called.
func DisableLog() {
    log = btclog.Disabled
}

// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
    log = logger
}
46
go.mod
@@ -1,56 +1,54 @@
module github.com/lbryio/lbcd

go 1.19
go 1.17

require (
    github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f
    github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd
    github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792
    github.com/btcsuite/winsvc v1.0.0
    github.com/cockroachdb/errors v1.9.0
    github.com/cockroachdb/pebble v0.0.0-20220523221036-bb2c1501ac23
    github.com/cockroachdb/errors v1.8.6
    github.com/cockroachdb/pebble v0.0.0-20211124004043-0dc90bc41e62
    github.com/davecgh/go-spew v1.1.1
    github.com/decred/dcrd/lru v1.1.1
    github.com/felixge/fgprof v0.9.2
    github.com/jessevdk/go-flags v1.5.0
    github.com/decred/dcrd/lru v1.0.0
    github.com/felixge/fgprof v0.9.1
    github.com/jessevdk/go-flags v1.4.0
    github.com/jrick/logrotate v1.0.0
    github.com/lbryio/lbcutil v1.0.202
    github.com/lbryio/lbcutil v1.0.202-rc3
    github.com/pkg/errors v0.9.1
    github.com/shirou/gopsutil/v3 v3.22.4
    github.com/shirou/gopsutil/v3 v3.21.7
    github.com/spf13/cobra v1.1.3
    github.com/stretchr/testify v1.7.1
    github.com/stretchr/testify v1.7.0
    github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
    github.com/vmihailenco/msgpack/v5 v5.3.2
    golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898
    golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b
    golang.org/x/text v0.3.7
)

require (
    github.com/DataDog/zstd v1.5.2 // indirect
    github.com/DataDog/zstd v1.5.0 // indirect
    github.com/StackExchange/wmi v1.2.1 // indirect
    github.com/aead/siphash v1.0.1 // indirect
    github.com/cespare/xxhash/v2 v2.1.2 // indirect
    github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect
    github.com/cockroachdb/redact v1.1.3 // indirect
    github.com/codahale/hdrhistogram v0.9.0 // indirect
    github.com/getsentry/sentry-go v0.13.0 // indirect
    github.com/go-ole/go-ole v1.2.6 // indirect
    github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect
    github.com/go-ole/go-ole v1.2.5 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/snappy v0.0.4 // indirect
    github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1 // indirect
    github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040 // indirect
    github.com/inconshreveable/mousetrap v1.0.0 // indirect
    github.com/kkdai/bstream v1.0.0 // indirect
    github.com/klauspost/compress v1.15.4 // indirect
    github.com/klauspost/compress v1.13.6 // indirect
    github.com/kr/pretty v0.3.0 // indirect
    github.com/kr/text v0.2.0 // indirect
    github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
    github.com/rogpeppe/go-internal v1.8.1 // indirect
    github.com/rogpeppe/go-internal v1.8.0 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/tklauser/go-sysconf v0.3.10 // indirect
    github.com/tklauser/numcpus v0.5.0 // indirect
    github.com/tklauser/go-sysconf v0.3.7 // indirect
    github.com/tklauser/numcpus v0.2.3 // indirect
    github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
    github.com/yusufpapurcu/wmi v1.2.2 // indirect
    golang.org/x/exp v0.0.0-20220518171630-0b5c67f07fdf // indirect
    golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
    golang.org/x/exp v0.0.0-20211123021643-48cbe7f80d7c // indirect
    golang.org/x/sys v0.0.0-20211123173158-ef496fb156ab // indirect
    gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
340
go.sum
@@ -11,43 +11,69 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl
|
|||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
|
||||
github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
|
||||
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
|
||||
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
|
||||
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
|
||||
github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo=
|
||||
github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
|
||||
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
|
||||
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
|
||||
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
|
@ -55,29 +81,29 @@ github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
|
|||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4=
|
||||
github.com/cockroachdb/datadriven v1.0.1-0.20211007161720-b558070c3be0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4=
|
||||
github.com/cockroachdb/datadriven v1.0.1-0.20220214170620-9913f5bc19b7/go.mod h1:hi0MtSY3AYDQNDi83kDkMH5/yqM/CsIrsOITkSoH7KI=
|
||||
github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM=
|
||||
github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac=
|
||||
github.com/cockroachdb/errors v1.8.8/go.mod h1:z6VnEL3hZ/2ONZEvG7S5Ym0bU2AqPcEKnIiA1wbsSu0=
|
||||
github.com/cockroachdb/errors v1.9.0 h1:B48dYem5SlAY7iU8AKsgedb4gH6mo+bDkbtLIvM/a88=
|
||||
github.com/cockroachdb/errors v1.9.0/go.mod h1:vaNcEYYqbIqB5JhKBhFV9CneUqeuEbB2OYJBK4GBNYQ=
|
||||
github.com/cockroachdb/errors v1.8.6 h1:Am9evxl/po3RzpokemQvq7S7Cd0mxv24xy0B/trlQF4=
|
||||
github.com/cockroachdb/errors v1.8.6/go.mod h1:hOm5fabihW+xEyY1kuypGwqT+Vt7rafg04ytBtIpeIQ=
|
||||
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
|
||||
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f h1:6jduT9Hfc0njg5jJ1DdKCFPdMBrp/mdZfCpa5h+WM74=
|
||||
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v0.0.0-20220523221036-bb2c1501ac23 h1:/Pvbuwd61qRxNCIpSIWbx7Oqy1tinfErdetF91DU9gQ=
|
||||
github.com/cockroachdb/pebble v0.0.0-20220523221036-bb2c1501ac23/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU=
|
||||
github.com/cockroachdb/pebble v0.0.0-20210525181856-e45797baeb78/go.mod h1:1XpB4cLQcF189RAcWi4gUc110zJgtOfT7SVNGY8sOe0=
|
||||
github.com/cockroachdb/pebble v0.0.0-20211124004043-0dc90bc41e62 h1:MPucjIPsIzjSY4RLiyhjX00sHQVNXbzzTpfYHfj0cQw=
|
||||
github.com/cockroachdb/pebble v0.0.0-20211124004043-0dc90bc41e62/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU=
|
||||
github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/redact v1.1.1/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
|
||||
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM=
|
||||
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/codahale/hdrhistogram v0.9.0 h1:9GjrtRI+mLEFPtTfR/AZhcxp+Ii8NZYWq5104FbZQY0=
|
||||
github.com/codahale/hdrhistogram v0.9.0/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
|
@ -85,22 +111,33 @@ github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
|
|||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/lru v1.1.1 h1:kWFDaW0OWx6AD6Ki342c+JPmHbiVdE6rK81pT3fuo/Y=
|
||||
github.com/decred/dcrd/lru v1.1.1/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/decred/dcrd/lru v1.0.0 h1:Kbsb1SFDsIlaupWPwsPp+dkxiBY1frcS07PCPgotKz8=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
|
@ -110,16 +147,15 @@ github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHj
|
|||
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/felixge/fgprof v0.9.2 h1:tAMHtWMyl6E0BimjVbFt7fieU6FpjttsZN7j0wT5blc=
|
||||
github.com/felixge/fgprof v0.9.2/go.mod h1:+VNi+ZXtHIQ6wIw6bUT8nXQRefQflWECoFyRealT5sg=
|
||||
github.com/felixge/fgprof v0.9.1 h1:E6FUJ2Mlv043ipLOCFqo8+cHo9MhQ203E2cdEK/isEs=
|
||||
github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE=
|
||||
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
|
||||
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
|
||||
github.com/getsentry/sentry-go v0.13.0 h1:20dgTiUSfxRB/EhMPtxcL9ZEbM1ZdR+W/7f7NWD+xWo=
|
||||
github.com/getsentry/sentry-go v0.13.0/go.mod h1:EOsfu5ZdvKPfeHYV6pTVQnsjfp30+XA7//UooKNumH0=
|
||||
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
|
||||
|
@ -130,16 +166,22 @@ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm
|
|||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
|
||||
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
|
@ -148,9 +190,10 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
|||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
|
@ -167,6 +210,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
|
|||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
|
@ -178,32 +223,36 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
|||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1 h1:K4bn56FHdjFCfjSo3wWaD6rJL8r9yvmmncJNMhdkKrw=
|
||||
github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk=
|
||||
github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040 h1:i7RUpu0EybzQyQvPT7J3MmODs4+gPcHsD/pqW0uIYVo=
|
||||
github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
|
@ -224,25 +273,28 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m
|
|||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
|
||||
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
|
||||
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
|
||||
github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
|
||||
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
|
||||
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
|
||||
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
|
||||
github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
|
||||
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
|
||||
|
@@ -251,30 +303,26 @@ github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0t
|
|||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
|
||||
github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
|
||||
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
|
||||
github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
|
||||
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
|
||||
github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
|
||||
github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
|
||||
github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
|
||||
github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
|
||||
github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/kkdai/bstream v1.0.0 h1:Se5gHwgp2VT2uHfDrkbbgbgEvV9cimLELwrPJctSjg8=
|
||||
github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA=
|
||||
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ=
|
||||
github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
|
@@ -282,30 +330,29 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
|
||||
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/lbryio/lbcutil v1.0.202 h1:L0aRMs2bdCUAicD8Xe4NmUEvevDDea3qkIpCSACnftI=
|
||||
github.com/lbryio/lbcutil v1.0.202/go.mod h1:LGPtVBBzh4cFXfLFb8ginlFcbA2QwumLNFd0yk/as2o=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 h1:aczX6NMOtt6L4YT0fQvKkDK6LZEtdOso9sUH89V1+P0=
|
||||
github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281/go.mod h1:lc+czkgO/8F7puNki5jk8QyujbfK1LOT7Wl0ON2hxyk=
|
||||
github.com/lbryio/lbcd v0.22.100-beta/go.mod h1:u8SaFX4xdGMMR5xasBGfgApC8pvD4rnK2OujZnrq5gs=
|
||||
github.com/lbryio/lbcd v0.22.100-beta-rc5/go.mod h1:9PbFSlHYX7WlnDQwcTxHVf1W35VAnRsattCSyKOO55g=
|
||||
github.com/lbryio/lbcutil v1.0.201/go.mod h1:gDHc/b+Rdz3J7+VB8e5/Bl9roVf8Q5/8FQCyuK9dXD0=
|
||||
github.com/lbryio/lbcutil v1.0.202-rc3 h1:J7zYnIj3iN/ndPYKqMKBukLaLM1GhCEaiaMOYIMdUCU=
|
||||
github.com/lbryio/lbcutil v1.0.202-rc3/go.mod h1:LGPtVBBzh4cFXfLFb8ginlFcbA2QwumLNFd0yk/as2o=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
|
||||
github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
|
||||
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
|
||||
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
|
@@ -323,25 +370,47 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
|
|||
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
|
||||
github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
|
||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
|
||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
|
||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
||||
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
|
@@ -349,55 +418,74 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
|||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI=
|
||||
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
|
||||
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
|
||||
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI=
|
||||
github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
|
||||
github.com/shirou/gopsutil/v3 v3.21.7 h1:PnTqQamUjwEDSgn+nBGu0qSDV/CfvyiR/gwTH3i7HTU=
|
||||
github.com/shirou/gopsutil/v3 v3.21.7/go.mod h1:RGl11Y7XMTQPmHh8F0ayC6haKNBgH4PXMJuTAcMOlz4=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
|
||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
|
@@ -405,27 +493,25 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
|
|||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
|
||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
||||
github.com/tklauser/numcpus v0.5.0 h1:ooe7gN0fg6myJ0EKoTAf5hebTZrH52px3New/D9iJ+A=
|
||||
github.com/tklauser/numcpus v0.5.0/go.mod h1:OGzpTxpcIMNGYQdit2BYL1pvk/dSOaJWjKoflh+RQjo=
|
||||
github.com/tklauser/go-sysconf v0.3.7 h1:HT7h4+536gjqeq1ZIJPgOl1rg1XFatQGVZWp7Py53eg=
|
||||
github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4=
|
||||
github.com/tklauser/numcpus v0.2.3 h1:nQ0QYpiritP6ViFhrKYsiv6VVxOpum2Gks5GhnJbS/8=
|
||||
github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.2 h1:MsXyN2rqdM8NM0lLiIpTn610e8Zcoj8ZuHxsMOi9qhI=
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.2/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
|
||||
|
@@ -443,14 +529,34 @@ github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZ
|
|||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
|
||||
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
|
||||
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
|
||||
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
|
||||
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
|
@@ -459,20 +565,23 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898 h1:SLP7Q4Di66FONjDJbCYrCRrh97focO6sLogHO7/g8F0=
|
||||
golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b h1:QAqMVf3pSa6eeTsuklijukjXBlj7Es2QQplab+/RbQ4=
|
||||
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/exp v0.0.0-20220518171630-0b5c67f07fdf h1:oXVg4h2qJDd9htKxb5SCpFBHLipW6hXmL3qpUixS2jw=
|
||||
golang.org/x/exp v0.0.0-20220518171630-0b5c67f07fdf/go.mod h1:yh0Ynu2b5ZUe3MQfp2nM0ecK7wsgouWTDN0FNeJuIys=
|
||||
golang.org/x/exp v0.0.0-20211123021643-48cbe7f80d7c h1:hp+QRBz/P/780ndA1Rv/UpvsR6AsVmOMGYitxgZ1PPA=
|
||||
golang.org/x/exp v0.0.0-20211123021643-48cbe7f80d7c/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@@ -485,13 +594,18 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
|
|||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@@ -500,6 +614,7 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
|
|||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
|
@@ -507,17 +622,20 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@@ -537,45 +655,45 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211123173158-ef496fb156ab h1:rfJ1bsoJQQIAoAxTxB7bme+vHrNkRw8CqfsYh9w54cw=
|
||||
golang.org/x/sys v0.0.0-20211123173158-ef496fb156ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@@ -585,10 +703,12 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@@ -608,24 +728,32 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
|||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
|
@@ -635,6 +763,7 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn
|
|||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
|
@@ -642,11 +771,17 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx
|
|||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
|
@@ -666,16 +801,18 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
|||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
||||
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@ -683,13 +820,16 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
|
|
@@ -282,20 +282,19 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) {
// - Assert the chain height is 0 and the state is ThresholdDefined
// - Generate 1 fewer blocks than needed to reach the first state transition
// - Assert chain height is expected and state is still ThresholdDefined
//
// - Generate 1 more block to reach the first state transition
// - Assert chain height is expected and state moved to ThresholdStarted
// - Generate enough blocks to reach the next state transition window, but only
//   signal support in 1 fewer than the required number to achieve
//   ThresholdLockedIn
// - Assert chain height is expected and state is still ThresholdStarted
// - Generate enough blocks to reach the next state transition window with only
//   the exact number of blocks required to achieve locked in status signalling
//   support.
// - Assert chain height is expected and state moved to ThresholdLockedIn
// - Generate 1 fewer blocks than needed to reach the next state transition
// - Assert chain height is expected and state is still ThresholdLockedIn
// - Generate 1 more block to reach the next state transition
// - Assert chain height is expected and state moved to ThresholdActive
func TestBIP0009(t *testing.T) {
	t.Parallel()
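For readers following the ThresholdDefined, ThresholdStarted, ThresholdLockedIn, ThresholdActive progression asserted above, the sketch below is a minimal, illustrative version of the BIP 9 per-window transition rule. The names and signature are assumptions for illustration only, not the ones used in lbcd's blockchain package.

// thresholdState enumerates the BIP 9 deployment states the test asserts.
type thresholdState int

const (
	thresholdDefined thresholdState = iota
	thresholdStarted
	thresholdLockedIn
	thresholdActive
	thresholdFailed
)

// nextThresholdState is evaluated once per retarget window: Defined becomes
// Started once the deployment's start time is reached, Started becomes
// LockedIn when enough blocks in the window signal the bit, and LockedIn
// unconditionally becomes Active one window later.
func nextThresholdState(cur thresholdState, startReached, expired bool, signalling, threshold int) thresholdState {
	switch cur {
	case thresholdDefined:
		if expired {
			return thresholdFailed
		}
		if startReached {
			return thresholdStarted
		}
	case thresholdStarted:
		if expired {
			return thresholdFailed
		}
		if signalling >= threshold {
			return thresholdLockedIn
		}
	case thresholdLockedIn:
		return thresholdActive
	}
	return cur
}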
@@ -310,14 +309,11 @@ func TestBIP0009(t *testing.T) {
// Overview:
// - Generate block 1
// - Assert bit is NOT set (ThresholdDefined)
//
// - Generate enough blocks to reach first state transition
// - Assert bit is NOT set for block prior to state transition
// - Assert bit is set for block at state transition (ThresholdStarted)
//
// - Generate enough blocks to reach second state transition
// - Assert bit is set for block at state transition (ThresholdLockedIn)
//
// - Generate enough blocks to reach third state transition
// - Assert bit is set for block prior to state transition (ThresholdLockedIn)
// - Assert bit is NOT set for block at state transition (ThresholdActive)

@@ -95,22 +95,17 @@ func makeTestOutput(r *rpctest.Harness, t *testing.T,
// them.
//
// Overview:
// - Pre soft-fork:
//   - Transactions with non-final lock-times from the PoV of MTP should be
//     rejected from the mempool.
//   - Transactions with non-final MTP based lock-times should be accepted
//     in valid blocks.
// - Post soft-fork:
//   - Transactions with non-final lock-times from the PoV of MTP should be
//     rejected from the mempool and when found within otherwise valid blocks.
//   - Transactions with final lock-times from the PoV of MTP should be
//     accepted to the mempool and mined in a future block.
func TestBIP0113Activation(t *testing.T) {
	t.Parallel()
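BIP 113 changes lock-time evaluation so that a transaction's nLockTime is compared against the median-time-past (MTP) of the previous 11 blocks rather than the current block's timestamp. The helper below is a rough, self-contained sketch of that calculation under that assumption; it is not the harness or lbcd API.

import (
	"sort"
	"time"
)

// medianTimePast returns the median of up to the last 11 block timestamps,
// which is the value BIP 113 compares transaction lock-times against.
func medianTimePast(timestamps []time.Time) time.Time {
	const medianTimeBlocks = 11
	if n := len(timestamps); n > medianTimeBlocks {
		timestamps = timestamps[n-medianTimeBlocks:]
	}
	sorted := append([]time.Time(nil), timestamps...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Before(sorted[j]) })
	return sorted[len(sorted)/2]
}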
@@ -396,13 +391,13 @@ func assertTxInBlock(r *rpctest.Harness, t *testing.T, blockHash *chainhash.Hash
// 112 and BIP 68 rule-set after the activation of the CSV-package soft-fork.
//
// Overview:
// - Pre soft-fork:
//   - A transaction spending a CSV output validly should be rejected from the
//     mempool, but accepted in a valid generated block including the
//     transaction.
// - Post soft-fork:
//   - See the cases exercised within the table driven tests towards the end
//     of this test.
func TestBIP0068AndBIP0112Activation(t *testing.T) {
	t.Parallel()
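As background for the CSV cases above: BIP 68 encodes a relative lock-time in each input's sequence field, where bit 31 disables the lock, bit 22 selects time-based (512-second units) versus block-based locks, and the low 16 bits carry the value. The decoder below is a small illustrative sketch of that encoding, not the txscript/blockchain implementation.

// Bit layout from BIP 68.
const (
	seqLockTimeDisabled  = uint32(1) << 31 // bit 31: relative lock-time not enforced
	seqLockTimeIsSeconds = uint32(1) << 22 // bit 22: value is in 512-second units
	seqLockTimeMask      = uint32(0x0000ffff)
	seqGranularityShift  = 9 // value << 9 == value * 512 seconds
)

// decodeRelativeLock reports whether a sequence number imposes a BIP 68
// relative lock-time and, if so, whether it is expressed in seconds or blocks.
func decodeRelativeLock(seq uint32) (active bool, inSeconds bool, value uint32) {
	if seq&seqLockTimeDisabled != 0 {
		return false, false, 0
	}
	value = seq & seqLockTimeMask
	if seq&seqLockTimeIsSeconds != 0 {
		return true, true, value << seqGranularityShift
	}
	return true, false, value
}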
@@ -13,17 +13,12 @@ import (
	"fmt"
	"os"
	"runtime/debug"
	"sort"
	"testing"
	"time"

	"github.com/lbryio/lbcd/chaincfg"
	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/integration/rpctest"
	"github.com/lbryio/lbcd/rpcclient"
	"github.com/lbryio/lbcd/txscript"
	"github.com/lbryio/lbcd/wire"
	"github.com/lbryio/lbcutil"
)

func testGetBestBlock(r *rpctest.Harness, t *testing.T) {
@@ -138,278 +133,13 @@ func testBulkClient(r *rpctest.Harness, t *testing.T) {
			t.Fatalf("expected hash %s to be in generated hash list", blockHash)
		}
	}
}

func testGetBlockStats(r *rpctest.Harness, t *testing.T) {
	t.Parallel()

	baseFeeRate := int64(10)
	txValue := int64(50000000)
	txQuantity := 10
	txs := make([]*lbcutil.Tx, txQuantity)
	fees := make([]int64, txQuantity)
	sizes := make([]int64, txQuantity)
	feeRates := make([]int64, txQuantity)
	var outputCount int

	// Generate test sample.
	for i := 0; i < txQuantity; i++ {
		address, err := r.NewAddress()
		if err != nil {
			t.Fatalf("Unable to generate address: %v", err)
		}

		pkScript, err := txscript.PayToAddrScript(address)
		if err != nil {
			t.Fatalf("Unable to generate PKScript: %v", err)
		}

		// This feerate is not the actual feerate. See comment below.
		feeRate := baseFeeRate * int64(i)

		tx, err := r.CreateTransaction([]*wire.TxOut{wire.NewTxOut(txValue, pkScript)}, lbcutil.Amount(feeRate), true)
		if err != nil {
			t.Fatalf("Unable to generate segwit transaction: %v", err)
		}

		txs[i] = lbcutil.NewTx(tx)
		sizes[i] = int64(tx.SerializeSize())

		// memWallet.fundTx makes some assumptions when calculating fees.
		// For instance, it assumes the signature script has exactly 108 bytes
		// and it does not account for the size of the change output.
		// This needs to be taken into account when getting the true feerate.
		scriptSigOffset := 108 - len(tx.TxIn[0].SignatureScript)
		changeOutputSize := tx.TxOut[len(tx.TxOut)-1].SerializeSize()
		fees[i] = (sizes[i] + int64(scriptSigOffset) - int64(changeOutputSize)) * feeRate
		feeRates[i] = fees[i] / sizes[i]

		outputCount += len(tx.TxOut)
	}
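	// Illustration with made-up numbers: if a generated transaction serializes
	// to 250 bytes, its signature script ends up 107 bytes, its change output
	// serializes to 30 bytes, and it was funded at feeRate 20, then the size
	// fundTx budgeted for is 250 + (108-107) - 30 = 221 bytes, the fee attached
	// is 221 * 20 = 4420, and the recovered feerate is 4420 / 250 = 17
	// (integer division), which is what feeRates[i] ends up holding.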

	stats := func(slice []int64) (int64, int64, int64, int64, int64) {
		var total, average, min, max, median int64
		min = slice[0]
		length := len(slice)
		for _, item := range slice {
			if min > item {
				min = item
			}
			if max < item {
				max = item
			}
			total += item
		}
		average = total / int64(length)
		sort.Slice(slice, func(i, j int) bool { return slice[i] < slice[j] })
		if length == 0 {
			median = 0
		} else if length%2 == 0 {
			median = (slice[length/2-1] + slice[length/2]) / 2
		} else {
			median = slice[length/2]
		}
		return total, average, min, max, median
	}

	totalFee, avgFee, minFee, maxFee, medianFee := stats(fees)
	totalSize, avgSize, minSize, maxSize, medianSize := stats(sizes)
	_, avgFeeRate, minFeeRate, maxFeeRate, _ := stats(feeRates)

	tests := []struct {
		name            string
		txs             []*lbcutil.Tx
		stats           []string
		expectedResults map[string]interface{}
	}{
		{
			name:  "empty block",
			txs:   []*lbcutil.Tx{},
			stats: []string{},
			expectedResults: map[string]interface{}{
				"avgfee":              int64(0),
				"avgfeerate":          int64(0),
				"avgtxsize":           int64(0),
				"feerate_percentiles": []int64{0, 0, 0, 0, 0},
				"ins":                 int64(0),
				"maxfee":              int64(0),
				"maxfeerate":          int64(0),
				"maxtxsize":           int64(0),
				"medianfee":           int64(0),
				"mediantxsize":        int64(0),
				"minfee":              int64(0),
				"mintxsize":           int64(0),
				"outs":                int64(1),
				"swtotal_size":        int64(0),
				"swtotal_weight":      int64(0),
				"swtxs":               int64(0),
				"total_out":           int64(0),
				"total_size":          int64(0),
				"total_weight":        int64(0),
				"txs":                 int64(1),
				"utxo_increase":       int64(1),
			},
		},
		{
			name: "block with 10 transactions + coinbase",
			txs:  txs,
			stats: []string{"avgfee", "avgfeerate", "avgtxsize", "feerate_percentiles",
				"ins", "maxfee", "maxfeerate", "maxtxsize", "medianfee", "mediantxsize",
				"minfee", "minfeerate", "mintxsize", "outs", "subsidy", "swtxs",
				"total_size", "total_weight", "totalfee", "txs", "utxo_increase"},
			expectedResults: map[string]interface{}{
				"avgfee":     avgFee,
				"avgfeerate": avgFeeRate,
				"avgtxsize":  avgSize,
				"feerate_percentiles": []int64{feeRates[0], feeRates[2],
					feeRates[4], feeRates[7], feeRates[8]},
				"ins":            int64(txQuantity),
				"maxfee":         maxFee,
				"maxfeerate":     maxFeeRate,
				"maxtxsize":      maxSize,
				"medianfee":      medianFee,
				"mediantxsize":   medianSize,
				"minfee":         minFee,
				"minfeerate":     minFeeRate,
				"mintxsize":      minSize,
				"outs":           int64(outputCount + 1), // Coinbase output also counts.
				"subsidy":        int64(100000000),
				"swtotal_weight": nil, // This stat was not selected, so it should be nil.
				"swtxs":          int64(0),
				"total_size":     totalSize,
				"total_weight":   totalSize * 4,
				"totalfee":       totalFee,
				"txs":            int64(txQuantity + 1), // Coinbase transaction also counts.
				"utxo_increase":  int64(outputCount + 1 - txQuantity),
				"utxo_size_inc":  nil,
			},
		},
	}
	for _, test := range tests {
		// Submit a new block with the provided transactions.
		block, err := r.GenerateAndSubmitBlock(test.txs, -1, time.Time{})
		if err != nil {
			t.Fatalf("Unable to generate block: %v from test %s", err, test.name)
		}

		blockStats, err := r.GetBlockStats(block.Hash(), &test.stats)
		if err != nil {
			t.Fatalf("Call to `getblockstats` on test %s failed: %v", test.name, err)
		}

		if blockStats.Height != (*int64)(nil) && *blockStats.Height != int64(block.Height()) {
			t.Fatalf("Unexpected result in test %s, stat: %v, expected: %v, got: %v", test.name, "height", block.Height(), *blockStats.Height)
		}

		for stat, value := range test.expectedResults {
			var result interface{}
			switch stat {
			case "avgfee":
				result = blockStats.AverageFee
			case "avgfeerate":
				result = blockStats.AverageFeeRate
			case "avgtxsize":
				result = blockStats.AverageTxSize
			case "feerate_percentiles":
				result = blockStats.FeeratePercentiles
			case "blockhash":
				result = blockStats.Hash
			case "height":
				result = blockStats.Height
			case "ins":
				result = blockStats.Ins
			case "maxfee":
				result = blockStats.MaxFee
			case "maxfeerate":
				result = blockStats.MaxFeeRate
			case "maxtxsize":
				result = blockStats.MaxTxSize
			case "medianfee":
				result = blockStats.MedianFee
			case "mediantime":
				result = blockStats.MedianTime
			case "mediantxsize":
				result = blockStats.MedianTxSize
			case "minfee":
				result = blockStats.MinFee
			case "minfeerate":
				result = blockStats.MinFeeRate
			case "mintxsize":
				result = blockStats.MinTxSize
			case "outs":
				result = blockStats.Outs
			case "swtotal_size":
				result = blockStats.SegWitTotalSize
			case "swtotal_weight":
				result = blockStats.SegWitTotalWeight
			case "swtxs":
				result = blockStats.SegWitTxs
			case "subsidy":
				result = blockStats.Subsidy
			case "time":
				result = blockStats.Time
			case "total_out":
				result = blockStats.TotalOut
			case "total_size":
				result = blockStats.TotalSize
			case "total_weight":
				result = blockStats.TotalWeight
			case "totalfee":
				result = blockStats.TotalFee
			case "txs":
				result = blockStats.Txs
			case "utxo_increase":
				result = blockStats.UTXOIncrease
			case "utxo_size_inc":
				result = blockStats.UTXOSizeIncrease
			}

			var equality bool

			// Check for nil equality.
			if value == nil && result == (*int64)(nil) {
				equality = true
				break
			} else if result == nil || value == nil {
				equality = false
			}

			var resultValue interface{}
			switch v := value.(type) {
			case int64:
				resultValue = *result.(*int64)
				equality = v == resultValue
			case string:
				resultValue = *result.(*string)
				equality = v == resultValue
			case []int64:
				resultValue = *result.(*[]int64)
				resultSlice := resultValue.([]int64)
				equality = true
				for i, item := range resultSlice {
					if item != v[i] {
						equality = false
						break
					}
				}
			}
			if !equality {
				if result != nil {
					t.Fatalf("Unexpected result in test %s, stat: %v, expected: %v, got: %v", test.name, stat, value, resultValue)
				} else {
					t.Fatalf("Unexpected result in test %s, stat: %v, expected: %v, got: %v", test.name, stat, value, "<nil>")
				}
			}
		}
	}
}

var rpcTestCases = []rpctest.HarnessTestCase{
	testGetBestBlock,
	testGetBlockCount,
	testGetBlockHash,
	testGetBlockStats,
	testBulkClient,
}

@@ -421,8 +151,7 @@ func TestMain(m *testing.M) {
	// In order to properly test scenarios as if we were on mainnet,
	// ensure that non-standard transactions aren't accepted into the
	// mempool or relayed.
	// Enable transaction index to be able to fully test GetBlockStats.
	btcdCfg := []string{"--rejectnonstd", "--txindex"}
	btcdCfg := []string{"--rejectnonstd"}
	primaryHarness, err = rpctest.New(
		&chaincfg.SimNetParams, nil, btcdCfg, "",
	)

@@ -16,7 +16,6 @@ import (
	"testing"
	"time"

	"github.com/lbryio/lbcd/btcjson"
	"github.com/lbryio/lbcd/chaincfg"
	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/rpcclient"
@@ -513,18 +512,6 @@ func (h *Harness) GenerateAndSubmitBlockWithCustomCoinbaseOutputs(
	return newBlock, nil
}

// GetBlockStats returns block statistics. The first argument specifies the
// height or hash of the target block. The second argument allows selecting
// the stats to return; if it is empty, all stats are returned.
func (h *Harness) GetBlockStats(hashOrHeight interface{}, stats *[]string) (
	*btcjson.GetBlockStatsResult, error) {

	h.Lock()
	defer h.Unlock()

	return h.Client.GetBlockStats(hashOrHeight, stats)
}
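// A hedged usage sketch (not part of the harness): given a *Harness named
// harness and a *testing.T named t, a caller could request a few stats for
// the block at height 1; passing an empty slice (or nil) returns every stat.
//
//	stats := []string{"height", "txs", "totalfee"}
//	result, err := harness.GetBlockStats(int64(1), &stats)
//	if err != nil {
//		t.Fatalf("getblockstats failed: %v", err)
//	}
//	if result.Height != nil && *result.Height != 1 {
//		t.Fatalf("unexpected height: %d", *result.Height)
//	}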

// generateListeningAddresses returns two strings representing listening
// addresses designated for the current rpc test. If there haven't been any
// test instances created, the default ports are used. Otherwise, in order to
26 lbcd.go

@@ -19,7 +19,6 @@ import (
	"github.com/lbryio/lbcd/claimtrie/param"
	"github.com/lbryio/lbcd/database"
	"github.com/lbryio/lbcd/limits"
	"github.com/lbryio/lbcd/version"

	"github.com/felixge/fgprof"
)
@@ -65,7 +64,7 @@ func btcdMain(serverChan chan<- *server) error {
	defer btcdLog.Info("Shutdown complete")

	// Show version at startup.
	btcdLog.Infof("Version %s", version.Full())
	btcdLog.Infof("Version %s", version())

	// Enable http profiling server if requested.
	if cfg.Profile != "" {
@@ -92,25 +91,6 @@ func btcdMain(serverChan chan<- *server) error {
		defer pprof.StopCPUProfile()
	}

	// Write memory profile if requested.
	if cfg.MemProfile != "" {
		f, err := os.Create(cfg.MemProfile + ".heap")
		if err != nil {
			btcdLog.Errorf("Unable to create mem profile: %v", err)
			return err
		}
		defer f.Close()
		defer pprof.Lookup("heap").WriteTo(f, 0)

		f, err = os.Create(cfg.MemProfile + ".allocs")
		if err != nil {
			btcdLog.Errorf("Unable to create mem profile: %v", err)
			return err
		}
		defer f.Close()
		defer pprof.Lookup("allocs").WriteTo(f, 0)
	}

	// Perform upgrades to btcd as new versions require it.
	if err := doUpgrades(); err != nil {
		btcdLog.Errorf("%v", err)
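The memory-profile block in the hunk above writes the runtime's "heap" and "allocs" profiles on shutdown when cfg.MemProfile is set. The program below is a self-contained sketch of the same pattern for anyone wanting to reproduce it outside lbcd; the function and file names are illustrative, only the profile names and WriteTo call mirror the code above.

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

// writeMemProfiles dumps the "heap" and "allocs" profiles to <prefix>.heap
// and <prefix>.allocs, mirroring what btcdMain defers above.
func writeMemProfiles(prefix string) {
	for _, name := range []string{"heap", "allocs"} {
		f, err := os.Create(prefix + "." + name)
		if err != nil {
			log.Printf("unable to create %s profile: %v", name, err)
			continue
		}
		if err := pprof.Lookup(name).WriteTo(f, 0); err != nil {
			log.Printf("unable to write %s profile: %v", name, err)
		}
		f.Close()
	}
}

func main() {
	defer writeMemProfiles("lbcd-mem")
	// ... application work runs here ...
}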
@@ -300,9 +280,7 @@ func main() {
	// limits the garbage collector from excessively overallocating during
	// bursts. This value was arrived at with the help of profiling live
	// usage.
	if _, ok := os.LookupEnv("GOGC"); !ok {
		debug.SetGCPercent(10)
	}
	debug.SetGCPercent(10)

	// Up some limits.
	if err := limits.SetLimits(); err != nil {