Compare commits
130 commits: v0.2021.07...master
Commit SHA1s:
d2193e980a 711c4b4b7b 317cdf7129 e070e8a51e cd7f20a461 b85556499f 8eb7841600 6d4b9b5e37 537b8c7ddd 8fb3db8136
979d0d16b6 7d24ff82bf 86a287ec69 891f63fb5c 789974227f 71e79c553e b298454727 8c8871b4d2 20e32437e9 d0d6145f9d
90afae7cd5 f5b8f2ce0d 321bcf6420 293a3f685e f55a5ed777 50f7e91ead fe18c70bf7 9403d84a83 09fd939b60 dc9b4ada2a
c38134b645 d025ea1616 5b690ff2ff aa16207aa5 78b9a625eb e46ac7c913 8ac89195db 4e11433325 9d9c73f97f cbdcc5faeb
3a53f46114 071aa2a7ad b018217899 13479794ed ab2a39ac6c ffa426b899 b9f7d595bd 49e5d7b8c1 8b4b27bdb9 b2dd70bc7c
7fcb4a750d 3dfe5a5bc3 28a299efa8 ff0694def1 78f35cc96c d51f5ca064 9e2a39e938 8e99bc56d0 447e931f18 516b95d96b
2a1d6fa7d4 2e52c1639c 1c1d288654 283686ecac ca0e8562f3 72ea236d86 0a1ba43d66 4e6b47c2a3 ee5fcaef14 159f4b941b
355eab682c 395e1db489 602292281c d511b08736 2537a84b86 67b3dce492 284f825d22 15614c6a5b c61c8db92a 2ee8d2c3cc
914d2bfc61 a67f9b43d1 58a44bfce3 5387aeeebe 45e9817ced a27b6e730d 02dbea4775 e12c26fe20 f0369e12e7 d69af2db8a
a9357d3ba5 8084a15598 e043ca89c9 0d51cbfde4 a6d47e662a 220a42984e a0de5164ba 0d59480f3c 36b4a3cdd9 076adcca39
4839f4aa8a 777ee5ee52 d99f993c21 bdcb15b7c5 e0141ae5ca 4f7d3b20c8 adb5f189d7 fdbd7ec509 81c3de3bfd 9c89907bb8
fc57c177e2 7c9d773ff0 915810739c aa2fd9dd10 64870c54bc 031c3bfe4e d93fe2ee8d 8a2a98726d b4782ce6ac d71e91c58e
9edba6109f 7a8e8b9b47 27598d628b d45f215789 fb161ee6f5 80ab0e7cd3 b857eb38b2 87b3db5d98 5757edfb9e d2d17bee3b
137 changed files with 34798 additions and 1196 deletions
12  .github/workflows/build-short.yml  vendored  Normal file
@@ -0,0 +1,12 @@
name: 'Build and Test Hub'

on: ["push", "pull_request"]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Build and Test
        uses: ./
4  .gitignore  vendored  Normal file
@@ -0,0 +1,4 @@
.idea/
.vscode/
.DS_Store
.venv
@@ -1,5 +0,0 @@
-FROM debian:10-slim
-
-EXPOSE 50051
-COPY ./hub /hub
-ENTRYPOINT ["/hub", "serve"]
11  action.yml
@@ -1,6 +1,5 @@
-# action.yml
-name: 'Hub'
-description: 'Run go hub'
-runs:
-  using: 'docker'
-  image: 'dev.dockerfile'
+name: 'Build and Test'
+description: 'Build and test hub'
+runs:
+  using: 'docker'
+  image: 'jeffreypicard/hub-github-action:dev'
4  build.sh
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-go build .
-sudo docker build . -t lbry/hub:latest
1157  db/db_get.go  Normal file
File diff suppressed because it is too large.
576  db/db_resolve.go  Normal file
@@ -0,0 +1,576 @@
package db

// db_resolve.go contains functions relevant to resolving a claim.

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"math"
	"sort"
	"strings"

	"github.com/lbryio/herald.go/db/prefixes"
	"github.com/lbryio/herald.go/db/stack"
	"github.com/lbryio/herald.go/internal"
	pb "github.com/lbryio/herald.go/protobuf/go"
	lbryurl "github.com/lbryio/lbry.go/v3/url"
	log "github.com/sirupsen/logrus"
)

// PrepareResolveResult prepares a ResolveResult to return
func PrepareResolveResult(
	db *ReadOnlyDBColumnFamily,
	txNum uint32,
	position uint16,
	claimHash []byte,
	name string,
	rootTxNum uint32,
	rootPosition uint16,
	activationHeight uint32,
	signatureValid bool) (*ResolveResult, error) {

	normalizedName := internal.NormalizeName(name)
	controllingClaim, err := db.GetControllingClaim(normalizedName)
	if err != nil {
		return nil, err
	}

	txHash, err := db.GetTxHash(txNum)
	if err != nil {
		return nil, err
	}

	heights := stack.BisectRight(db.TxCounts, []uint32{txNum, rootTxNum})
	height, createdHeight := heights[0], heights[1]
	lastTakeoverHeight := controllingClaim.Height

	expirationHeight := GetExpirationHeight(height)

	supportAmount, err := db.GetSupportAmount(claimHash)
	if err != nil {
		return nil, err
	}

	claimToTxo, err := db.GetCachedClaimTxo(claimHash, true)
	if err != nil {
		return nil, err
	}
	claimAmount := claimToTxo.Amount

	effectiveAmount, err := db.GetEffectiveAmount(claimHash, false)
	if err != nil {
		return nil, err
	}

	channelHash, err := db.GetChannelForClaim(claimHash, txNum, position)
	if err != nil {
		return nil, err
	}

	repostedClaimHash, err := db.GetRepost(claimHash)
	if err != nil {
		return nil, err
	}

	var repostTxHash []byte
	var repostTxPostition uint16
	var repostHeight uint32

	if repostedClaimHash != nil {
		repostTxo, err := db.GetCachedClaimTxo(repostedClaimHash, true)
		if err != nil {
			return nil, err
		}
		if repostTxo != nil {
			repostTxHash, err = db.GetTxHash(repostTxo.TxNum)
			if err != nil {
				return nil, err
			}
			repostTxPostition = repostTxo.Position
			repostHeight = stack.BisectRight(db.TxCounts, []uint32{repostTxo.TxNum})[0]
		}
	}

	shortUrl, err := db.GetShortClaimIdUrl(name, normalizedName, claimHash, txNum, rootPosition)
	if err != nil {
		return nil, err
	}

	var canonicalUrl string = shortUrl
	claimsInChannel, err := db.GetClaimsInChannelCount(claimHash)
	if err != nil {
		return nil, err
	}

	var channelTxHash []byte
	var channelTxPostition uint16
	var channelHeight uint32

	if channelHash != nil {
		// Ignore error because we already have this set if this doesn't work
		channelVals, _ := db.GetCachedClaimTxo(channelHash, true)
		log.Printf("channelVals: %#v\n", channelVals)
		if channelVals != nil {
			channelShortUrl, _ := db.GetShortClaimIdUrl(
				channelVals.Name,
				channelVals.NormalizedName(),
				channelHash, channelVals.RootTxNum,
				channelVals.RootPosition,
			)
			canonicalUrl = fmt.Sprintf("%s/%s", channelShortUrl, shortUrl)
			channelTxHash, err = db.GetTxHash(channelVals.TxNum)
			if err != nil {
				return nil, err
			}
			channelTxPostition = channelVals.Position
			channelHeight = stack.BisectRight(db.TxCounts, []uint32{channelVals.TxNum})[0]
		}
	}

	reposted, err := db.GetRepostedCount(claimHash)
	if err != nil {
		return nil, err
	}

	isControlling := bytes.Equal(controllingClaim.ClaimHash, claimHash)

	return &ResolveResult{
		Name:               name,
		NormalizedName:     normalizedName,
		ClaimHash:          claimHash,
		TxNum:              txNum,
		Position:           position,
		TxHash:             txHash,
		Height:             height,
		Amount:             claimAmount,
		ShortUrl:           shortUrl,
		IsControlling:      isControlling,
		CanonicalUrl:       canonicalUrl,
		CreationHeight:     createdHeight,
		ActivationHeight:   activationHeight,
		ExpirationHeight:   expirationHeight,
		EffectiveAmount:    effectiveAmount,
		SupportAmount:      supportAmount,
		Reposted:           reposted,
		LastTakeoverHeight: lastTakeoverHeight,
		ClaimsInChannel:    claimsInChannel,
		ChannelHash:        channelHash,
		RepostedClaimHash:  repostedClaimHash,
		SignatureValid:     signatureValid,
		RepostTxHash:       repostTxHash,
		RepostTxPostition:  repostTxPostition,
		RepostHeight:       repostHeight,
		ChannelTxHash:      channelTxHash,
		ChannelTxPostition: channelTxPostition,
		ChannelHeight:      channelHeight,
	}, nil
}

func (db *ReadOnlyDBColumnFamily) ResolveParsedUrl(parsed *PathSegment) (*ResolveResult, error) {
	normalizedName := internal.NormalizeName(parsed.name)
	if (parsed.amountOrder == -1 && parsed.claimId == "") || parsed.amountOrder == 1 {
		log.Warn("Resolving claim by name")
		ch := db.ControllingClaimIter()
		for kv := range ch {
			key := kv.Key.(*prefixes.ClaimTakeoverKey)
			val := kv.Value.(*prefixes.ClaimTakeoverValue)
			log.Tracef("ClaimTakeoverKey: %#v", key)
			log.Tracef("ClaimTakeoverValue: %#v", val)
		}
		controlling, err := db.GetControllingClaim(normalizedName)
		log.Warnf("controlling: %#v", controlling)
		log.Warnf("err: %#v", err)
		if err != nil {
			return nil, err
		}
		if controlling == nil {
			return nil, nil
		}
		return db.FsGetClaimByHash(controlling.ClaimHash)
	}

	var amountOrder int = int(math.Max(float64(parsed.amountOrder), 1))

	log.Println("amountOrder:", amountOrder)

	// Resolve by claimId
	if parsed.claimId != "" {
		if len(parsed.claimId) == 40 {
			claimHash, err := hex.DecodeString(parsed.claimId)
			if err != nil {
				return nil, err
			}

			// Maybe don't use caching version, when I actually implement the cache
			claimTxo, err := db.GetCachedClaimTxo(claimHash, true)
			if err != nil {
				return nil, err
			}

			if claimTxo == nil || claimTxo.NormalizedName() != normalizedName {
				return nil, nil
			}

			activation, err := db.GetActivation(claimTxo.TxNum, claimTxo.Position)
			if err != nil {
				return nil, err
			}

			log.Warn("claimTxo.ChannelSignatureIsValid:", claimTxo.ChannelSignatureIsValid)

			return PrepareResolveResult(
				db,
				claimTxo.TxNum,
				claimTxo.Position,
				claimHash,
				claimTxo.Name,
				claimTxo.RootTxNum,
				claimTxo.RootPosition,
				activation,
				claimTxo.ChannelSignatureIsValid,
			)
		}
		log.Println("nomalizedName:", normalizedName)
		log.Println("claimId:", parsed.claimId)
		// max short id length
		var j int = 10
		if len(parsed.claimId) < j {
			j = len(parsed.claimId)
		}

		ch := db.ClaimShortIdIter(normalizedName, parsed.claimId[:j])
		row := <-ch
		if row == nil {
			return nil, nil
		}

		key := row.Key.(*prefixes.ClaimShortIDKey)
		claimTxo := row.Value.(*prefixes.ClaimShortIDValue)

		fullClaimHash, err := db.GetCachedClaimHash(claimTxo.TxNum, claimTxo.Position)
		if err != nil {
			return nil, err
		}

		c, err := db.GetCachedClaimTxo(fullClaimHash.ClaimHash, true)
		if err != nil {
			return nil, err
		}

		nonNormalizedName := c.Name
		signatureIsValid := c.ChannelSignatureIsValid
		activation, err := db.GetActivation(claimTxo.TxNum, claimTxo.Position)

		if err != nil {
			return nil, err
		}

		log.Warn("signatureIsValid:", signatureIsValid)

		return PrepareResolveResult(
			db,
			claimTxo.TxNum,
			claimTxo.Position,
			fullClaimHash.ClaimHash,
			nonNormalizedName,
			key.RootTxNum,
			key.RootPosition,
			activation,
			signatureIsValid,
		)
	}

	// Resolve by amount ordering
	log.Warn("resolving by amount ordering")
	ch := db.BidOrderNameIter(normalizedName)
	var i = 0
	for kv := range ch {
		if i+1 < amountOrder {
			i++
			continue
		}
		key := kv.Key.(*prefixes.BidOrderKey)
		claimVal := kv.Value.(*prefixes.BidOrderValue)
		claimTxo, err := db.GetCachedClaimTxo(claimVal.ClaimHash, true)
		if err != nil {
			return nil, err
		}

		activation, err := db.GetActivation(key.TxNum, key.Position)
		if err != nil {
			return nil, err
		}

		return PrepareResolveResult(
			db,
			key.TxNum,
			key.Position,
			claimVal.ClaimHash,
			key.NormalizedName,
			claimTxo.RootTxNum,
			claimTxo.RootPosition,
			activation,
			claimTxo.ChannelSignatureIsValid,
		)
	}

	return nil, nil
}

func (db *ReadOnlyDBColumnFamily) ResolveClaimInChannel(channelHash []byte, normalizedName string) (*ResolveResult, error) {
	handle, err := db.EnsureHandle(prefixes.ChannelToClaim)
	if err != nil {
		return nil, err
	}

	key := prefixes.NewChannelToClaimKey(channelHash, normalizedName)
	rawKeyPrefix := key.PartialPack(2)
	options := NewIterateOptions().WithDB(db).WithCfHandle(handle).WithPrefix(rawKeyPrefix)
	options = options.WithIncludeValue(true) //.WithIncludeStop(true)
	ch := IterCF(db.DB, options)
	// TODO: what's a good default size for this?
	var candidates []*ResolveResult = make([]*ResolveResult, 0, 100)
	var i = 0
	for row := range ch {
		key := row.Key.(*prefixes.ChannelToClaimKey)
		stream := row.Value.(*prefixes.ChannelToClaimValue)
		effectiveAmount, err := db.GetEffectiveAmount(stream.ClaimHash, false)
		if err != nil {
			return nil, err
		}
		if i == 0 || candidates[i-1].Amount == effectiveAmount {
			candidates = append(
				candidates,
				&ResolveResult{
					TxNum:          key.TxNum,
					Position:       key.Position,
					ClaimHash:      stream.ClaimHash,
					Amount:         effectiveAmount,
					ChannelHash:    channelHash,
					NormalizedName: normalizedName,
				},
			)
			i++
		} else {
			break
		}
	}
	log.Printf("candidates: %#v\n", candidates)
	if len(candidates) == 0 {
		return nil, nil
	} else {
		// return list(sorted(candidates, key=lambda item: item[1]))[0]
		sort.Slice(candidates, func(i, j int) bool {
			return candidates[i].Amount < candidates[j].Amount
		})
		return candidates[0], nil
	}
}

func (db *ReadOnlyDBColumnFamily) Resolve(url string) *ExpandedResolveResult {
	var res = NewExpandedResolveResult()

	var channel *PathSegment = nil
	var stream *PathSegment = nil
	parsed, err := lbryurl.Parse(url, false)

	log.Warnf("parsed: %#v", parsed)

	if err != nil {
		log.Warn("lbryurl.Parse:", err)
		res.Stream = &optionalResolveResultOrError{
			err: &ResolveError{Error: err},
		}
		return res
	}

	// has stream in channel
	if strings.Compare(parsed.StreamName, "") != 0 && strings.Compare(parsed.ChannelName, "") != 0 {
		channel = &PathSegment{
			name:        parsed.ClaimName,
			claimId:     parsed.ChannelClaimId,
			amountOrder: parsed.PrimaryBidPosition,
		}
		stream = &PathSegment{
			name:        parsed.StreamName,
			claimId:     parsed.StreamClaimId,
			amountOrder: parsed.SecondaryBidPosition,
		}
	} else if parsed.IsChannelUrl() {
		channel = &PathSegment{
			name:        parsed.ClaimName,
			claimId:     parsed.ChannelClaimId,
			amountOrder: parsed.PrimaryBidPosition,
		}
	} else if strings.Compare(parsed.StreamName, "") != 0 {
		stream = &PathSegment{
			name:        parsed.StreamName,
			claimId:     parsed.StreamClaimId,
			amountOrder: parsed.PrimaryBidPosition,
		}
	}

	log.Printf("channel: %#v\n", channel)
	log.Printf("stream: %#v\n", stream)

	var resolvedChannel *ResolveResult = nil
	var resolvedStream *ResolveResult = nil
	if channel != nil {
		resolvedChannel, err = db.ResolveParsedUrl(channel)
		if err != nil {
			res.Channel = &optionalResolveResultOrError{
				err: &ResolveError{Error: err},
			}
			return res
		} else if resolvedChannel == nil {
			res.Channel = &optionalResolveResultOrError{
				err: &ResolveError{
					Error:     fmt.Errorf("Could not find claim at \"%s\".", url),
					ErrorType: uint8(pb.Error_NOT_FOUND),
				},
			}
			return res
		}
	}
	if resolvedChannel != nil {
		log.Printf("resolvedChannel: %#v\n", resolvedChannel)
		log.Printf("resolvedChannel.TxHash: %s\n", hex.EncodeToString(resolvedChannel.TxHash))
		log.Printf("resolvedChannel.ClaimHash: %s\n", hex.EncodeToString(resolvedChannel.ClaimHash))
		log.Printf("resolvedChannel.ChannelHash: %s\n", hex.EncodeToString(resolvedChannel.ChannelHash))
	}
	if stream != nil {
		if resolvedChannel != nil {
			streamClaim, err := db.ResolveClaimInChannel(resolvedChannel.ClaimHash, stream.Normalized())
			log.Printf("streamClaim %#v\n", streamClaim)
			if streamClaim != nil {
				log.Printf("streamClaim.ClaimHash: %s\n", hex.EncodeToString(streamClaim.ClaimHash))
				log.Printf("streamClaim.ChannelHash: %s\n", hex.EncodeToString(streamClaim.ChannelHash))
			}
			// TODO: Confirm error case
			if err != nil {
				res.Stream = &optionalResolveResultOrError{
					err: &ResolveError{Error: err},
				}
				return res
			}

			if streamClaim != nil {
				resolvedStream, err = db.FsGetClaimByHash(streamClaim.ClaimHash)
				// TODO: Confirm error case
				if err != nil {
					res.Stream = &optionalResolveResultOrError{
						err: &ResolveError{Error: err},
					}
					return res
				}
			}
		} else {
			resolvedStream, err = db.ResolveParsedUrl(stream)
			// TODO: Confirm error case
			if err != nil {
				res.Stream = &optionalResolveResultOrError{
					err: &ResolveError{Error: err},
				}
				return res
			}
			if channel == nil && resolvedChannel == nil && resolvedStream != nil && len(resolvedStream.ChannelHash) > 0 {
				resolvedChannel, err = db.FsGetClaimByHash(resolvedStream.ChannelHash)
				// TODO: Confirm error case
				if err != nil {
					res.Channel = &optionalResolveResultOrError{
						err: &ResolveError{Error: err},
					}
					return res
				}
			}
		}
		if resolvedStream == nil {
			res.Stream = &optionalResolveResultOrError{
				err: &ResolveError{
					Error:     fmt.Errorf("Could not find claim at \"%s\".", url),
					ErrorType: uint8(pb.Error_NOT_FOUND),
				},
			}
			return res
		}
	}

	// Getting blockers and filters
	var repost *ResolveResult = nil
	var repostedChannel *ResolveResult = nil
	if resolvedChannel != nil && resolvedStream != nil {
		log.Printf("about to get blockers and filters: %#v, %#v\n", resolvedChannel, resolvedStream)
	}

	if resolvedStream != nil || resolvedChannel != nil {
		var claim *ResolveResult = nil
		var claimHash []byte = nil
		var respostedClaimHash []byte = nil
		var blockerHash []byte = nil
		if resolvedStream != nil {
			claim = resolvedStream
			claimHash = resolvedStream.ClaimHash
			respostedClaimHash = resolvedStream.RepostedClaimHash
		} else {
			claim = resolvedChannel
			claimHash = resolvedChannel.ClaimHash
		}
		blockerHash, _, err = db.GetBlockerHash(claimHash, respostedClaimHash, claim.ChannelHash)
		log.Printf("blockerHash: %s\n", hex.EncodeToString(blockerHash))
		if err != nil {
			res.Channel = &optionalResolveResultOrError{
				err: &ResolveError{Error: err},
			}
			return res
		}
		if blockerHash != nil {
			reasonRow, err := db.FsGetClaimByHash(blockerHash)
			if err != nil {
				res.Channel = &optionalResolveResultOrError{
					err: &ResolveError{Error: err},
				}
				return res
			}
			res.Channel = &optionalResolveResultOrError{
				err: &ResolveError{Error: fmt.Errorf("%s, %v, %v", url, blockerHash, reasonRow)},
			}
			return res
		}
		if claim.RepostedClaimHash != nil {
			repost, err = db.FsGetClaimByHash(claim.RepostedClaimHash)
			if err != nil {
				res.Channel = &optionalResolveResultOrError{
					err: &ResolveError{Error: err},
				}
				return res
			}
			if repost != nil && repost.ChannelHash != nil && repost.SignatureValid {
				repostedChannel, err = db.FsGetClaimByHash(repost.ChannelHash)
				if err != nil {
					res.Channel = &optionalResolveResultOrError{
						err: &ResolveError{Error: err},
					}
					return res
				}
			}
		}
	}

	res.Channel = &optionalResolveResultOrError{
		res: resolvedChannel,
	}
	res.Stream = &optionalResolveResultOrError{
		res: resolvedStream,
	}
	res.Repost = &optionalResolveResultOrError{
		res: repost,
	}
	res.RepostedChannel = &optionalResolveResultOrError{
		res: repostedChannel,
	}

	log.Warnf("leaving Resolve, parsed: %#v\n", parsed)
	log.Warnf("leaving Resolve, res: %s\n", res)
	return res
}
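A minimal usage sketch (not part of the diff) of the resolve path above: open a production RocksDB snapshot read-only with GetProdDB, resolve a URL, and inspect the optional channel/stream results, mirroring the tests further down. The database path and URL are placeholders.

```go
package main

import (
	"log"

	dbpkg "github.com/lbryio/herald.go/db"
	"github.com/lbryio/lbry.go/v3/extras/stop"
)

func main() {
	grp := stop.New()
	// Placeholder paths: point at an existing lbry-rocksdb snapshot.
	db, err := dbpkg.GetProdDB("/path/to/lbry-rocksdb", "/tmp/herald-secondary", grp)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Shutdown()

	res := db.Resolve("lbry://@lbry")
	if res.Channel != nil && res.Channel.GetError() != nil {
		log.Println("channel error:", res.Channel.GetError())
	}
	if res.Stream != nil && res.Stream.GetError() != nil {
		log.Println("stream error:", res.Stream.GetError())
	}
	log.Printf("resolved: %s\n", res)
}
```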
880  db/db_test.go  Normal file
@@ -0,0 +1,880 @@
package db_test

import (
	"bytes"
	"encoding/csv"
	"encoding/hex"
	"os"
	"strings"
	"testing"

	dbpkg "github.com/lbryio/herald.go/db"
	"github.com/lbryio/herald.go/db/prefixes"
	"github.com/lbryio/herald.go/internal"
	"github.com/lbryio/lbry.go/v3/extras/stop"
	"github.com/linxGnu/grocksdb"
	log "github.com/sirupsen/logrus"
)

////////////////////////////////////////////////////////////////////////////////
// Utility functions for testing
////////////////////////////////////////////////////////////////////////////////

// OpenAndFillTmpDBColumnFamlies opens a db and fills it with data from a csv file using the given column family names
func OpenAndFillTmpDBColumnFamlies(filePath string) (*dbpkg.ReadOnlyDBColumnFamily, [][]string, error) {

	log.Println(filePath)
	file, err := os.Open(filePath)
	if err != nil {
		log.Println(err)
	}
	reader := csv.NewReader(file)
	records, err := reader.ReadAll()
	if err != nil {
		return nil, nil, err
	}

	wOpts := grocksdb.NewDefaultWriteOptions()
	opts := grocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	db, err := grocksdb.OpenDb(opts, "tmp")
	if err != nil {
		return nil, nil, err
	}
	var handleMap map[string]*grocksdb.ColumnFamilyHandle = make(map[string]*grocksdb.ColumnFamilyHandle)

	// Make sure we always create the TxCounts column family
	var cfNameRunes string = records[0][0]
	txCountPrefix := string(prefixes.TxCount)
	if !strings.Contains(cfNameRunes, txCountPrefix) {
		cfNameRunes = cfNameRunes + txCountPrefix
	}
	for _, cfNameRune := range cfNameRunes {
		cfName := string(cfNameRune)
		log.Println(cfName)
		handle, err := db.CreateColumnFamily(opts, cfName)
		if err != nil {
			return nil, nil, err
		}
		handleMap[cfName] = handle
	}
	toDefer := func() {
		db.Close()
		err = os.RemoveAll("./tmp")
		if err != nil {
			log.Println(err)
		}
	}
	for _, record := range records[1:] {
		cf := record[0]
		if err != nil {
			return nil, nil, err
		}
		handle := handleMap[string(cf)]
		key, err := hex.DecodeString(record[1])
		if err != nil {
			return nil, nil, err
		}
		val, err := hex.DecodeString(record[2])
		if err != nil {
			return nil, nil, err
		}
		db.PutCF(wOpts, handle, key, val)
	}

	myDB := &dbpkg.ReadOnlyDBColumnFamily{
		DB:               db,
		Handles:          handleMap,
		Opts:             grocksdb.NewDefaultReadOptions(),
		BlockedStreams:   make(map[string][]byte),
		BlockedChannels:  make(map[string][]byte),
		FilteredStreams:  make(map[string][]byte),
		FilteredChannels: make(map[string][]byte),
		TxCounts:         nil,
		LastState:        nil,
		Height:           0,
		Headers:          nil,
		Grp:              stop.New(),
		Cleanup:          toDefer,
	}

	// err = dbpkg.ReadDBState(myDB) //TODO: Figure out right place for this
	// if err != nil {
	// 	return nil, nil, nil, err
	// }

	err = myDB.InitTxCounts()
	if err != nil {
		return nil, nil, err
	}

	// err = dbpkg.InitHeaders(myDB)
	// if err != nil {
	// 	return nil, nil, nil, err
	// }

	return myDB, records, nil
}

// OpenAndFillTmpDBCF opens a db and fills it with data from a csv file
// using the given column family handle. Old version, should probably remove.
func OpenAndFillTmpDBCF(filePath string) (*grocksdb.DB, [][]string, func(), *grocksdb.ColumnFamilyHandle, error) {

	log.Println(filePath)
	file, err := os.Open(filePath)
	if err != nil {
		log.Println(err)
	}
	reader := csv.NewReader(file)
	records, err := reader.ReadAll()
	if err != nil {
		return nil, nil, nil, nil, err
	}

	wOpts := grocksdb.NewDefaultWriteOptions()
	opts := grocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	db, err := grocksdb.OpenDb(opts, "tmp")
	if err != nil {
		return nil, nil, nil, nil, err
	}
	handle, err := db.CreateColumnFamily(opts, records[0][0])
	if err != nil {
		return nil, nil, nil, nil, err
	}
	toDefer := func() {
		db.Close()
		err = os.RemoveAll("./tmp")
		if err != nil {
			log.Println(err)
		}
	}
	for _, record := range records[1:] {
		key, err := hex.DecodeString(record[0])
		if err != nil {
			return nil, nil, nil, nil, err
		}
		val, err := hex.DecodeString(record[1])
		if err != nil {
			return nil, nil, nil, nil, err
		}
		db.PutCF(wOpts, handle, key, val)
	}

	return db, records, toDefer, handle, nil
}

// OpenAndFillTmpDB opens a db and fills it with data from a csv file.
// Old funciont, should probably remove.
func OpenAndFillTmpDB(filePath string) (*grocksdb.DB, [][]string, func(), error) {

	log.Println(filePath)
	file, err := os.Open(filePath)
	if err != nil {
		log.Println(err)
	}
	reader := csv.NewReader(file)
	records, err := reader.ReadAll()
	if err != nil {
		return nil, nil, nil, err
	}

	wOpts := grocksdb.NewDefaultWriteOptions()
	opts := grocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	db, err := grocksdb.OpenDb(opts, "tmp")
	if err != nil {
		return nil, nil, nil, err
	}
	toDefer := func() {
		db.Close()
		err = os.RemoveAll("./tmp")
		if err != nil {
			log.Println(err)
		}
	}
	for _, record := range records {
		key, err := hex.DecodeString(record[0])
		if err != nil {
			return nil, nil, nil, err
		}
		val, err := hex.DecodeString(record[1])
		if err != nil {
			return nil, nil, nil, err
		}
		db.Put(wOpts, key, val)
	}

	return db, records, toDefer, nil
}

// CatCSV Reads a csv version of the db and prints it to stdout,
// while decoding types.
func CatCSV(filePath string) {
	log.Println(filePath)
	file, err := os.Open(filePath)
	if err != nil {
		log.Println(err)
	}
	reader := csv.NewReader(file)
	records, err := reader.ReadAll()
	if err != nil {
		log.Println(err)
		return
	}
	for _, record := range records[1:] {
		log.Println(record[1])
		keyRaw, err := hex.DecodeString(record[1])
		key, _ := prefixes.UnpackGenericKey(keyRaw)
		log.Println(key)
		if err != nil {
			log.Println(err)
			return
		}
		valRaw, err := hex.DecodeString(record[2])
		// val := prefixes.ClaimTakeoverValueUnpack(valRaw)
		val, _ := prefixes.UnpackGenericValue(keyRaw, valRaw)
		log.Println(val)
		if err != nil {
			log.Println(err)
			return
		}
	}
}

func TestCatFullDB(t *testing.T) {
	t.Skip("Skipping full db test")
	grp := stop.New()
	// url := "lbry://@lothrop#2/lothrop-livestream-games-and-code#c"
	// "lbry://@lbry", "lbry://@lbry#3", "lbry://@lbry3f", "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a", "lbry://@lbry:1", "lbry://@lbry$1"
	// url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9"
	// url := "lbry://@lbry"
	// url := "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a"
	dbPath := "/mnt/sda1/wallet_server/_data/lbry-rocksdb/"
	// dbPath := "/mnt/d/data/snapshot_1072108/lbry-rocksdb/"
	secondaryPath := "asdf"
	db, err := dbpkg.GetProdDB(dbPath, secondaryPath, grp)
	defer db.Shutdown()

	if err != nil {
		t.Error(err)
		return
	}
	ch := db.ClaimShortIdIter("@lbry", "")
	for row := range ch {
		key := row.Key.(*prefixes.ClaimShortIDKey)
		val := row.Value.(*prefixes.ClaimShortIDValue)
		log.Printf("%#v, %#v\n", key, val)
	}
}

////////////////////////////////////////////////////////////////////////////////
// End utility functions
////////////////////////////////////////////////////////////////////////////////

// TestOpenFullDB Tests running a resolve on a full db.
func TestOpenFullDB(t *testing.T) {
	t.Skip("Skipping full db test")
	grp := stop.New()
	// url := "lbry://@lothrop#2/lothrop-livestream-games-and-code#c"
	// "lbry://@lbry", "lbry://@lbry#3", "lbry://@lbry3f", "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a", "lbry://@lbry:1", "lbry://@lbry$1"
	// url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9"
	// url := "lbry://@lbry"
	// url := "lbry://@lbry#3fda836a92faaceedfe398225fb9b2ee2ed1f01a"
	// url := "lbry://@lbry$1"
	url := "https://lbry.tv/@lothrop:2/lothrop-livestream-games-and-code:c"
	dbPath := "/mnt/sda1/wallet_server/_data/lbry-rocksdb/"
	// dbPath := "/mnt/d/data/snapshot_1072108/lbry-rocksdb/"
	secondaryPath := "asdf"
	db, err := dbpkg.GetProdDB(dbPath, secondaryPath, grp)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
		return
	}
	expandedResolveResult := db.Resolve(url)
	log.Printf("expandedResolveResult: %#v\n", expandedResolveResult)
	log.Printf("expandedResolveResult: %s\n", expandedResolveResult)
}

// TODO: Finish the constructed data set for the stream part of this resolve.
func TestResolve(t *testing.T) {
	url := "lbry://@Styxhexenhammer666#2/legacy-media-baron-les-moonves-(cbs#9"
	filePath := "../testdata/FULL_resolve.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
		return
	}

	expandedResolveResult := db.Resolve(url)
	log.Printf("%#v\n", expandedResolveResult)
	if expandedResolveResult != nil && expandedResolveResult.Channel != nil {
		log.Println(expandedResolveResult.Channel.GetError())
	}
	if expandedResolveResult != nil && expandedResolveResult.Stream != nil {
		log.Println(expandedResolveResult.Stream.GetError())
	}
}

func TestGetDBState(t *testing.T) {
	filePath := "../testdata/s_resolve.csv"
	want := uint32(1072108)
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}
	state, err := db.GetDBState()
	if err != nil {
		t.Error(err)
	}
	log.Printf("state: %#v\n", state)
	if state.Height != want {
		t.Errorf("Expected %d, got %d", want, state.Height)
	}
}

func TestGetRepostedClaim(t *testing.T) {
	t.Skip("skipping obsolete? test of prefix W (Reposted)")
	channelHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")
	want := 5
	// Should be non-existent
	channelHash2, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bf")
	filePath := "../testdata/W_resolve.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	count, err := db.GetRepostedCount(channelHash)
	if err != nil {
		t.Error(err)
	}

	log.Println(count)

	if count != want {
		t.Errorf("Expected %d, got %d", want, count)
	}

	count2, err := db.GetRepostedCount(channelHash2)
	if err != nil {
		t.Error(err)
	}

	if count2 != 0 {
		t.Errorf("Expected 0, got %d", count2)
	}
}

func TestGetRepostedCount(t *testing.T) {
	channelHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")
	want := 5
	// Should be non-existent
	channelHash2, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bf")
	filePath := "../testdata/j_resolve.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	count, err := db.GetRepostedCount(channelHash)
	if err != nil {
		t.Error(err)
	}

	log.Println(count)

	if count != want {
		t.Errorf("Expected %d, got %d", want, count)
	}

	count2, err := db.GetRepostedCount(channelHash2)
	if err != nil {
		t.Error(err)
	}

	if count2 != 0 {
		t.Errorf("Expected 0, got %d", count2)
	}
}

func TestPrintRepost(t *testing.T) {
	filePath := "../testdata/V_resolve.csv"
	CatCSV(filePath)
}

func TestGetRepost(t *testing.T) {
	channelHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")
	channelHash2, _ := hex.DecodeString("000009ca6e0caaaef16872b4bd4f6f1b8c2363e2")
	filePath := "../testdata/V_resolve.csv"
	// want := uint32(3670)
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	res, err := db.GetRepost(channelHash)
	if err != nil {
		t.Error(err)
	}

	if !bytes.Equal(res, []byte{}) {
		t.Errorf("Expected empty, got %#v", res)
	}

	res2, err := db.GetRepost(channelHash2)
	if err != nil {
		t.Error(err)
	}

	if bytes.Equal(res2, []byte{}) {
		t.Errorf("Expected non empty, got %#v", res2)
	}
}

func TestPrintChannelCount(t *testing.T) {
	filePath := "../testdata/Z_resolve.csv"
	CatCSV(filePath)
}

func TestGetClaimsInChannelCount(t *testing.T) {
	channelHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")
	filePath := "../testdata/Z_resolve.csv"
	want := uint32(3670)
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	count, err := db.GetClaimsInChannelCount(channelHash)
	if err != nil {
		t.Error(err)
	}
	if count != want {
		t.Errorf("Expected %d, got %d", want, count)
	}
}

func TestPrintClaimShortId(t *testing.T) {
	filePath := "../testdata/F_resolve.csv"
	CatCSV(filePath)
}

// TestGetShortClaimIdUrl tests resolving a claim to a short url.
func TestGetShortClaimIdUrl(t *testing.T) {
	name := "@Styxhexenhammer666"
	normalName := internal.NormalizeName(name)
	claimHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")
	// claimHash := []byte{}
	var rootTxNum uint32 = 0x61ec7c
	var position uint16 = 0
	filePath := "../testdata/F_resolve.csv"
	log.Println(filePath)
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	shortUrl, err := db.GetShortClaimIdUrl(name, normalName, claimHash, rootTxNum, position)
	if err != nil {
		t.Error(err)
	}
	log.Println(shortUrl)
}

// TestClaimShortIdIter Tests the function to get an iterator of ClaimShortIds
// with a noramlized name and a partial claim id.
func TestClaimShortIdIter(t *testing.T) {
	filePath := "../testdata/F_cat.csv"
	normalName := "cat"
	claimId := "0"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	ch := db.ClaimShortIdIter(normalName, claimId)

	for row := range ch {
		key := row.Key.(*prefixes.ClaimShortIDKey)
		log.Println(key)
		if key.NormalizedName != normalName {
			t.Errorf("Expected %s, got %s", normalName, key.NormalizedName)
		}
	}
}

// TestPrintTXOToCLaim Utility function to cat the TXOToClaim csv.
func TestPrintTXOToClaim(t *testing.T) {
	filePath := "../testdata/G_2.csv"
	CatCSV(filePath)
}

// TestGetTXOToClaim Tests getting a claim hash from the db given
// a txNum and position.
func TestGetTXOToClaim(t *testing.T) {
	//&{[71] 1456296 0}
	var txNum uint32 = 1456296
	var position uint16 = 0
	filePath := "../testdata/G_2.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	val, err := db.GetCachedClaimHash(txNum, position)
	if err != nil {
		t.Error(err)
	} else if val.Name != "one" {
		t.Error(err)
	}
}

func TestGetClaimToChannel(t *testing.T) {
	streamHashStr := "9a0ed686ecdad9b6cb965c4d6681c02f0bbc66a6"
	claimHashStr := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd"
	claimHash, _ := hex.DecodeString(claimHashStr)
	streamHash, _ := hex.DecodeString(streamHashStr)

	txNum := uint32(0x6284e3)
	position := uint16(0x0)

	streamTxNum := uint32(0x369e2b2)
	streamPosition := uint16(0x0)

	var val []byte = nil

	filePath := "../testdata/I_resolve.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	val, err = db.GetChannelForClaim(claimHash, txNum, position)
	if err != nil {
		t.Error(err)
	}
	if val != nil {
		t.Errorf("Expected nil, got %s", hex.EncodeToString(val))
	}

	val, err = db.GetChannelForClaim(streamHash, streamTxNum, streamPosition)
	if err != nil {
		t.Error(err)
	}
	valStr := hex.EncodeToString(val)
	if valStr != claimHashStr {
		t.Errorf("Expected %s, got %s", claimHashStr, valStr)
	}
}

func TestGetEffectiveAmountSupportOnly(t *testing.T) {
	filePath := "../testdata/Si_resolve.csv"
	want := uint64(20000006)
	claimHashStr := "00000324e40fcb63a0b517a3660645e9bd99244a"
	claimHash, _ := hex.DecodeString(claimHashStr)
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}
	db.Height = 999999999

	amount, err := db.GetEffectiveAmount(claimHash, true)
	if err != nil {
		t.Error(err)
	}
	if amount != want {
		t.Errorf("Expected %d, got %d", want, amount)
	}

	// Cross-check against iterator-based implementation.
	iteratorAmount, err := db.GetActiveAmount(claimHash, prefixes.ActivatedSupportTXOType, db.Height)
	if err != nil {
		t.Error(err)
	}
	if iteratorAmount != want {
		t.Errorf("Expected %d, got %d", want, iteratorAmount)
	}
}

func TestGetEffectiveAmount(t *testing.T) {
	filePath := "../testdata/Si_resolve.csv"
	want := uint64(21000006)
	claimHashStr := "00000324e40fcb63a0b517a3660645e9bd99244a"
	claimHash, _ := hex.DecodeString(claimHashStr)
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	db.Height = 999999999

	amount, err := db.GetEffectiveAmount(claimHash, false)
	if err != nil {
		t.Error(err)
	}
	if amount != want {
		t.Errorf("Expected %d, got %d", want, amount)
	}

	// Cross-check against iterator-based implementation.
	iteratorAmount1, err := db.GetActiveAmount(claimHash, prefixes.ActivatedSupportTXOType, db.Height)
	if err != nil {
		t.Error(err)
	}
	iteratorAmount2, err := db.GetActiveAmount(claimHash, prefixes.ActivateClaimTXOType, db.Height)
	if err != nil {
		t.Error(err)
	}
	if iteratorAmount1+iteratorAmount2 != want {
		t.Errorf("Expected %d, got %d (%d + %d)", want, iteratorAmount1+iteratorAmount2, iteratorAmount1, iteratorAmount2)
	}
}

func TestGetSupportAmount(t *testing.T) {
	want := uint64(8654754160700)
	claimHashStr := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd"
	claimHash, err := hex.DecodeString(claimHashStr)
	if err != nil {
		t.Error(err)
	}
	filePath := "../testdata/a_resolve.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	res, err := db.GetSupportAmount(claimHash)
	if err != nil {
		t.Error(err)
	}
	if res != want {
		t.Errorf("Expected %d, got %d", want, res)
	}
}

// TODO: verify where this hash comes from exactly.
func TestGetTxHash(t *testing.T) {
	txNum := uint32(0x6284e3)
	want := "54e14ff0c404c29b3d39ae4d249435f167d5cd4ce5a428ecb745b3df1c8e3dde"

	filePath := "../testdata/X_resolve.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	resHash, err := db.GetTxHash(txNum)
	if err != nil {
		t.Error(err)
	}
	resStr := hex.EncodeToString(resHash)
	if want != resStr {
		t.Errorf("Expected %s, got %s", want, resStr)
	}
}

func TestGetExpirationHeight(t *testing.T) {
	var lastUpdated uint32 = 0
	var expHeight uint32 = 0

	expHeight = dbpkg.GetExpirationHeight(lastUpdated)
	if lastUpdated+dbpkg.OriginalClaimExpirationTime != expHeight {
		t.Errorf("Expected %d, got %d", lastUpdated+dbpkg.OriginalClaimExpirationTime, expHeight)
	}

	lastUpdated = dbpkg.ExtendedClaimExpirationForkHeight + 1
	expHeight = dbpkg.GetExpirationHeight(lastUpdated)
	if lastUpdated+dbpkg.ExtendedClaimExpirationTime != expHeight {
		t.Errorf("Expected %d, got %d", lastUpdated+dbpkg.ExtendedClaimExpirationTime, expHeight)
	}

	lastUpdated = 0
	expHeight = dbpkg.GetExpirationHeightFull(lastUpdated, true)
	if lastUpdated+dbpkg.ExtendedClaimExpirationTime != expHeight {
		t.Errorf("Expected %d, got %d", lastUpdated+dbpkg.ExtendedClaimExpirationTime, expHeight)
	}
}

func TestGetActivation(t *testing.T) {
	filePath := "../testdata/R_resolve.csv"
	txNum := uint32(0x6284e3)
	position := uint16(0x0)
	want := uint32(0xa6b65)
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
	}

	activation, err := db.GetActivation(txNum, position)
	if err != nil {
		t.Error(err)
	}
	if activation != want {
		t.Errorf("Expected %d, got %d", want, activation)
	}
	log.Printf("activation: %#v\n", activation)
}

// TestPrintClaimToTXO Utility function to cat the ClaimToTXO csv.
func TestPrintClaimToTXO(t *testing.T) {
	filePath := "../testdata/E_resolve.csv"
	CatCSV(filePath)
}

// TestGetClaimToTXO Tests getting a ClaimToTXO value from the db.
func TestGetClaimToTXO(t *testing.T) {
	claimHashStr := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd"
	want := uint32(0x6284e3)
	claimHash, err := hex.DecodeString(claimHashStr)
	if err != nil {
		t.Error(err)
		return
	}
	filePath := "../testdata/E_resolve.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
		return
	}

	res, err := db.GetCachedClaimTxo(claimHash, true)
	if err != nil {
		t.Error(err)
		return
	}
	if res.TxNum != want {
		t.Errorf("Expected %d, got %d", want, res.TxNum)
	}
	log.Printf("res: %#v\n", res)
}

// TestPrintClaimTakeover Utility function to cat the ClaimTakeover csv.
func TestPrintClaimTakeover(t *testing.T) {
	filePath := "../testdata/P_resolve.csv"
	CatCSV(filePath)
}

// TestGetControlingClaim Tests getting a controlling claim value from the db
// based on a name.
func TestGetControllingClaim(t *testing.T) {
	claimName := internal.NormalizeName("@Styxhexenhammer666")
	claimHash := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd"
	filePath := "../testdata/P_resolve.csv"
	db, _, err := OpenAndFillTmpDBColumnFamlies(filePath)
	defer db.Shutdown()
	if err != nil {
		t.Error(err)
		return
	}

	res, err := db.GetControllingClaim(claimName)
	if err != nil {
		t.Error(err)
	}

	got := hex.EncodeToString(res.ClaimHash)
	if claimHash != got {
		t.Errorf("Expected %s, got %s", claimHash, got)
	}
	log.Println(res)
}

// TestIter Tests the db iterator. Probably needs data format updated.
func TestIter(t *testing.T) {

	filePath := "../testdata/W.csv"

	db, records, toDefer, handle, err := OpenAndFillTmpDBCF(filePath)
	if err != nil {
		t.Error(err)
		return
	}
	// skip the cf
	records = records[1:]
	defer toDefer()
	// test prefix
	options := dbpkg.NewIterateOptions().WithPrefix([]byte{prefixes.RepostedClaim}).WithIncludeValue(true)
	options = options.WithCfHandle(handle)
	// ch := dbpkg.Iter(db, options)
	ch := dbpkg.IterCF(db, options)
	var i = 0
	for kv := range ch {
		// log.Println(kv.Key)
		gotKey := kv.Key.(*prefixes.RepostedKey).PackKey()

		keyPartial3 := kv.Key.(*prefixes.RepostedKey).PartialPack(3)
		keyPartial2 := kv.Key.(*prefixes.RepostedKey).PartialPack(2)
		keyPartial1 := kv.Key.(*prefixes.RepostedKey).PartialPack(1)

		// Check pack partial for sanity
		if !bytes.HasPrefix(gotKey, keyPartial3) {
			t.Errorf("%+v should be prefix of %+v\n", keyPartial3, gotKey)
		}
		if !bytes.HasPrefix(gotKey, keyPartial2) {
			t.Errorf("%+v should be prefix of %+v\n", keyPartial2, gotKey)
		}
		if !bytes.HasPrefix(gotKey, keyPartial1) {
			t.Errorf("%+v should be prefix of %+v\n", keyPartial1, gotKey)
		}

		got := kv.Value.(*prefixes.RepostedValue).PackValue()
		wantKey, err := hex.DecodeString(records[i][0])
		if err != nil {
			log.Println(err)
		}
		want, err := hex.DecodeString(records[i][1])
		if err != nil {
			log.Println(err)
		}
		if !bytes.Equal(gotKey, wantKey) {
			t.Errorf("gotKey: %+v, wantKey: %+v\n", got, want)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("got: %+v, want: %+v\n", got, want)
		}
		i++
	}

	// Test start / stop
	start, err := hex.DecodeString(records[0][0])
	if err != nil {
		log.Println(err)
	}
	stop, err := hex.DecodeString(records[9][0])
	if err != nil {
		log.Println(err)
	}
	options2 := dbpkg.NewIterateOptions().WithStart(start).WithStop(stop).WithIncludeValue(true)
	options2 = options2.WithCfHandle(handle)
	ch2 := dbpkg.IterCF(db, options2)
	i = 0
	for kv := range ch2 {
		got := kv.Value.(*prefixes.RepostedValue).PackValue()
		want, err := hex.DecodeString(records[i][1])
		if err != nil {
			log.Println(err)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("got: %+v, want: %+v\n", got, want)
		}
		i++
	}
}
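A hedged sketch of the CSV fixture layout that OpenAndFillTmpDBColumnFamlies expects, based on how it reads records above: a header row whose first cell concatenates the column-family prefix letters, then one row per record holding the column-family letter, the hex-encoded key, and the hex-encoded value. The file name, prefix letters, and hex payloads here are made-up placeholders.

```go
package db_test

import (
	"encoding/csv"
	"os"
)

// writeFixture is a hypothetical helper that writes a tiny fixture in the
// layout the loader above consumes; the rows are illustrative only.
func writeFixture(path string) error {
	rows := [][]string{
		{"EP", "", ""},           // header: column families used in this fixture
		{"E", "45ab", "0001"},    // one ClaimToTXO-style row: cf, hex key, hex value
		{"P", "50cd", "0002"},    // one ClaimTakeover-style row
	}
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return csv.NewWriter(f).WriteAll(rows) // WriteAll flushes and reports any error
}
```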
213  db/iteroptions.go  Normal file
@@ -0,0 +1,213 @@
package db

// iteroptions.go contains the implementation for iterators on rocksdb used by the hub

import (
	"bytes"

	"github.com/lbryio/herald.go/db/prefixes"
	"github.com/lbryio/lbry.go/v3/extras/stop"
	"github.com/linxGnu/grocksdb"

	log "github.com/sirupsen/logrus"
)

type IterOptions struct {
	FillCache    bool
	Prefix       []byte
	Start        []byte //interface{}
	Stop         []byte //interface{}
	IncludeStart bool
	IncludeStop  bool
	IncludeKey   bool
	IncludeValue bool
	RawKey       bool
	RawValue     bool
	Grp          *stop.Group
	// DB *ReadOnlyDBColumnFamily
	CfHandle   *grocksdb.ColumnFamilyHandle
	It         *grocksdb.Iterator
	Serializer *prefixes.SerializationAPI
}

// NewIterateOptions creates a defualt options structure for a db iterator.
func NewIterateOptions() *IterOptions {
	return &IterOptions{
		FillCache:    false,
		Prefix:       []byte{},
		Start:        nil,
		Stop:         nil,
		IncludeStart: true,
		IncludeStop:  false,
		IncludeKey:   true,
		IncludeValue: false,
		RawKey:       false,
		RawValue:     false,
		Grp:          nil,
		// DB: nil,
		CfHandle:   nil,
		It:         nil,
		Serializer: prefixes.ProductionAPI,
	}
}

func (o *IterOptions) WithCfHandle(cfHandle *grocksdb.ColumnFamilyHandle) *IterOptions {
	o.CfHandle = cfHandle
	return o
}

func (o *IterOptions) WithFillCache(fillCache bool) *IterOptions {
	o.FillCache = fillCache
	return o
}

func (o *IterOptions) WithPrefix(prefix []byte) *IterOptions {
	o.Prefix = prefix
	return o
}

func (o *IterOptions) WithStart(start []byte) *IterOptions {
	o.Start = start
	return o
}

func (o *IterOptions) WithStop(stop []byte) *IterOptions {
	o.Stop = stop
	return o
}

func (o *IterOptions) WithIncludeStart(includeStart bool) *IterOptions {
	o.IncludeStart = includeStart
	return o
}

func (o *IterOptions) WithIncludeStop(includeStop bool) *IterOptions {
	o.IncludeStop = includeStop
	return o
}

func (o *IterOptions) WithIncludeKey(includeKey bool) *IterOptions {
	o.IncludeKey = includeKey
	return o
}

func (o *IterOptions) WithIncludeValue(includeValue bool) *IterOptions {
	o.IncludeValue = includeValue
	return o
}

func (o *IterOptions) WithRawKey(rawKey bool) *IterOptions {
	o.RawKey = rawKey
	return o
}

func (o *IterOptions) WithRawValue(rawValue bool) *IterOptions {
	o.RawValue = rawValue
	return o
}

func (o *IterOptions) WithDB(db *ReadOnlyDBColumnFamily) *IterOptions {
	// o.Grp.AddNamed(1, iterKey)
	o.Grp = stop.New(db.Grp)
	o.Grp.Add(1)
	return o
}

func (o *IterOptions) WithSerializer(serializer *prefixes.SerializationAPI) *IterOptions {
	o.Serializer = serializer
	return o
}

// ReadRow reads a row from the db, returns nil when no more rows are available.
func (opts *IterOptions) ReadRow(prevKey *[]byte) *prefixes.PrefixRowKV {
	it := opts.It
	if !it.Valid() {
		log.Trace("ReadRow iterator not valid returning nil")
		return nil
	}

	key := it.Key()
	defer key.Free()
	keyData := key.Data()
	keyLen := len(keyData)

	value := it.Value()
	defer value.Free()
	valueData := value.Data()
	valueLen := len(valueData)

	var outKey prefixes.BaseKey = nil
	var outValue prefixes.BaseValue = nil
	var rawOutKey []byte = nil
	var rawOutValue []byte = nil
	var err error = nil

	log.Trace("keyData:", keyData)
	log.Trace("valueData:", valueData)

	// We need to check the current key if we're not including the stop
	// key.
	if !opts.IncludeStop && opts.StopIteration(keyData) {
		log.Trace("ReadRow returning nil")
		return nil
	}

	// We have to copy the key no matter what because we need to check
	// it on the next iterations to see if we're going to stop.
	newKeyData := make([]byte, keyLen)
	copy(newKeyData, keyData)
	if opts.IncludeKey && !opts.RawKey {
		outKey, err = opts.Serializer.UnpackKey(newKeyData)
		if err != nil {
			log.Error(err)
		}
	} else if opts.IncludeKey {
		rawOutKey = newKeyData
	}

	// Value could be quite large, so this setting could be important
	// for performance in some cases.
	if opts.IncludeValue {
		newValueData := make([]byte, valueLen)
		copy(newValueData, valueData)
		if !opts.RawValue {
			outValue, err = opts.Serializer.UnpackValue(newKeyData, newValueData)
			if err != nil {
				log.Error(err)
			}
		} else {
			rawOutValue = newValueData
		}
	}

	kv := &prefixes.PrefixRowKV{
		Key:      outKey,
		Value:    outValue,
		RawKey:   rawOutKey,
		RawValue: rawOutValue,
	}
	*prevKey = newKeyData

	return kv
}

// StopIteration returns true if we've hit the criteria to end iteration on this key
func (o *IterOptions) StopIteration(key []byte) bool {
	if key == nil {
		return false
	}

	maxLenStop := intMin(len(key), len(o.Stop))
	maxLenStart := intMin(len(key), len(o.Start))
	if o.Stop != nil &&
		(bytes.HasPrefix(key, o.Stop) || bytes.Compare(o.Stop, key[:maxLenStop]) < 0) {
		return true
	} else if o.Start != nil &&
		bytes.Compare(o.Start, key[:maxLenStart]) > 0 {
		return true
	} else if o.Prefix != nil && !bytes.HasPrefix(key, o.Prefix) {
		return true
	}

	return false
}
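A minimal sketch of how these builder-style options are combined into a prefix scan over one column family, following the pattern ResolveClaimInChannel uses in db_resolve.go. The helper name is hypothetical and the prefix byte is illustrative.

```go
package db

import "github.com/lbryio/herald.go/db/prefixes"

// scanChannelToClaim is a hypothetical helper: it resolves the column family
// handle, builds IterOptions with the fluent setters above, and ranges over
// the rows produced by IterCF.
func scanChannelToClaim(db *ReadOnlyDBColumnFamily) error {
	handle, err := db.EnsureHandle(prefixes.ChannelToClaim)
	if err != nil {
		return err
	}
	options := NewIterateOptions().
		WithDB(db).
		WithCfHandle(handle).
		WithPrefix([]byte{prefixes.ChannelToClaim}).
		WithIncludeValue(true)
	for kv := range IterCF(db.DB, options) {
		key := kv.Key.(*prefixes.ChannelToClaimKey)
		value := kv.Value.(*prefixes.ChannelToClaimValue)
		_, _ = key, value // inspect the decoded key/value here
	}
	return nil
}
```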
230
db/prefixes/generic.go
Normal file
230
db/prefixes/generic.go
Normal file
|
@ -0,0 +1,230 @@
|
|||
package prefixes
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/go-restruct/restruct"
|
||||
"github.com/lbryio/herald.go/internal"
|
||||
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||
)
|
||||
|
||||
func init() {
|
||||
restruct.EnableExprBeta()
|
||||
}
|
||||
|
||||
// Type OnesComplementEffectiveAmount (uint64) has to be encoded specially
|
||||
// to get the desired sort ordering.
|
||||
// Implement the Sizer, Packer, Unpacker interfaces to handle it manually.
|
||||
|
||||
func (amt *OnesComplementEffectiveAmount) SizeOf() int {
|
||||
return 8
|
||||
}
|
||||
|
||||
func (amt *OnesComplementEffectiveAmount) Pack(buf []byte, order binary.ByteOrder) ([]byte, error) {
|
||||
binary.BigEndian.PutUint64(buf, OnesCompTwiddle64-uint64(*amt))
|
||||
return buf[8:], nil
|
||||
}
|
||||
|
||||
func (amt *OnesComplementEffectiveAmount) Unpack(buf []byte, order binary.ByteOrder) ([]byte, error) {
|
||||
*amt = OnesComplementEffectiveAmount(OnesCompTwiddle64 - binary.BigEndian.Uint64(buf))
|
||||
return buf[8:], nil
|
||||
}
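For context, the twiddle above is what makes larger amounts sort first under RocksDB's ascending byte-wise key order. A tiny self-contained sketch of the effect, assuming OnesCompTwiddle64 is math.MaxUint64 (as its name and the uint64 width suggest):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
)

// encode mirrors OnesComplementEffectiveAmount.Pack, assuming
// OnesCompTwiddle64 == math.MaxUint64: big-endian bytes of (MaxUint64 - amount).
func encode(amount uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, math.MaxUint64-amount)
	return buf
}

func main() {
	small, large := encode(100), encode(5000)
	// The larger amount produces the lexicographically smaller key, so an
	// ascending key scan yields amounts in descending order.
	fmt.Println(bytes.Compare(large, small) < 0) // prints: true
}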
|
||||
|
||||
// Struct BlockTxsValue has a field TxHashes of type []*chainhash.Hash.
|
||||
// I haven't been able to figure out the right annotations to make
|
||||
// restruct.Pack,Unpack work automagically.
|
||||
// Implement the Sizer, Packer, Unpacker interfaces to handle it manually.
|
||||
|
||||
func (kv *BlockTxsValue) SizeOf() int {
|
||||
return 32 * len(kv.TxHashes)
|
||||
}
|
||||
|
||||
func (kv *BlockTxsValue) Pack(buf []byte, order binary.ByteOrder) ([]byte, error) {
|
||||
offset := 0
|
||||
for _, h := range kv.TxHashes {
|
||||
offset += copy(buf[offset:], h[:])
|
||||
}
|
||||
return buf[offset:], nil
|
||||
}
|
||||
|
||||
func (kv *BlockTxsValue) Unpack(buf []byte, order binary.ByteOrder) ([]byte, error) {
|
||||
offset := 0
|
||||
kv.TxHashes = make([]*chainhash.Hash, len(buf)/32)
|
||||
for i := range kv.TxHashes {
|
||||
kv.TxHashes[i] = (*chainhash.Hash)(buf[offset : offset+32])
|
||||
offset += 32
|
||||
}
|
||||
return buf[offset:], nil
|
||||
}
|
||||
|
||||
// Struct BigEndianChainHash is a chainhash.Hash stored in external
|
||||
// byte-order (opposite of other 32 byte chainhash.Hash values). In order
|
||||
// to reuse chainhash.Hash we need to correct the byte-order.
|
||||
// Currently this type is used for field Genesis of DBStateValue.
|
||||
|
||||
func (kv *BigEndianChainHash) SizeOf() int {
|
||||
return chainhash.HashSize
|
||||
}
|
||||
|
||||
func (kv *BigEndianChainHash) Pack(buf []byte, order binary.ByteOrder) ([]byte, error) {
|
||||
offset := 0
|
||||
hash := kv.CloneBytes()
|
||||
// HACK: Instances of chainhash.Hash use the internal byte-order.
|
||||
// Python scribe writes bytes of genesis hash in external byte-order.
|
||||
internal.ReverseBytesInPlace(hash)
|
||||
offset += copy(buf[offset:chainhash.HashSize], hash[:])
|
||||
return buf[offset:], nil
|
||||
}
|
||||
|
||||
func (kv *BigEndianChainHash) Unpack(buf []byte, order binary.ByteOrder) ([]byte, error) {
|
||||
offset := 0
|
||||
offset += copy(kv.Hash[:], buf[offset:32])
|
||||
// HACK: Instances of chainhash.Hash use the internal byte-order.
|
||||
// Python scribe writes bytes of genesis hash in external byte-order.
|
||||
internal.ReverseBytesInPlace(kv.Hash[:])
|
||||
return buf[offset:], nil
|
||||
}
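The byte-order reversal above exists because chainhash.Hash keeps its bytes in internal (reversed) order while the genesis hash is stored on disk in external display order. A hedged illustration of the mismatch, assuming lbcd's chainhash package behaves like its btcd ancestor:

package main

import (
	"fmt"

	"github.com/lbryio/lbcd/chaincfg/chainhash"
)

func main() {
	// Display-order (external) hex string for a made-up hash.
	h, err := chainhash.NewHashFromStr("00000000000000000000000000000000000000000000000000000000deadbeef")
	if err != nil {
		panic(err)
	}
	// h[:] holds the bytes in internal order, i.e. reversed relative to the
	// string above, which is why Pack/Unpack flip the bytes before touching
	// the DB.
	fmt.Printf("display:  %s\ninternal: %x\n", h, h[:])
}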
|
||||
|
||||
func genericNew(prefix []byte, key bool) (interface{}, error) {
|
||||
t, ok := prefixRegistry[prefix[0]]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("not handled: prefix=%v", prefix))
|
||||
}
|
||||
if key {
|
||||
return t.newKey(), nil
|
||||
}
|
||||
return t.newValue(), nil
|
||||
}
|
||||
|
||||
func GenericPack(kv interface{}, fields int) ([]byte, error) {
|
||||
// Locate the byte offset of the first excluded field.
|
||||
offset := 0
|
||||
if fields > 0 {
|
||||
v := reflect.ValueOf(kv)
|
||||
t := v.Type()
|
||||
// Handle indirection to reach kind=Struct.
|
||||
switch t.Kind() {
|
||||
case reflect.Interface, reflect.Pointer:
|
||||
v = v.Elem()
|
||||
t = v.Type()
|
||||
default:
|
||||
panic(fmt.Sprintf("not handled: %v", t.Kind()))
|
||||
}
|
||||
count := 0
|
||||
for _, sf := range reflect.VisibleFields(t) {
|
||||
if !sf.IsExported() {
|
||||
continue
|
||||
}
|
||||
if sf.Anonymous && strings.HasPrefix(sf.Name, "LengthEncoded") {
|
||||
fields += 1 // Skip it but process NameLen and Name instead.
|
||||
continue
|
||||
}
|
||||
if count > fields {
|
||||
break
|
||||
}
|
||||
sz, err := restruct.SizeOf(v.FieldByIndex(sf.Index).Interface())
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("not handled: %v: %v", sf.Name, sf.Type.Kind()))
|
||||
}
|
||||
offset += sz
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
// Pack the struct. No ability to partially pack.
|
||||
buf, err := restruct.Pack(binary.BigEndian, kv)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("not handled: %v", err))
|
||||
}
|
||||
// Return a prefix if some fields were excluded.
|
||||
if fields > 0 {
|
||||
return buf[:offset], nil
|
||||
}
|
||||
return buf, nil
|
||||
}
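To make the offset bookkeeping concrete: partial packing here means "pack the whole struct, then keep only the bytes up to where the first excluded field begins". A toy sketch of the same idea with a hypothetical two-field key (toyKey is illustrative, not a type from the prefixes package):

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/go-restruct/restruct"
)

// toyKey is a hypothetical key: a 1-byte prefix followed by a uint32.
type toyKey struct {
	Prefix [1]byte
	TxNum  uint32
}

func main() {
	k := &toyKey{Prefix: [1]byte{'x'}, TxNum: 7}
	full, err := restruct.Pack(binary.BigEndian, k)
	if err != nil {
		panic(err)
	}
	// A "partial" pack over just the Prefix field: pack everything, then
	// truncate at the byte offset of the first excluded field.
	partial := full[:1]
	fmt.Printf("full=%x partial=%x\n", full, partial) // full=7800000007 partial=78
}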
|
||||
|
||||
func GenericUnpack(pfx []byte, key bool, buf []byte) (interface{}, error) {
|
||||
kv, _ := genericNew(pfx, key)
|
||||
err := restruct.Unpack(buf, binary.BigEndian, kv)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("not handled: %v", err))
|
||||
}
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
func GetSerializationAPI(prefix []byte) *SerializationAPI {
|
||||
t, ok := prefixRegistry[prefix[0]]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("not handled: prefix=%v", prefix))
|
||||
}
|
||||
if t.API != nil {
|
||||
return t.API
|
||||
}
|
||||
return ProductionAPI
|
||||
}
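In other words, the first byte of a raw key selects the per-prefix serializer, with ProductionAPI as the fallback. A short sketch of decoding a raw key/value pair pulled out of RocksDB; decode is an illustrative helper and the pair is assumed to come from elsewhere:

package main

import (
	"fmt"

	prefixes "github.com/lbryio/herald.go/db/prefixes"
)

// decode unpacks a raw key/value pair read from RocksDB. The key must start
// with one of the registered prefix bytes, otherwise GetSerializationAPI
// panics.
func decode(rawKey, rawValue []byte) {
	api := prefixes.GetSerializationAPI(rawKey[:1])
	key, err := api.UnpackKey(rawKey)
	if err != nil {
		fmt.Println(err)
		return
	}
	value, err := api.UnpackValue(rawKey[:1], rawValue)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%v -> %v\n", key, value)
}

func main() {}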
|
||||
|
||||
type SerializationAPI struct {
|
||||
PackKey func(key BaseKey) ([]byte, error)
|
||||
PackPartialKey func(key BaseKey, fields int) ([]byte, error)
|
||||
PackValue func(value BaseValue) ([]byte, error)
|
||||
UnpackKey func(key []byte) (BaseKey, error)
|
||||
UnpackValue func(prefix []byte, value []byte) (BaseValue, error)
|
||||
}
|
||||
|
||||
var ProductionAPI = &SerializationAPI{
|
||||
PackKey: PackGenericKey,
|
||||
PackPartialKey: PackPartialGenericKey,
|
||||
PackValue: PackGenericValue,
|
||||
UnpackKey: UnpackGenericKey,
|
||||
UnpackValue: UnpackGenericValue,
|
||||
}
|
||||
|
||||
var RegressionAPI_1 = &SerializationAPI{
|
||||
PackKey: func(key BaseKey) ([]byte, error) {
|
||||
return GenericPack(key, -1)
|
||||
},
|
||||
PackPartialKey: func(key BaseKey, fields int) ([]byte, error) {
|
||||
return GenericPack(key, fields)
|
||||
},
|
||||
PackValue: func(value BaseValue) ([]byte, error) {
|
||||
return GenericPack(value, -1)
|
||||
},
|
||||
UnpackKey: UnpackGenericKey,
|
||||
UnpackValue: UnpackGenericValue,
|
||||
}
|
||||
|
||||
var RegressionAPI_2 = &SerializationAPI{
|
||||
PackKey: PackGenericKey,
|
||||
PackPartialKey: PackPartialGenericKey,
|
||||
PackValue: PackGenericValue,
|
||||
UnpackKey: func(key []byte) (BaseKey, error) {
|
||||
k, err := GenericUnpack(key, true, key)
|
||||
return k.(BaseKey), err
|
||||
},
|
||||
UnpackValue: func(prefix []byte, value []byte) (BaseValue, error) {
|
||||
k, err := GenericUnpack(prefix, false, value)
|
||||
return k.(BaseValue), err
|
||||
},
|
||||
}
|
||||
|
||||
var RegressionAPI_3 = &SerializationAPI{
|
||||
PackKey: func(key BaseKey) ([]byte, error) {
|
||||
return GenericPack(key, -1)
|
||||
},
|
||||
PackPartialKey: func(key BaseKey, fields int) ([]byte, error) {
|
||||
return GenericPack(key, fields)
|
||||
},
|
||||
PackValue: func(value BaseValue) ([]byte, error) {
|
||||
return GenericPack(value, -1)
|
||||
},
|
||||
UnpackKey: func(key []byte) (BaseKey, error) {
|
||||
k, err := GenericUnpack(key, true, key)
|
||||
return k.(BaseKey), err
|
||||
},
|
||||
UnpackValue: func(prefix []byte, value []byte) (BaseValue, error) {
|
||||
k, err := GenericUnpack(prefix, false, value)
|
||||
return k.(BaseValue), err
|
||||
},
|
||||
}
|
4099
db/prefixes/prefixes.go
Normal file
File diff suppressed because it is too large
567
db/prefixes/prefixes_test.go
Normal file
|
@ -0,0 +1,567 @@
|
|||
package prefixes_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/csv"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
dbpkg "github.com/lbryio/herald.go/db"
|
||||
prefixes "github.com/lbryio/herald.go/db/prefixes"
|
||||
"github.com/linxGnu/grocksdb"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestPrefixRegistry(t *testing.T) {
|
||||
for _, prefix := range prefixes.GetPrefixes() {
|
||||
if prefixes.GetSerializationAPI(prefix) == nil {
|
||||
t.Errorf("prefix %c not registered", prefix)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testInit(filePath string) (*grocksdb.DB, [][]string, func(), *grocksdb.ColumnFamilyHandle) {
|
||||
log.Println(filePath)
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
reader := csv.NewReader(file)
|
||||
records, err := reader.ReadAll()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
columnFamily := records[0][0]
|
||||
records = records[1:]
|
||||
|
||||
cleanupFiles := func() {
|
||||
err = os.RemoveAll("./tmp")
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
// wOpts := grocksdb.NewDefaultWriteOptions()
|
||||
opts := grocksdb.NewDefaultOptions()
|
||||
opts.SetCreateIfMissing(true)
|
||||
db, err := grocksdb.OpenDb(opts, "tmp")
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
// Garbage might have been left behind by a prior crash.
|
||||
cleanupFiles()
|
||||
db, err = grocksdb.OpenDb(opts, "tmp")
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
handle, err := db.CreateColumnFamily(opts, columnFamily)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
toDefer := func() {
|
||||
db.Close()
|
||||
cleanupFiles()
|
||||
}
|
||||
|
||||
return db, records, toDefer, handle
|
||||
}
|
||||
|
||||
func testGeneric(filePath string, prefix byte, numPartials int) func(*testing.T) {
|
||||
return func(t *testing.T) {
|
||||
APIs := []*prefixes.SerializationAPI{
|
||||
prefixes.GetSerializationAPI([]byte{prefix}),
|
||||
// Verify combinations of production vs. "restruct" implementations of
|
||||
// serialization API (e.g production Pack() with "restruct" Unpack()).
|
||||
prefixes.RegressionAPI_1,
|
||||
prefixes.RegressionAPI_2,
|
||||
prefixes.RegressionAPI_3,
|
||||
}
|
||||
for _, api := range APIs {
|
||||
opts := dbpkg.NewIterateOptions().WithPrefix([]byte{prefix}).WithSerializer(api).WithIncludeValue(true)
|
||||
testGenericOptions(opts, filePath, prefix, numPartials)(t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testGenericOptions(options *dbpkg.IterOptions, filePath string, prefix byte, numPartials int) func(*testing.T) {
|
||||
return func(t *testing.T) {
|
||||
|
||||
wOpts := grocksdb.NewDefaultWriteOptions()
|
||||
db, records, toDefer, handle := testInit(filePath)
|
||||
defer toDefer()
|
||||
for _, record := range records {
|
||||
key, err := hex.DecodeString(record[0])
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
val, err := hex.DecodeString(record[1])
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
// db.Put(wOpts, key, val)
|
||||
db.PutCF(wOpts, handle, key, val)
|
||||
}
|
||||
// test prefix
|
||||
options = options.WithCfHandle(handle)
|
||||
ch := dbpkg.IterCF(db, options)
|
||||
var i = 0
|
||||
for kv := range ch {
|
||||
// log.Println(kv.Key)
|
||||
gotKey, err := options.Serializer.PackKey(kv.Key)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
if numPartials != kv.Key.NumFields() {
|
||||
t.Errorf("key reports %v fields but %v expected", kv.Key.NumFields(), numPartials)
|
||||
}
|
||||
for j := 1; j <= numPartials; j++ {
|
||||
keyPartial, _ := options.Serializer.PackPartialKey(kv.Key, j)
|
||||
// Check pack partial for sanity
|
||||
if j < numPartials {
|
||||
if !bytes.HasPrefix(gotKey, keyPartial) || (len(keyPartial) >= len(gotKey)) {
|
||||
t.Errorf("%+v should be prefix of %+v\n", keyPartial, gotKey)
|
||||
}
|
||||
} else {
|
||||
if !bytes.Equal(gotKey, keyPartial) {
|
||||
t.Errorf("%+v should be equal to %+v\n", keyPartial, gotKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
got, err := options.Serializer.PackValue(kv.Value)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
wantKey, err := hex.DecodeString(records[i][0])
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
want, err := hex.DecodeString(records[i][1])
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
if !bytes.Equal(gotKey, wantKey) {
|
||||
t.Errorf("gotKey: %+v, wantKey: %+v\n", gotKey, wantKey)
|
||||
}
|
||||
if !bytes.Equal(got, want) {
|
||||
t.Errorf("got: %+v, want: %+v\n", got, want)
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
// Test start / stop
|
||||
start, err := hex.DecodeString(records[0][0])
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
numRecords := i
|
||||
// var numRecords = 9
|
||||
// if prefix == prefixes.Undo || prefix == prefixes.DBState {
|
||||
// numRecords = 1
|
||||
// }
|
||||
stop, err := hex.DecodeString(records[numRecords-1][0])
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
options2 := dbpkg.NewIterateOptions().WithSerializer(options.Serializer).WithStart(start).WithStop(stop).WithIncludeValue(true)
|
||||
options2 = options2.WithCfHandle(handle)
|
||||
ch2 := dbpkg.IterCF(db, options2)
|
||||
i = 0
|
||||
for kv := range ch2 {
|
||||
got, err := options2.Serializer.PackValue(kv.Value)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
want, err := hex.DecodeString(records[i][1])
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
if !bytes.Equal(got, want) {
|
||||
t.Errorf("got: %+v, want: %+v\n", got, want)
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSupportAmount(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.SupportAmount)
|
||||
testGeneric(filePath, prefixes.SupportAmount, 1)(t)
|
||||
}
|
||||
|
||||
func TestChannelCount(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ChannelCount)
|
||||
testGeneric(filePath, prefixes.ChannelCount, 1)(t)
|
||||
}
|
||||
|
||||
func TestDBState(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.DBState)
|
||||
testGeneric(filePath, prefixes.DBState, 0)(t)
|
||||
}
|
||||
|
||||
func TestBlockTxs(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.BlockTXs)
|
||||
testGeneric(filePath, prefixes.BlockTXs, 1)(t)
|
||||
}
|
||||
|
||||
func TestTxCount(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.TxCount)
|
||||
testGeneric(filePath, prefixes.TxCount, 1)(t)
|
||||
}
|
||||
|
||||
func TestTxHash(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.TxHash)
|
||||
testGeneric(filePath, prefixes.TxHash, 1)(t)
|
||||
}
|
||||
|
||||
func TestTxNum(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.TxNum)
|
||||
testGeneric(filePath, prefixes.TxNum, 1)(t)
|
||||
}
|
||||
|
||||
func TestTx(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.Tx)
|
||||
testGeneric(filePath, prefixes.Tx, 1)(t)
|
||||
}
|
||||
|
||||
func TestHashXHistory(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.HashXHistory)
|
||||
testGeneric(filePath, prefixes.HashXHistory, 2)(t)
|
||||
}
|
||||
|
||||
func TestUndo(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.Undo)
|
||||
testGeneric(filePath, prefixes.Undo, 1)(t)
|
||||
}
|
||||
|
||||
func TestBlockHash(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.BlockHash)
|
||||
testGeneric(filePath, prefixes.BlockHash, 1)(t)
|
||||
}
|
||||
|
||||
func TestBlockHeader(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.Header)
|
||||
testGeneric(filePath, prefixes.Header, 1)(t)
|
||||
}
|
||||
|
||||
func TestClaimToTXO(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimToTXO)
|
||||
testGeneric(filePath, prefixes.ClaimToTXO, 1)(t)
|
||||
}
|
||||
|
||||
func TestTXOToClaim(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.TXOToClaim)
|
||||
testGeneric(filePath, prefixes.TXOToClaim, 2)(t)
|
||||
}
|
||||
|
||||
func TestClaimShortID(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimShortIdPrefix)
|
||||
testGeneric(filePath, prefixes.ClaimShortIdPrefix, 4)(t)
|
||||
}
|
||||
|
||||
func TestClaimToChannel(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimToChannel)
|
||||
testGeneric(filePath, prefixes.ClaimToChannel, 3)(t)
|
||||
}
|
||||
|
||||
func TestChannelToClaim(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ChannelToClaim)
|
||||
testGeneric(filePath, prefixes.ChannelToClaim, 4)(t)
|
||||
}
|
||||
|
||||
func TestClaimToSupport(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimToSupport)
|
||||
testGeneric(filePath, prefixes.ClaimToSupport, 3)(t)
|
||||
}
|
||||
|
||||
func TestSupportToClaim(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.SupportToClaim)
|
||||
testGeneric(filePath, prefixes.SupportToClaim, 2)(t)
|
||||
}
|
||||
|
||||
func TestClaimExpiration(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimExpiration)
|
||||
testGeneric(filePath, prefixes.ClaimExpiration, 3)(t)
|
||||
}
|
||||
|
||||
func TestClaimTakeover(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimTakeover)
|
||||
testGeneric(filePath, prefixes.ClaimTakeover, 1)(t)
|
||||
}
|
||||
|
||||
func TestPendingActivation(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.PendingActivation)
|
||||
testGeneric(filePath, prefixes.PendingActivation, 4)(t)
|
||||
}
|
||||
|
||||
func TestActivated(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ActivatedClaimAndSupport)
|
||||
testGeneric(filePath, prefixes.ActivatedClaimAndSupport, 3)(t)
|
||||
}
|
||||
|
||||
func TestActiveAmount(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ActiveAmount)
|
||||
testGeneric(filePath, prefixes.ActiveAmount, 5)(t)
|
||||
}
|
||||
|
||||
func TestBidOrder(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.BidOrder)
|
||||
testGeneric(filePath, prefixes.BidOrder, 4)(t)
|
||||
}
|
||||
|
||||
func TestRepost(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.Repost)
|
||||
testGeneric(filePath, prefixes.Repost, 1)(t)
|
||||
}
|
||||
|
||||
func TestRepostedClaim(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.RepostedClaim)
|
||||
testGeneric(filePath, prefixes.RepostedClaim, 3)(t)
|
||||
}
|
||||
|
||||
func TestRepostedCount(t *testing.T) {
|
||||
prefix := byte(prefixes.RepostedCount)
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
|
||||
//synthesizeTestData([]byte{prefix}, filePath, []int{20}, []int{4}, [][3]int{})
|
||||
key := &prefixes.RepostedCountKey{}
|
||||
testGeneric(filePath, prefix, key.NumFields())(t)
|
||||
}
|
||||
|
||||
func TestClaimDiff(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimDiff)
|
||||
testGeneric(filePath, prefixes.ClaimDiff, 1)(t)
|
||||
}
|
||||
|
||||
func TestUTXO(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.UTXO)
|
||||
testGeneric(filePath, prefixes.UTXO, 3)(t)
|
||||
}
|
||||
|
||||
func TestHashXUTXO(t *testing.T) {
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.HashXUTXO)
|
||||
testGeneric(filePath, prefixes.HashXUTXO, 3)(t)
|
||||
}
|
||||
|
||||
func TestUTXOKey_String(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
prefix []byte
|
||||
hashx []byte
|
||||
txnum uint32
|
||||
nout uint16
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "Converts to string",
|
||||
prefix: []byte("u"),
|
||||
hashx: []byte("AAAAAAAAAA"),
|
||||
txnum: 0,
|
||||
nout: 0,
|
||||
want: "*prefixes.UTXOKey(hashX=41414141414141414141, tx_num=0, nout=0)",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
key := &prefixes.UTXOKey{
|
||||
Prefix: tt.prefix,
|
||||
HashX: tt.hashx,
|
||||
TxNum: tt.txnum,
|
||||
Nout: tt.nout,
|
||||
}
|
||||
|
||||
got := fmt.Sprint(key)
|
||||
log.Println(got)
|
||||
if got != tt.want {
|
||||
t.Errorf("got: %s, want: %s\n", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrendingNotifications(t *testing.T) {
|
||||
prefix := byte(prefixes.TrendingNotifications)
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
|
||||
//synthesizeTestData([]byte{prefix}, filePath, []int{4, 20}, []int{8, 8}, [][3]int{})
|
||||
key := &prefixes.TrendingNotificationKey{}
|
||||
testGeneric(filePath, prefix, key.NumFields())(t)
|
||||
}
|
||||
|
||||
func TestMempoolTx(t *testing.T) {
|
||||
prefix := byte(prefixes.MempoolTx)
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
|
||||
//synthesizeTestData([]byte{prefix}, filePath, []int{32}, []int{}, [][3]int{{20, 100, 1}})
|
||||
key := &prefixes.MempoolTxKey{}
|
||||
testGeneric(filePath, prefix, key.NumFields())(t)
|
||||
}
|
||||
|
||||
func TestTouchedHashX(t *testing.T) {
|
||||
prefix := byte(prefixes.TouchedHashX)
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
|
||||
//synthesizeTestData([]byte{prefix}, filePath, []int{4}, []int{}, [][3]int{{1, 5, 11}})
|
||||
key := &prefixes.TouchedHashXKey{}
|
||||
testGeneric(filePath, prefix, key.NumFields())(t)
|
||||
}
|
||||
|
||||
func TestHashXStatus(t *testing.T) {
|
||||
prefix := byte(prefixes.HashXStatus)
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
|
||||
//synthesizeTestData([]byte{prefix}, filePath, []int{20}, []int{32}, [][3]int{})
|
||||
key := &prefixes.HashXStatusKey{}
|
||||
testGeneric(filePath, prefix, key.NumFields())(t)
|
||||
}
|
||||
|
||||
func TestHashXMempoolStatus(t *testing.T) {
|
||||
prefix := byte(prefixes.HashXMempoolStatus)
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
|
||||
//synthesizeTestData([]byte{prefix}, filePath, []int{20}, []int{32}, [][3]int{})
|
||||
key := &prefixes.HashXMempoolStatusKey{}
|
||||
testGeneric(filePath, prefix, key.NumFields())(t)
|
||||
}
|
||||
|
||||
func TestEffectiveAmount(t *testing.T) {
|
||||
prefix := byte(prefixes.EffectiveAmount)
|
||||
filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
|
||||
//synthesizeTestData([]byte{prefix}, filePath, []int{20}, []int{8, 8}, [][3]int{})
|
||||
key := &prefixes.EffectiveAmountKey{}
|
||||
testGeneric(filePath, prefix, key.NumFields())(t)
|
||||
}
|
||||
|
||||
func synthesizeTestData(prefix []byte, filePath string, keyFixed, valFixed []int, valVariable [][3]int) {
|
||||
file, err := os.OpenFile(filePath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
records := make([][2][]byte, 0, 20)
|
||||
for r := 0; r < 20; r++ {
|
||||
key := make([]byte, 0, 1000)
|
||||
key = append(key, prefix...)
|
||||
val := make([]byte, 0, 1000)
|
||||
// Handle fixed columns of key.
|
||||
for _, width := range keyFixed {
|
||||
v := make([]byte, width)
|
||||
rand.Read(v)
|
||||
key = append(key, v...)
|
||||
}
|
||||
// Handle fixed columns of value.
|
||||
for _, width := range valFixed {
|
||||
v := make([]byte, width)
|
||||
rand.Read(v)
|
||||
val = append(val, v...)
|
||||
}
|
||||
// Handle variable length array in value. Each element is "chunk" size.
|
||||
for _, w := range valVariable {
|
||||
low, high, chunk := w[0], w[1], w[2]
|
||||
n, _ := rand.Int(rand.Reader, big.NewInt(int64(high-low)))
|
||||
v := make([]byte, chunk*(low+int(n.Int64())))
|
||||
rand.Read(v)
|
||||
val = append(val, v...)
|
||||
}
|
||||
records = append(records, [2][]byte{key, val})
|
||||
}
|
||||
|
||||
sort.Slice(records, func(i, j int) bool { return bytes.Compare(records[i][0], records[j][0]) == -1 })
|
||||
|
||||
wr := csv.NewWriter(file)
|
||||
wr.Write([]string{string(prefix), ""}) // column headers
|
||||
for _, rec := range records {
|
||||
encoded := []string{hex.EncodeToString(rec[0]), hex.EncodeToString(rec[1])}
|
||||
err := wr.Write(encoded)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
wr.Flush()
|
||||
}
|
||||
|
||||
// Fuzz tests for various Key and Value types (EXPERIMENTAL)
|
||||
|
||||
func FuzzTouchedHashXKey(f *testing.F) {
|
||||
kvs := []prefixes.TouchedHashXKey{
|
||||
{
|
||||
Prefix: []byte{prefixes.TouchedHashX},
|
||||
Height: 0,
|
||||
},
|
||||
{
|
||||
Prefix: []byte{prefixes.TouchedHashX},
|
||||
Height: 1,
|
||||
},
|
||||
{
|
||||
Prefix: []byte{prefixes.TouchedHashX},
|
||||
Height: math.MaxUint32,
|
||||
},
|
||||
}
|
||||
|
||||
for _, kv := range kvs {
|
||||
seed := make([]byte, 0, 200)
|
||||
seed = append(seed, kv.PackKey()...)
|
||||
f.Add(seed)
|
||||
}
|
||||
|
||||
f.Fuzz(func(t *testing.T, in []byte) {
|
||||
t.Logf("testing: %+v", in)
|
||||
out := make([]byte, 0, 200)
|
||||
var kv prefixes.TouchedHashXKey
|
||||
kv.UnpackKey(in)
|
||||
out = append(out, kv.PackKey()...)
|
||||
if len(in) >= 5 {
|
||||
if !bytes.HasPrefix(in, out) {
|
||||
t.Fatalf("%v: not equal after round trip: %v", in, out)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzTouchedHashXValue(f *testing.F) {
|
||||
kvs := []prefixes.TouchedHashXValue{
|
||||
{
|
||||
TouchedHashXs: [][]byte{},
|
||||
},
|
||||
{
|
||||
TouchedHashXs: [][]byte{
|
||||
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
|
||||
},
|
||||
},
|
||||
{
|
||||
TouchedHashXs: [][]byte{
|
||||
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
|
||||
},
|
||||
},
|
||||
{
|
||||
TouchedHashXs: [][]byte{
|
||||
{0xff, 0xff, 2, 3, 4, 5, 6, 7, 8, 9, 10},
|
||||
{0, 1, 0xff, 0xff, 4, 5, 6, 7, 8, 9, 10},
|
||||
{0, 1, 2, 3, 0xff, 0xff, 6, 7, 8, 9, 10},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, kv := range kvs {
|
||||
seed := make([]byte, 0, 200)
|
||||
seed = append(seed, kv.PackValue()...)
|
||||
f.Add(seed)
|
||||
}
|
||||
|
||||
f.Fuzz(func(t *testing.T, in []byte) {
|
||||
t.Logf("testing: %+v", in)
|
||||
out := make([]byte, 0, 200)
|
||||
var kv prefixes.TouchedHashXValue
|
||||
kv.UnpackValue(in)
|
||||
out = append(out, kv.PackValue()...)
|
||||
if len(in) >= 5 {
|
||||
if !bytes.HasPrefix(in, out) {
|
||||
t.Fatalf("%v: not equal after round trip: %v", in, out)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
102
db/stack/stack.go
Normal file
|
@ -0,0 +1,102 @@
|
|||
package stack
|
||||
|
||||
// The stack package contains the implementation of a generic slice-backed stack
|
||||
// used for tracking various states in the hub, e.g. headers and txcounts
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/lbryio/herald.go/internal"
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
type SliceBacked[T any] struct {
|
||||
slice []T
|
||||
len uint32
|
||||
mut sync.RWMutex
|
||||
}
|
||||
|
||||
func NewSliceBacked[T any](size int) *SliceBacked[T] {
|
||||
return &SliceBacked[T]{
|
||||
slice: make([]T, size),
|
||||
len: 0,
|
||||
mut: sync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SliceBacked[T]) Push(v T) {
|
||||
s.mut.Lock()
|
||||
defer s.mut.Unlock()
|
||||
|
||||
if s.len == uint32(len(s.slice)) {
|
||||
s.slice = append(s.slice, v)
|
||||
} else {
|
||||
s.slice[s.len] = v
|
||||
}
|
||||
s.len++
|
||||
}
|
||||
|
||||
func (s *SliceBacked[T]) Pop() T {
|
||||
s.mut.Lock()
|
||||
defer s.mut.Unlock()
|
||||
|
||||
if s.len == 0 {
|
||||
var null T
|
||||
return null
|
||||
}
|
||||
s.len--
|
||||
return s.slice[s.len]
|
||||
}
|
||||
|
||||
func (s *SliceBacked[T]) Get(i uint32) T {
|
||||
s.mut.RLock()
|
||||
defer s.mut.RUnlock()
|
||||
|
||||
if i >= s.len {
|
||||
var null T
|
||||
return null
|
||||
}
|
||||
return s.slice[i]
|
||||
}
|
||||
|
||||
func (s *SliceBacked[T]) GetTip() T {
|
||||
s.mut.RLock()
|
||||
defer s.mut.RUnlock()
|
||||
|
||||
if s.len == 0 {
|
||||
var null T
|
||||
return null
|
||||
}
|
||||
return s.slice[s.len-1]
|
||||
}
|
||||
|
||||
func (s *SliceBacked[T]) Len() uint32 {
|
||||
s.mut.RLock()
|
||||
defer s.mut.RUnlock()
|
||||
|
||||
return s.len
|
||||
}
|
||||
|
||||
func (s *SliceBacked[T]) Cap() int {
|
||||
s.mut.RLock()
|
||||
defer s.mut.RUnlock()
|
||||
|
||||
return cap(s.slice)
|
||||
}
|
||||
|
||||
func (s *SliceBacked[T]) GetSlice() []T {
|
||||
// This is not thread safe so I won't bother with locking
|
||||
return s.slice
|
||||
}
|
||||
|
||||
func BisectRight[T constraints.Ordered](s *SliceBacked[T], searchKeys []T) []uint32 {
|
||||
s.mut.RLock()
|
||||
defer s.mut.RUnlock()
|
||||
|
||||
found := make([]uint32, len(searchKeys))
|
||||
for i, k := range searchKeys {
|
||||
found[i] = internal.BisectRight(s.slice[:s.Len()], k)
|
||||
}
|
||||
|
||||
return found
|
||||
}
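As a quick usage sketch of the new package (the values are made up, and BisectRight is assumed to follow the usual bisect-right semantics of internal.BisectRight):

package main

import (
	"fmt"

	"github.com/lbryio/herald.go/db/stack"
)

func main() {
	// Track a running list of cumulative tx counts, one entry per block height.
	txCounts := stack.NewSliceBacked[uint32](8)
	for _, c := range []uint32{10, 25, 40, 90} {
		txCounts.Push(c)
	}

	fmt.Println(txCounts.Len(), txCounts.GetTip()) // 4 90

	// Look up insertion points for a batch of tx numbers in one locked pass.
	fmt.Println(stack.BisectRight(txCounts, []uint32{5, 30}))
}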
|
123
db/stack/stack_test.go
Normal file
|
@ -0,0 +1,123 @@
|
|||
package stack_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/lbryio/herald.go/db/stack"
|
||||
)
|
||||
|
||||
func TestPush(t *testing.T) {
|
||||
var want uint32 = 3
|
||||
|
||||
stack := stack.NewSliceBacked[int](10)
|
||||
|
||||
stack.Push(0)
|
||||
stack.Push(1)
|
||||
stack.Push(2)
|
||||
|
||||
if got := stack.Len(); got != want {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPushPop(t *testing.T) {
|
||||
stack := stack.NewSliceBacked[int](10)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
stack.Push(i)
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
wantLen := 5 - i
|
||||
|
||||
if got := stack.Len(); int(got) != wantLen {
|
||||
t.Errorf("got %v, want %v", got, wantLen)
|
||||
}
|
||||
|
||||
if got := stack.Pop(); got != 5-i-1 {
|
||||
t.Errorf("got %v, want %v", got, 5-i-1)
|
||||
}
|
||||
|
||||
wantLen -= 1
|
||||
|
||||
if got := stack.Len(); int(got) != wantLen {
|
||||
t.Errorf("got %v, want %v", got, wantLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doPushes(stack *stack.SliceBacked[int], numPushes int) {
|
||||
for i := 0; i < numPushes; i++ {
|
||||
stack.Push(i)
|
||||
}
|
||||
}
|
||||
|
||||
func doPops(stack *stack.SliceBacked[int], numPops int) {
|
||||
for i := 0; i < numPops; i++ {
|
||||
stack.Pop()
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiThreaded(t *testing.T) {
|
||||
stack := stack.NewSliceBacked[int](100000)
|
||||
|
||||
go doPushes(stack, 100000)
|
||||
go doPushes(stack, 100000)
|
||||
go doPushes(stack, 100000)
|
||||
|
||||
time.Sleep(time.Second)
|
||||
|
||||
if stack.Len() != 300000 {
|
||||
t.Errorf("got %v, want %v", stack.Len(), 300000)
|
||||
}
|
||||
|
||||
go doPops(stack, 100000)
|
||||
go doPops(stack, 100000)
|
||||
go doPops(stack, 100000)
|
||||
|
||||
time.Sleep(time.Second)
|
||||
|
||||
if stack.Len() != 0 {
|
||||
t.Errorf("got %v, want %v", stack.Len(), 0)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
stack := stack.NewSliceBacked[int](10)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
stack.Push(i)
|
||||
}
|
||||
|
||||
if got := stack.GetTip(); got != 4 {
|
||||
t.Errorf("got %v, want %v", got, 4)
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
if got := stack.Get(uint32(i)); got != i {
|
||||
t.Errorf("got %v, want %v", got, i)
|
||||
}
|
||||
}
|
||||
|
||||
if got := stack.Get(5); got != 0 {
|
||||
t.Errorf("got %v, want %v", got, 0)
|
||||
}
|
||||
|
||||
slice := stack.GetSlice()
|
||||
|
||||
if len(slice) != 10 {
|
||||
t.Errorf("got %v, want %v", len(slice), 10)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLenCap(t *testing.T) {
|
||||
stack := stack.NewSliceBacked[int](10)
|
||||
|
||||
if got := stack.Len(); got != 0 {
|
||||
t.Errorf("got %v, want %v", got, 0)
|
||||
}
|
||||
|
||||
if got := stack.Cap(); got != 10 {
|
||||
t.Errorf("got %v, want %v", got, 10)
|
||||
}
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
FROM debian:10-slim
|
||||
|
||||
EXPOSE 50051
|
||||
RUN apt-get update && apt-get install curl -y
|
||||
RUN curl -L -o /hub https://github.com/lbryio/hub/releases/download/v0.2021.06.14-beta/hub && chmod +x /hub
|
||||
ENTRYPOINT ["/hub", "serve", "--dev"]
|
10
docker/Dockerfile
Normal file
|
@ -0,0 +1,10 @@
|
|||
FROM golang:alpine as stage1
|
||||
|
||||
EXPOSE 50051
|
||||
RUN mkdir /app
|
||||
WORKDIR /app
|
||||
COPY . /app
|
||||
RUN go build -o herald .
|
||||
FROM alpine:latest
|
||||
COPY --from=stage1 /app/herald /herald
|
||||
ENTRYPOINT ["/herald", "serve"]
|
11
docker/Dockerfile.action
Normal file
|
@ -0,0 +1,11 @@
|
|||
FROM jeffreypicard/hub-github-env:dev
|
||||
|
||||
COPY scripts/build_and_test.sh /build_and_test.sh
|
||||
# COPY . /hub
|
||||
# WORKDIR /hub
|
||||
|
||||
ENV CGO_LDFLAGS "-L/usr/local/lib -lrocksdb -lstdc++ -lm -lz -lsnappy -llz4 -lzstd"
|
||||
ENV CGO_CFLAGS "-I/usr/local/include/rocksdb"
|
||||
ENV LD_LIBRARY_PATH /usr/local/lib
|
||||
|
||||
ENTRYPOINT ["/build_and_test.sh"]
|
13
docker/Dockerfile.action.integration
Normal file
|
@ -0,0 +1,13 @@
|
|||
FROM jeffreypicard/hub-github-env:dev
|
||||
|
||||
COPY scripts/integration_tests.sh /integration_tests.sh
|
||||
COPY scripts/cicd_integration_test_runner.sh /cicd_integration_test_runner.sh
|
||||
COPY herald /herald
|
||||
|
||||
RUN apt install -y jq curl
|
||||
|
||||
ENV CGO_LDFLAGS "-L/usr/local/lib -lrocksdb -lstdc++ -lm -lz -lsnappy -llz4 -lzstd"
|
||||
ENV CGO_CFLAGS "-I/usr/local/include/rocksdb"
|
||||
ENV LD_LIBRARY_PATH /usr/local/lib
|
||||
|
||||
ENTRYPOINT ["/cicd_integration_test_runner.sh"]
|
25
docker/Dockerfile.github
Normal file
|
@ -0,0 +1,25 @@
|
|||
FROM golang:1.18.5-bullseye
|
||||
|
||||
RUN apt-get update -y && \
|
||||
apt-get upgrade -y && \
|
||||
apt-get install -y dnsutils git libsnappy-dev liblz4-dev libzstd-dev zlib1g-dev \
|
||||
autoconf automake libtool curl make g++
|
||||
RUN cd /tmp && \
|
||||
wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.1/protobuf-all-3.17.1.tar.gz && \
|
||||
tar xfzv protobuf-all-3.17.1.tar.gz && \
|
||||
cd protobuf-3.17.1 && \
|
||||
./autogen.sh && \
|
||||
./configure && \
|
||||
make && \
|
||||
make install && \
|
||||
ldconfig && \
|
||||
rm -rf /tmp/proto*
|
||||
RUN cd /tmp && \
|
||||
git clone https://github.com/facebook/rocksdb.git && \
|
||||
cd rocksdb && \
|
||||
git checkout v6.29.5 && \
|
||||
make static_lib && \
|
||||
make install && \
|
||||
rm -rf /tmp/rocksdb
|
||||
|
||||
CMD ["bash"]
|
54
go.mod
|
@ -1,18 +1,50 @@
|
|||
module github.com/lbryio/hub
|
||||
module github.com/lbryio/herald.go
|
||||
|
||||
go 1.16
|
||||
go 1.18
|
||||
|
||||
// replace github.com/lbryio/lbry.go/v3 => /home/loki/dev/lbry/lbry.go
|
||||
|
||||
require (
|
||||
github.com/ReneKroon/ttlcache/v2 v2.8.1
|
||||
github.com/akamensky/argparse v1.2.2
|
||||
github.com/btcsuite/btcutil v1.0.2
|
||||
github.com/golang/protobuf v1.5.2
|
||||
github.com/lbryio/lbry.go/v2 v2.7.2-0.20210625145058-2b155597bf57
|
||||
github.com/go-restruct/restruct v1.2.0-alpha
|
||||
github.com/gorilla/mux v1.7.3
|
||||
github.com/gorilla/rpc v1.2.0
|
||||
github.com/lbryio/lbcutil v1.0.202
|
||||
github.com/lbryio/lbry.go/v3 v3.0.1-beta
|
||||
github.com/linxGnu/grocksdb v1.6.42
|
||||
github.com/olivere/elastic/v7 v7.0.24
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
|
||||
golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea // indirect
|
||||
golang.org/x/text v0.3.6
|
||||
google.golang.org/genproto v0.0.0-20210524171403-669157292da3 // indirect
|
||||
google.golang.org/grpc v1.38.0
|
||||
google.golang.org/protobuf v1.26.0
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
golang.org/x/exp v0.0.0-20220907003533-145caa8ea1d0
|
||||
golang.org/x/text v0.3.7
|
||||
google.golang.org/grpc v1.46.0
|
||||
google.golang.org/protobuf v1.27.1
|
||||
gopkg.in/karalabe/cookiejar.v1 v1.0.0-20141109175019-e1490cae028c
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/lbryio/lbcd v0.22.201-beta-rc4
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/common v0.26.0 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
github.com/stretchr/testify v1.7.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
)
|
||||
|
|
735
go.sum
|
@ -1,50 +1,217 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
|
||||
github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
|
||||
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
|
||||
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
|
||||
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/ReneKroon/ttlcache/v2 v2.8.1 h1:0Exdyt5+vEsdRoFO1T7qDIYM3gq/ETbeYV+vjgcPxZk=
|
||||
github.com/ReneKroon/ttlcache/v2 v2.8.1/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY=
|
||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
|
||||
github.com/akamensky/argparse v1.2.2 h1:P17T0ZjlUNJuWTPPJ2A5dM1wxarHgHqfYH+AZTo2xQA=
|
||||
github.com/akamensky/argparse v1.2.2/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
|
||||
github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4=
|
||||
github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM=
|
||||
github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac=
|
||||
github.com/cockroachdb/errors v1.8.6/go.mod h1:hOm5fabihW+xEyY1kuypGwqT+Vt7rafg04ytBtIpeIQ=
|
||||
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
|
||||
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v0.0.0-20210525181856-e45797baeb78/go.mod h1:1XpB4cLQcF189RAcWi4gUc110zJgtOfT7SVNGY8sOe0=
|
||||
github.com/cockroachdb/pebble v0.0.0-20211124004043-0dc90bc41e62/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU=
|
||||
github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/redact v1.1.1/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE=
|
||||
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
|
||||
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
|
||||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYNg=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
|
||||
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
|
||||
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-ini/ini v1.48.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
||||
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU=
|
||||
github.com/go-restruct/restruct v1.2.0-alpha h1:2Lp474S/9660+SJjpVxoKuWX09JsXHSrdV7Nv3/gkvc=
|
||||
github.com/go-restruct/restruct v1.2.0-alpha/go.mod h1:KqrpKpn4M8OLznErihXTGLlsXFGeLxHUrLRRI/1YjGk=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
|
@ -56,170 +223,660 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
|
|||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190915194858-d3ddacdb130f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk=
|
||||
github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
|
||||
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
|
||||
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
|
||||
github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
|
||||
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
|
||||
github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
|
||||
github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
|
||||
github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
|
||||
github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
|
||||
github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
|
||||
github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA=
|
||||
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lbryio/lbry.go/v2 v2.7.2-0.20210625145058-2b155597bf57 h1:Dzg3a7M9EWS48D0mbYInBjeqX/LB5qRuDgG29ktpmw8=
|
||||
github.com/lbryio/lbry.go/v2 v2.7.2-0.20210625145058-2b155597bf57/go.mod h1:I1q8W9fwU+t0IWNiprPgE1SorWQwcO6ser0nzP3L5Pk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/lbryio/lbcd v0.22.100-beta/go.mod h1:u8SaFX4xdGMMR5xasBGfgApC8pvD4rnK2OujZnrq5gs=
|
||||
github.com/lbryio/lbcd v0.22.100-beta-rc5/go.mod h1:9PbFSlHYX7WlnDQwcTxHVf1W35VAnRsattCSyKOO55g=
|
||||
github.com/lbryio/lbcd v0.22.200-beta/go.mod h1:kNuzGWf808ipTGB0y0WogzsGv5BVM4Qv85Z+JYwC9FA=
|
||||
github.com/lbryio/lbcd v0.22.201-beta-rc4 h1:Xh751Bh/GWRcP5bI6NJ2+zueo2otTcTWapFvFbryP5c=
|
||||
github.com/lbryio/lbcd v0.22.201-beta-rc4/go.mod h1:Jgo48JDINhdOgHHR83J70Q6G42x3WAo9DI//QogcL+E=
|
||||
github.com/lbryio/lbcutil v1.0.201/go.mod h1:gDHc/b+Rdz3J7+VB8e5/Bl9roVf8Q5/8FQCyuK9dXD0=
|
||||
github.com/lbryio/lbcutil v1.0.202-rc3/go.mod h1:LGPtVBBzh4cFXfLFb8ginlFcbA2QwumLNFd0yk/as2o=
|
||||
github.com/lbryio/lbcutil v1.0.202 h1:L0aRMs2bdCUAicD8Xe4NmUEvevDDea3qkIpCSACnftI=
|
||||
github.com/lbryio/lbcutil v1.0.202/go.mod h1:LGPtVBBzh4cFXfLFb8ginlFcbA2QwumLNFd0yk/as2o=
|
||||
github.com/lbryio/lbry.go/v2 v2.7.1/go.mod h1:sUhhSKqPNkiwgBqvBzJIqfLLzGH8hkDGrrO/HcaXzFc=
|
||||
github.com/lbryio/lbry.go/v3 v3.0.1-beta h1:oIpQ5czhtdVSoWZCiOHE9SrqnNsahyCnMhXvXsd2IiM=
|
||||
github.com/lbryio/lbry.go/v3 v3.0.1-beta/go.mod h1:v03OVXSBGNZNDfGoAVyjQV/ZOzBGQyTnWs3jpkssxGM=
|
||||
github.com/lbryio/ozzo-validation v0.0.0-20170323141101-d1008ad1fd04/go.mod h1:fbG/dzobG8r95KzMwckXiLMHfFjZaBRQqC9hPs2XAQ4=
|
||||
github.com/lbryio/types v0.0.0-20201019032447-f0b4476ef386/go.mod h1:CG3wsDv5BiVYQd5i1Jp7wGsaVyjZTJshqXeWMVKsISE=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/linxGnu/grocksdb v1.6.42 h1:nJLoXFuzwBwQQQrXTUgRGRz1QRm7y8pR6CNV/gwrbqs=
|
||||
github.com/linxGnu/grocksdb v1.6.42/go.mod h1:JcMMDBFaDNhRXFYcYXmgQwb/RarSld1PulTI7UzE+w0=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5/go.mod h1:H0aPCWffGOaDcjkw1iB7W9DVLp6GXmfcJY/7YZCWPA4=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
|
||||
github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
|
||||
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/nlopes/slack v0.6.0 h1:jt0jxVQGhssx1Ib7naAOZEZcGdtIhTzkP0nopK0AsRA=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
|
||||
github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
|
||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
|
||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nlopes/slack v0.6.0/go.mod h1:JzQ9m3PMAqcpeCam7UaHSuBuupz7CmpjehYMayT6YOk=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/olivere/elastic/v7 v7.0.24 h1:9ZcCQP3Pvgese7TaypYiVAL49sCEphyIwkVxtRf8jb8=
|
||||
github.com/olivere/elastic/v7 v7.0.24/go.mod h1:OuWmD2DiuYhddWegBKPWQuelVKBLrW0fa/VUYgxuGTY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
||||
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sebdah/goldie v0.0.0-20190531093107-d313ffb52c77/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shirou/gopsutil/v3 v3.21.7/go.mod h1:RGl11Y7XMTQPmHh8F0ayC6haKNBgH4PXMJuTAcMOlz4=
|
||||
github.com/shopspring/decimal v0.0.0-20191009025716-f1972eb1d1f5/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/gunit v1.4.2/go.mod h1:ZjM1ozSIMJlAz/ay4SG8PeKF00ckUp+zMHZXV9/bvak=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4=
|
||||
github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.2/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
|
||||
github.com/ybbus/jsonrpc v0.0.0-20180411222309-2a548b7d822d/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE=
|
||||
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
|
||||
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
|
||||
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
|
||||
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
|
||||
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
|
||||
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
|
||||
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b h1:QAqMVf3pSa6eeTsuklijukjXBlj7Es2QQplab+/RbQ4=
|
||||
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/exp v0.0.0-20211123021643-48cbe7f80d7c/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps=
|
||||
golang.org/x/exp v0.0.0-20220907003533-145caa8ea1d0 h1:17k44ji3KFYG94XS5QEFC8pyuOlMh3IoR+vkmTZmJJs=
|
||||
golang.org/x/exp v0.0.0-20220907003533-145caa8ea1d0/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191009170203-06d7bd2c5f4f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea h1:+WiDlPBBaO+h9vPNZi8uJ3k4BkKQB7Iow3aqwHVA5hI=
|
||||
golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211123173158-ef496fb156ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20210524171403-669157292da3 h1:xFyh6GBb+NO1L0xqb978I3sBPQpk6FrKO0jJGRvdj/0=
|
||||
google.golang.org/genproto v0.0.0-20210524171403-669157292da3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -230,23 +887,49 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
|||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
||||
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
|
||||
gopkg.in/ini.v1 v1.48.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/karalabe/cookiejar.v1 v1.0.0-20141109175019-e1490cae028c h1:4GYkPhjcYLPrPAnoxHVQlH/xcXtWN8pEgqBnHrPAs8c=
|
||||
gopkg.in/karalabe/cookiejar.v1 v1.0.0-20141109175019-e1490cae028c/go.mod h1:xd7qpr5uPMNy4hsRJ5JEBXA8tJjTFmUI1soCjlCIgAE=
|
||||
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 h1:FpCr9V8wuOei4BAen+93HtVJ+XSi+KPbaPKm0Vj5R64=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79/go.mod h1:gWkaRU7CoXpezCBWfWjm3999QqS+1pYPXGbqQCTMzo8=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
|
41 internal/metrics/metrics.go Normal file
@ -0,0 +1,41 @@
package metrics

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var (
    HistogramBuckets = []float64{0.005, 0.025, 0.05, 0.1, 0.25, 0.4, 1, 2, 5, 10, 20, 60, 120, 300}
    // These mirror counters from the python code
    RequestsCount = promauto.NewCounterVec(prometheus.CounterOpts{
        Name: "requests_count",
        Help: "Total number of searches",
    }, []string{"method"})
    // These are unique to the go code
    ErrorsCounter = promauto.NewCounterVec(prometheus.CounterOpts{
        Name: "errors",
        Help: "Number of errors by type",
    }, []string{"error_type"})
    QueryTime = promauto.NewHistogramVec(prometheus.HistogramOpts{
        Name:    "query_time",
        Help:    "Histogram of query times",
        Buckets: HistogramBuckets,
    }, []string{"method"})
    PeersKnown = promauto.NewGauge(prometheus.GaugeOpts{
        Name: "peers_known",
        Help: "Number of peers we know about.",
    })
    PeersSubscribed = promauto.NewGauge(prometheus.GaugeOpts{
        Name: "peers_subbed",
        Help: "Number of peers that are subscribed to us.",
    })
    BlockCount = promauto.NewCounter(prometheus.CounterOpts{
        Name: "block_count",
        Help: "Number of blocks we have processed.",
    })
    ReorgCount = promauto.NewCounter(prometheus.CounterOpts{
        Name: "reorg_count",
        Help: "Number of blockchain reorgs we have done.",
    })
)
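For reference, a minimal sketch of how these collectors could be touched and exposed from the hub process. The import path, the "search" label, and the :2112 listen port are assumptions for illustration, not part of this diff; promauto registers everything on the default Prometheus registry, so the stock promhttp handler is enough.

package main

import (
    "log"
    "net/http"

    "github.com/lbryio/herald.go/internal/metrics" // assumed module path
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// recordSearch shows the typical pattern around one search request:
// bump the request counter and observe the query duration.
func recordSearch() {
    metrics.RequestsCount.WithLabelValues("search").Inc()
    timer := prometheus.NewTimer(metrics.QueryTime.WithLabelValues("search"))
    defer timer.ObserveDuration()
    // ... run the actual Elasticsearch query here ...
}

func main() {
    recordSearch()
    // Expose everything registered via promauto on the default registry.
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(":2112", nil)) // port chosen arbitrarily for the example
}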
14 internal/search.go Normal file
@ -0,0 +1,14 @@
package internal

import (
    "sort"

    "golang.org/x/exp/constraints"
)

// BisectRight returns the index of the first element in the list that is greater than or equal to the value.
// https://stackoverflow.com/questions/29959506/is-there-a-go-analog-of-pythons-bisect-module
func BisectRight[T constraints.Ordered](arr []T, val T) uint32 {
    i := sort.Search(len(arr), func(i int) bool { return arr[i] >= val })
    return uint32(i)
}
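A quick illustrative use of BisectRight against a sorted slice; the slice must already be in ascending order, and the import path is assumed from the module name used elsewhere in this diff.

package main

import (
    "fmt"

    "github.com/lbryio/herald.go/internal"
)

func main() {
    heights := []uint32{10, 20, 30, 40}            // must already be sorted
    fmt.Println(internal.BisectRight(heights, 25)) // 2: first element >= 25 is 30
    fmt.Println(internal.BisectRight(heights, 30)) // 2: equal elements match too
    fmt.Println(internal.BisectRight(heights, 99)) // 4: past the end of the slice
}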
54 internal/strings.go Normal file
@ -0,0 +1,54 @@
package internal

import (
    "encoding/hex"
    "strings"

    "golang.org/x/text/cases"
    "golang.org/x/text/unicode/norm"
)

func StringSplitArg(stringToSplit, separator string) []interface{} {
    split := strings.Split(stringToSplit, separator)
    splitInterface := make([]interface{}, len(split))
    for i, s := range split {
        splitInterface[i] = s
    }
    return splitInterface
}

// NormalizeName normalizes names to remove weird characters and account for capitalization.
func NormalizeName(s string) string {
    c := cases.Fold()
    return c.String(norm.NFD.String(s))
}

// ReverseBytesInPlace reverses the bytes. thanks, Satoshi 😒
func ReverseBytesInPlace(s []byte) {
    for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
        s[i], s[j] = s[j], s[i]
    }
}

// TxIdToTxHash converts the txid to a hash for returning from the hub.
func TxIdToTxHash(txid string) []byte {
    t, err := hex.DecodeString(txid)
    if err != nil {
        return nil
    }

    ReverseBytesInPlace(t)

    return t
}

// TxHashToTxId converts the txHash from the response format back to an id.
func TxHashToTxId(txHash []byte) string {
    t := make([]byte, len(txHash))
    copy(t, txHash)

    ReverseBytesInPlace(t)

    return hex.EncodeToString(t)
}
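A short round-trip sketch of the txid helpers and the name normalization; the hex string below is an arbitrary placeholder, not a real txid, and the import path is assumed.

package main

import (
    "fmt"

    "github.com/lbryio/herald.go/internal"
)

func main() {
    // Txids are displayed in reversed byte order relative to the raw hash,
    // so converting there and back returns the original lowercase hex string.
    txid := "00ffaabb" // arbitrary example hex, not a real txid
    hash := internal.TxIdToTxHash(txid)
    fmt.Println(internal.TxHashToTxId(hash) == txid) // true

    // Case folding plus NFD makes name comparisons case-insensitive.
    fmt.Println(internal.NormalizeName("LBRY") == internal.NormalizeName("lbry")) // true
}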
10 internal/types.go Normal file
@ -0,0 +1,10 @@
package internal

// internal types that need their own file to avoid circular imports.

// HeightHash struct for the height subscription endpoint.
type HeightHash struct {
    Height      uint64
    BlockHash   []byte
    BlockHeader []byte
}
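For illustration only, a HeightHash value as it might be handed to height subscribers; the height and hash bytes are placeholders, not values from this diff.

package main

import (
    "fmt"

    "github.com/lbryio/herald.go/internal"
)

func main() {
    hh := internal.HeightHash{
        Height:    1000000,          // example height
        BlockHash: make([]byte, 32), // placeholder 32-byte hash
    }
    fmt.Printf("new block at height %d (%x)\n", hh.Height, hh.BlockHash)
}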
196 main.go
|
@ -3,165 +3,48 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/akamensky/argparse"
|
||||
pb "github.com/lbryio/hub/protobuf/go"
|
||||
"github.com/lbryio/hub/server"
|
||||
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||
_ "net/http/pprof"
|
||||
|
||||
"github.com/lbryio/herald.go/internal"
|
||||
pb "github.com/lbryio/herald.go/protobuf/go"
|
||||
"github.com/lbryio/herald.go/server"
|
||||
"github.com/lbryio/lbry.go/v3/extras/stop"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/reflection"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultHost = "0.0.0.0"
|
||||
defaultPort = "50051"
|
||||
defaultEsHost = "http://localhost"
|
||||
defaultEsPort = "9200"
|
||||
)
|
||||
|
||||
|
||||
func GetEnvironment(data []string, getkeyval func(item string) (key, val string)) map[string]string {
|
||||
items := make(map[string]string)
|
||||
for _, item := range data {
|
||||
key, val := getkeyval(item)
|
||||
items[key] = val
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
func GetEnvironmentStandard() map[string]string {
|
||||
return GetEnvironment(os.Environ(), func(item string) (key, val string) {
|
||||
splits := strings.Split(item, "=")
|
||||
key = splits[0]
|
||||
val = splits[1]
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func parseArgs(searchRequest *pb.SearchRequest) *server.Args {
|
||||
|
||||
environment := GetEnvironmentStandard()
|
||||
parser := argparse.NewParser("hub", "hub server and client")
|
||||
|
||||
serveCmd := parser.NewCommand("serve", "start the hub server")
|
||||
|
||||
host := parser.String("", "rpchost", &argparse.Options{Required: false, Help: "host", Default: defaultHost})
|
||||
port := parser.String("", "rpcport", &argparse.Options{Required: false, Help: "port", Default: defaultPort})
|
||||
esHost := parser.String("", "eshost", &argparse.Options{Required: false, Help: "host", Default: defaultEsHost})
|
||||
esPort := parser.String("", "esport", &argparse.Options{Required: false, Help: "port", Default: defaultEsPort})
|
||||
dev := parser.Flag("", "dev", &argparse.Options{Required: false, Help: "port", Default: false})
|
||||
|
||||
text := parser.String("", "text", &argparse.Options{Required: false, Help: "text query"})
|
||||
name := parser.String("", "name", &argparse.Options{Required: false, Help: "name"})
|
||||
claimType := parser.String("", "claim_type", &argparse.Options{Required: false, Help: "claim_type"})
|
||||
id := parser.String("", "id", &argparse.Options{Required: false, Help: "id"})
|
||||
author := parser.String("", "author", &argparse.Options{Required: false, Help: "author"})
|
||||
title := parser.String("", "title", &argparse.Options{Required: false, Help: "title"})
|
||||
description := parser.String("", "description", &argparse.Options{Required: false, Help: "description"})
|
||||
channelId := parser.String("", "channel_id", &argparse.Options{Required: false, Help: "channel id"})
|
||||
channelIds := parser.StringList("", "channel_ids", &argparse.Options{Required: false, Help: "channel ids"})
|
||||
|
||||
// Now parse the arguments
|
||||
err := parser.Parse(os.Args)
|
||||
if err != nil {
|
||||
log.Fatalln(parser.Usage(err))
|
||||
}
|
||||
|
||||
|
||||
args := &server.Args{
|
||||
Serve: false,
|
||||
Host: *host,
|
||||
Port: ":" + *port,
|
||||
EsHost: *esHost,
|
||||
EsPort: *esPort,
|
||||
Dev: *dev,
|
||||
}
|
||||
|
||||
if esHost, ok := environment["ELASTIC_HOST"]; ok {
|
||||
args.EsHost = esHost
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(args.EsHost, "http") {
|
||||
args.EsHost = "http://" + args.EsHost
|
||||
}
|
||||
|
||||
if esPort, ok := environment["ELASTIC_PORT"]; ok {
|
||||
args.EsPort = esPort
|
||||
}
|
||||
|
||||
/*
|
||||
Verify no invalid argument combinations
|
||||
*/
|
||||
if len(*channelIds) > 0 && *channelId != "" {
|
||||
log.Fatal("Cannot specify both channel_id and channel_ids")
|
||||
}
|
||||
|
||||
if serveCmd.Happened() {
|
||||
args.Serve = true
|
||||
}
|
||||
|
||||
if *text != "" {
|
||||
searchRequest.Text = *text
|
||||
}
|
||||
if *name!= "" {
|
||||
searchRequest.Name = []string{*name}
|
||||
}
|
||||
if *claimType != "" {
|
||||
searchRequest.ClaimType = []string{*claimType}
|
||||
}
|
||||
if *id != "" {
|
||||
searchRequest.XId = [][]byte{[]byte(*id)}
|
||||
}
|
||||
if *author != "" {
|
||||
searchRequest.Author = []string{*author}
|
||||
}
|
||||
if *title != "" {
|
||||
searchRequest.Title = []string{*title}
|
||||
}
|
||||
if *description != "" {
|
||||
searchRequest.Description = []string{*description}
|
||||
}
|
||||
if *channelId != "" {
|
||||
searchRequest.ChannelId = &pb.InvertibleField{Invert: false, Value: []string{*channelId}}
|
||||
}
|
||||
if len(*channelIds) > 0 {
|
||||
searchRequest.ChannelId = &pb.InvertibleField{Invert: false, Value: *channelIds}
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
log.SetFormatter(&log.TextFormatter{
|
||||
FullTimestamp: true,
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
searchRequest := &pb.SearchRequest{}
|
||||
|
||||
args := parseArgs(searchRequest)
|
||||
args := server.ParseArgs(searchRequest)
|
||||
|
||||
if args.Serve {
|
||||
if args.CmdType == server.ServeCmd {
|
||||
// This will cancel goroutines when the server finishes.
|
||||
stopGroup := stop.New()
|
||||
|
||||
l, err := net.Listen("tcp", args.Port)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to listen: %v", err)
|
||||
}
|
||||
initsignals()
|
||||
interrupt := interruptListener()
|
||||
|
||||
s := server.MakeHubServer(args)
|
||||
pb.RegisterHubServer(s.GrpcServer, s)
|
||||
reflection.Register(s.GrpcServer)
|
||||
s := server.MakeHubServer(stopGroup, args)
|
||||
go s.Run()
|
||||
|
||||
log.Printf("listening on %s\n", l.Addr().String())
|
||||
log.Println(s.Args)
|
||||
if err := s.GrpcServer.Serve(l); err != nil {
|
||||
log.Fatalf("failed to serve: %v", err)
|
||||
}
|
||||
defer s.Stop()
|
||||
|
||||
<-interrupt
|
||||
return
|
||||
}
|
||||
|
||||
conn, err := grpc.Dial("localhost"+args.Port,
|
||||
grpc.WithInsecure(),
|
||||
conn, err := grpc.Dial("localhost:"+fmt.Sprintf("%d", args.Port),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -171,18 +54,23 @@ func main() {
|
|||
|
||||
c := pb.NewHubClient(conn)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
ctxWTimeout, cancelQuery := context.WithTimeout(ctx, time.Second)
|
||||
defer cancelQuery()
|
||||
|
||||
log.Println(args)
|
||||
switch args.CmdType {
|
||||
case server.SearchCmd:
|
||||
r, err := c.Search(ctxWTimeout, searchRequest)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
r, err := c.Search(ctx, searchRequest)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Printf("found %d results\n", r.GetTotal())
|
||||
|
||||
log.Printf("found %d results\n", r.GetTotal())
|
||||
|
||||
for _, t := range r.Txos {
|
||||
fmt.Printf("%s:%d\n", util.TxHashToTxId(t.TxHash), t.Nout)
|
||||
for _, t := range r.Txos {
|
||||
fmt.Printf("%s:%d\n", internal.TxHashToTxId(t.TxHash), t.Nout)
|
||||
}
|
||||
default:
|
||||
log.Fatalln("Unknown Command Type!")
|
||||
}
|
||||
}
|
||||
|
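The diff above moves the client from the deprecated grpc.WithInsecure() to the credentials API. As a reference, a minimal standalone client along the same lines; it assumes a hub listening on the default port 50051 shown in this diff and uses the generated Ping RPC (EmptyMessage in, StringValue out).

package main

import (
    "context"
    "time"

    pb "github.com/lbryio/herald.go/protobuf/go"
    log "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    conn, err := grpc.Dial("localhost:50051",
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithBlock(),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    c := pb.NewHubClient(conn)
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    // Ping is the simplest round trip exposed by the Hub service.
    r, err := c.Ping(ctx, &pb.EmptyMessage{})
    if err != nil {
        log.Fatal(err)
    }
    log.Println(r.GetValue())
}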
|
|
@ -20,7 +20,7 @@ MIN_VERSION="3.0"
|
|||
version_gte "$VERSION" "$MIN_VERSION" || { echo >&2 "error: protoc version must be >= $MIN_VERSION (your $PROTOC is $VERSION)"; exit 1; }
|
||||
|
||||
|
||||
hash protoc-gen-go-grpc 2>/dev/null || go get github.com/golang/protobuf/protoc-gen-go google.golang.org/grpc/cmd/protoc-gen-go-grpc
|
||||
hash protoc-gen-go-grpc 2>/dev/null || go install google.golang.org/protobuf/cmd/protoc-gen-go google.golang.org/grpc/cmd/protoc-gen-go-grpc
|
||||
hash protoc-gen-go-grpc 2>/dev/null || { echo >&2 'error: Make sure $GOPATH/bin is in your $PATH'; exit 1; }
|
||||
|
||||
|
||||
|
|
1051 protobuf/definitions/claim.proto Normal file
File diff suppressed because it is too large
|
@ -1,13 +1,35 @@
|
|||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/lbryio/hub/protobuf/go/pb";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
option go_package = "github.com/lbryio/herald.go/protobuf/go/pb";
|
||||
import "result.proto";
|
||||
|
||||
package pb;
|
||||
|
||||
service Hub {
|
||||
rpc Search (SearchRequest) returns (Outputs) {}
|
||||
rpc Search(SearchRequest) returns (Outputs) {}
|
||||
rpc Ping(EmptyMessage) returns (StringValue) {}
|
||||
rpc Hello(HelloMessage) returns (HelloMessage) {}
|
||||
rpc AddPeer(ServerMessage) returns (StringValue) {}
|
||||
rpc PeerSubscribe(ServerMessage) returns (StringValue) {}
|
||||
rpc Version(EmptyMessage) returns (StringValue) {}
|
||||
rpc Features(EmptyMessage) returns (StringValue) {}
|
||||
rpc Broadcast(EmptyMessage) returns (UInt32Value) {}
|
||||
rpc Height(EmptyMessage) returns (UInt32Value) {}
|
||||
rpc HeightSubscribe(UInt32Value) returns (stream UInt32Value) {}
|
||||
rpc Resolve(StringArray) returns (Outputs) {}
|
||||
}
|
||||
|
||||
message EmptyMessage {}
|
||||
|
||||
message ServerMessage {
|
||||
string address = 1;
|
||||
string port = 2;
|
||||
}
|
||||
|
||||
message HelloMessage {
|
||||
string port = 1;
|
||||
string host = 2;
|
||||
repeated ServerMessage servers = 3;
|
||||
}
|
||||
|
||||
message InvertibleField {
|
||||
|
@ -15,6 +37,22 @@ message InvertibleField {
|
|||
repeated string value = 2;
|
||||
}
|
||||
|
||||
message StringValue {
|
||||
string value = 1;
|
||||
}
|
||||
|
||||
message StringArray {
|
||||
repeated string value = 1;
|
||||
}
|
||||
|
||||
message BoolValue {
|
||||
bool value = 1;
|
||||
}
|
||||
|
||||
message UInt32Value {
|
||||
uint32 value = 1;
|
||||
}
|
||||
|
||||
message RangeField {
|
||||
enum Op {
|
||||
EQ = 0;
|
||||
|
@ -24,73 +62,63 @@ message RangeField {
|
|||
GT = 4;
|
||||
}
|
||||
Op op = 1;
|
||||
repeated string value = 2;
|
||||
repeated int32 value = 2;
|
||||
}
|
||||
|
||||
message SearchRequest {
|
||||
string text = 1;
|
||||
repeated string name = 2;
|
||||
.google.protobuf.Int32Value amount_order = 3;
|
||||
.google.protobuf.Int32Value limit = 4;
|
||||
InvertibleField claim_id = 1;
|
||||
InvertibleField channel_id = 2;
|
||||
string text = 3;
|
||||
int32 limit = 4;
|
||||
repeated string order_by = 5;
|
||||
.google.protobuf.Int32Value offset = 6;
|
||||
.google.protobuf.BoolValue is_controlling = 7;
|
||||
string last_take_over_height = 19;
|
||||
InvertibleField claim_id = 20;
|
||||
repeated string claim_name = 22;
|
||||
repeated string normalized = 23;
|
||||
RangeField tx_position = 24;
|
||||
RangeField amount = 25;
|
||||
RangeField timestamp = 26;
|
||||
RangeField creation_timestamp = 27;
|
||||
RangeField height = 28;
|
||||
RangeField creation_height = 29;
|
||||
RangeField activation_height = 30;
|
||||
RangeField expiration_height = 31;
|
||||
RangeField release_time = 32;
|
||||
repeated string short_url = 33;
|
||||
repeated string canonical_url = 34;
|
||||
repeated string title = 35;
|
||||
repeated string author = 36;
|
||||
repeated string description = 37;
|
||||
repeated string claim_type = 38;
|
||||
RangeField reposted = 39;
|
||||
repeated string stream_type = 40;
|
||||
repeated string media_type = 41;
|
||||
RangeField fee_amount = 42;
|
||||
repeated string fee_currency = 43;
|
||||
RangeField duration = 44;
|
||||
string reposted_claim_hash = 45;
|
||||
RangeField censor_type = 46;
|
||||
string claims_in_channel = 47;
|
||||
RangeField channel_join = 48;
|
||||
.google.protobuf.BoolValue signature_valid = 49;
|
||||
RangeField effective_amount = 51;
|
||||
RangeField support_amount = 52;
|
||||
RangeField trending_group = 53;
|
||||
RangeField trending_mixed = 54;
|
||||
RangeField trending_local = 55;
|
||||
RangeField trending_global = 56;
|
||||
InvertibleField channel_id = 57;
|
||||
InvertibleField channel_ids = 58;
|
||||
repeated string tx_id = 59;
|
||||
.google.protobuf.Int32Value tx_nout = 60;
|
||||
repeated string signature = 61;
|
||||
repeated string signature_digest = 62;
|
||||
repeated string public_key_bytes = 63;
|
||||
repeated string public_key_hash = 64;
|
||||
string public_key_id = 65;
|
||||
repeated bytes _id = 66;
|
||||
repeated string any_tags = 67;
|
||||
repeated string all_tags = 68;
|
||||
repeated string not_tags = 69;
|
||||
repeated string reposted_claim_id = 70;
|
||||
.google.protobuf.BoolValue has_channel_signature = 71;
|
||||
.google.protobuf.BoolValue has_source = 72;
|
||||
.google.protobuf.Int32Value limit_claims_per_channel = 73;
|
||||
repeated string any_languages = 74;
|
||||
repeated string all_languages = 75;
|
||||
.google.protobuf.BoolValue remove_duplicates = 76;
|
||||
.google.protobuf.BoolValue no_totals = 77;
|
||||
repeated string search_indices = 78;
|
||||
}
|
||||
uint32 offset = 6;
|
||||
bool is_controlling = 7;
|
||||
string last_take_over_height = 8;
|
||||
string claim_name = 9;
|
||||
string normalized_name = 10;
|
||||
repeated RangeField tx_position = 11;
|
||||
repeated RangeField amount = 12;
|
||||
repeated RangeField timestamp = 13;
|
||||
repeated RangeField creation_timestamp = 14;
|
||||
repeated RangeField height = 15;
|
||||
repeated RangeField creation_height = 16;
|
||||
repeated RangeField activation_height = 17;
|
||||
repeated RangeField expiration_height = 18;
|
||||
repeated RangeField release_time = 19;
|
||||
string short_url = 20;
|
||||
string canonical_url = 21;
|
||||
string title = 22;
|
||||
string author = 23;
|
||||
string description = 24;
|
||||
repeated string claim_type = 25;
|
||||
repeated RangeField repost_count = 26;
|
||||
repeated string stream_type = 27;
|
||||
repeated string media_type = 28;
|
||||
repeated RangeField fee_amount = 29;
|
||||
string fee_currency = 30;
|
||||
repeated RangeField duration = 31;
|
||||
string reposted_claim_id = 32;
|
||||
repeated RangeField censor_type = 33;
|
||||
string claims_in_channel = 34;
|
||||
BoolValue is_signature_valid = 36;
|
||||
repeated RangeField effective_amount = 37;
|
||||
repeated RangeField support_amount = 38;
|
||||
repeated RangeField trending_score = 39;
|
||||
string tx_id = 43;
|
||||
UInt32Value tx_nout = 44;
|
||||
string signature = 45;
|
||||
string signature_digest = 46;
|
||||
string public_key_bytes = 47;
|
||||
string public_key_id = 48;
|
||||
repeated string any_tags = 49;
|
||||
repeated string all_tags = 50;
|
||||
repeated string not_tags = 51;
|
||||
bool has_channel_signature = 52;
|
||||
BoolValue has_source = 53;
|
||||
int32 limit_claims_per_channel = 54;
|
||||
repeated string any_languages = 55;
|
||||
repeated string all_languages = 56;
|
||||
bool remove_duplicates = 57;
|
||||
bool no_totals = 58;
|
||||
string sd_hash = 59;
|
||||
}
|
||||
|
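With the wrapper types dropped in favor of plain scalars and RangeField now carrying an Op plus int32 values, a search request can be built directly. A hedged sketch, assuming the standard protoc-gen-go field and enum names for the messages above; the query text, channel id, and height cutoff are made-up examples.

package main

import (
    "fmt"

    pb "github.com/lbryio/herald.go/protobuf/go"
)

func main() {
    req := &pb.SearchRequest{
        Text:      "cats",             // free-text query
        ClaimType: []string{"stream"}, // repeated string claim_type
        Limit:     20,                 // plain int32 now, no wrapper message
        ChannelId: &pb.InvertibleField{
            Invert: false,
            Value:  []string{"deadbeef"}, // placeholder channel claim id
        },
        // repeated RangeField height = 15; Op GT keeps only claims above the height.
        Height: []*pb.RangeField{{Op: pb.RangeField_GT, Value: []int32{1000000}}},
    }
    fmt.Println(req.String())
}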
|
|
@ -1,6 +1,6 @@
|
|||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/lbryio/hub/protobuf/go/pb";
|
||||
option go_package = "github.com/lbryio/herald.go/protobuf/go/pb";
|
||||
|
||||
package pb;
|
||||
|
||||
|
@ -35,13 +35,9 @@ message ClaimMeta {
|
|||
uint32 expiration_height = 9;
|
||||
uint32 claims_in_channel = 10;
|
||||
uint32 reposted = 11;
|
||||
|
||||
uint64 effective_amount = 20;
|
||||
uint64 support_amount = 21;
|
||||
uint32 trending_group = 22;
|
||||
float trending_mixed = 23;
|
||||
float trending_local = 24;
|
||||
float trending_global = 25;
|
||||
double trending_score = 22;
|
||||
}
|
||||
|
||||
message Error {
|
||||
|
@ -59,4 +55,4 @@ message Error {
|
|||
message Blocked {
|
||||
uint32 count = 1;
|
||||
Output channel = 2;
|
||||
}
|
||||
}
|
5031 protobuf/go/claim.pb.go Normal file
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -1,4 +1,8 @@
|
|||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc v3.17.1
|
||||
// source: hub.proto
|
||||
|
||||
package pb
|
||||
|
||||
|
@ -19,6 +23,16 @@ const _ = grpc.SupportPackageIsVersion7
|
|||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type HubClient interface {
|
||||
Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*Outputs, error)
|
||||
Ping(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*StringValue, error)
|
||||
Hello(ctx context.Context, in *HelloMessage, opts ...grpc.CallOption) (*HelloMessage, error)
|
||||
AddPeer(ctx context.Context, in *ServerMessage, opts ...grpc.CallOption) (*StringValue, error)
|
||||
PeerSubscribe(ctx context.Context, in *ServerMessage, opts ...grpc.CallOption) (*StringValue, error)
|
||||
Version(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*StringValue, error)
|
||||
Features(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*StringValue, error)
|
||||
Broadcast(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*UInt32Value, error)
|
||||
Height(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*UInt32Value, error)
|
||||
HeightSubscribe(ctx context.Context, in *UInt32Value, opts ...grpc.CallOption) (Hub_HeightSubscribeClient, error)
|
||||
Resolve(ctx context.Context, in *StringArray, opts ...grpc.CallOption) (*Outputs, error)
|
||||
}
|
||||
|
||||
type hubClient struct {
|
||||
|
@ -38,11 +52,134 @@ func (c *hubClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) Ping(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*StringValue, error) {
|
||||
out := new(StringValue)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/Ping", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) Hello(ctx context.Context, in *HelloMessage, opts ...grpc.CallOption) (*HelloMessage, error) {
|
||||
out := new(HelloMessage)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/Hello", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) AddPeer(ctx context.Context, in *ServerMessage, opts ...grpc.CallOption) (*StringValue, error) {
|
||||
out := new(StringValue)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/AddPeer", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) PeerSubscribe(ctx context.Context, in *ServerMessage, opts ...grpc.CallOption) (*StringValue, error) {
|
||||
out := new(StringValue)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/PeerSubscribe", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) Version(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*StringValue, error) {
|
||||
out := new(StringValue)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/Version", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) Features(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*StringValue, error) {
|
||||
out := new(StringValue)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/Features", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) Broadcast(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*UInt32Value, error) {
|
||||
out := new(UInt32Value)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/Broadcast", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) Height(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*UInt32Value, error) {
|
||||
out := new(UInt32Value)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/Height", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *hubClient) HeightSubscribe(ctx context.Context, in *UInt32Value, opts ...grpc.CallOption) (Hub_HeightSubscribeClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &Hub_ServiceDesc.Streams[0], "/pb.Hub/HeightSubscribe", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &hubHeightSubscribeClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type Hub_HeightSubscribeClient interface {
|
||||
Recv() (*UInt32Value, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type hubHeightSubscribeClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *hubHeightSubscribeClient) Recv() (*UInt32Value, error) {
|
||||
m := new(UInt32Value)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
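The streaming client above only exposes Recv, so callers loop until the stream ends. A minimal consumption sketch from outside the pb package; the address, starting height of 0, and use of insecure credentials are assumptions taken from elsewhere in this diff.

package main

import (
    "context"
    "io"
    "log"

    pb "github.com/lbryio/herald.go/protobuf/go"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    c := pb.NewHubClient(conn)
    // Subscribe starting from height 0, i.e. be told about every new block.
    stream, err := c.HeightSubscribe(context.Background(), &pb.UInt32Value{Value: 0})
    if err != nil {
        log.Fatal(err)
    }
    for {
        h, err := stream.Recv()
        if err == io.EOF {
            return // server closed the stream
        }
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("new height: %d", h.GetValue())
    }
}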
||||
|
||||
func (c *hubClient) Resolve(ctx context.Context, in *StringArray, opts ...grpc.CallOption) (*Outputs, error) {
|
||||
out := new(Outputs)
|
||||
err := c.cc.Invoke(ctx, "/pb.Hub/Resolve", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// HubServer is the server API for Hub service.
|
||||
// All implementations must embed UnimplementedHubServer
|
||||
// for forward compatibility
|
||||
type HubServer interface {
|
||||
Search(context.Context, *SearchRequest) (*Outputs, error)
|
||||
Ping(context.Context, *EmptyMessage) (*StringValue, error)
|
||||
Hello(context.Context, *HelloMessage) (*HelloMessage, error)
|
||||
AddPeer(context.Context, *ServerMessage) (*StringValue, error)
|
||||
PeerSubscribe(context.Context, *ServerMessage) (*StringValue, error)
|
||||
Version(context.Context, *EmptyMessage) (*StringValue, error)
|
||||
Features(context.Context, *EmptyMessage) (*StringValue, error)
|
||||
Broadcast(context.Context, *EmptyMessage) (*UInt32Value, error)
|
||||
Height(context.Context, *EmptyMessage) (*UInt32Value, error)
|
||||
HeightSubscribe(*UInt32Value, Hub_HeightSubscribeServer) error
|
||||
Resolve(context.Context, *StringArray) (*Outputs, error)
|
||||
mustEmbedUnimplementedHubServer()
|
||||
}
|
||||
|
||||
|
@ -53,6 +190,36 @@ type UnimplementedHubServer struct {
|
|||
func (UnimplementedHubServer) Search(context.Context, *SearchRequest) (*Outputs, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Search not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) Ping(context.Context, *EmptyMessage) (*StringValue, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) Hello(context.Context, *HelloMessage) (*HelloMessage, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Hello not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) AddPeer(context.Context, *ServerMessage) (*StringValue, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method AddPeer not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) PeerSubscribe(context.Context, *ServerMessage) (*StringValue, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method PeerSubscribe not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) Version(context.Context, *EmptyMessage) (*StringValue, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) Features(context.Context, *EmptyMessage) (*StringValue, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Features not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) Broadcast(context.Context, *EmptyMessage) (*UInt32Value, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Broadcast not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) Height(context.Context, *EmptyMessage) (*UInt32Value, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Height not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) HeightSubscribe(*UInt32Value, Hub_HeightSubscribeServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method HeightSubscribe not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) Resolve(context.Context, *StringArray) (*Outputs, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Resolve not implemented")
|
||||
}
|
||||
func (UnimplementedHubServer) mustEmbedUnimplementedHubServer() {}
|
||||
|
||||
// UnsafeHubServer may be embedded to opt out of forward compatibility for this service.
|
||||
|
@ -84,6 +251,189 @@ func _Hub_Search_Handler(srv interface{}, ctx context.Context, dec func(interfac
|
|||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EmptyMessage)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).Ping(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/Ping",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).Ping(ctx, req.(*EmptyMessage))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_Hello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(HelloMessage)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).Hello(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/Hello",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).Hello(ctx, req.(*HelloMessage))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_AddPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ServerMessage)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).AddPeer(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/AddPeer",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).AddPeer(ctx, req.(*ServerMessage))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_PeerSubscribe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ServerMessage)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).PeerSubscribe(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/PeerSubscribe",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).PeerSubscribe(ctx, req.(*ServerMessage))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EmptyMessage)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).Version(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/Version",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).Version(ctx, req.(*EmptyMessage))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_Features_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EmptyMessage)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).Features(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/Features",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).Features(ctx, req.(*EmptyMessage))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_Broadcast_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EmptyMessage)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).Broadcast(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/Broadcast",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).Broadcast(ctx, req.(*EmptyMessage))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_Height_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EmptyMessage)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).Height(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/Height",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).Height(ctx, req.(*EmptyMessage))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Hub_HeightSubscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(UInt32Value)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(HubServer).HeightSubscribe(m, &hubHeightSubscribeServer{stream})
|
||||
}
|
||||
|
||||
type Hub_HeightSubscribeServer interface {
|
||||
Send(*UInt32Value) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type hubHeightSubscribeServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *hubHeightSubscribeServer) Send(m *UInt32Value) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
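As a hedged illustration of how this server-streaming wrapper is used, the sketch below shows a `HeightSubscribe` implementation pushing heights through `Send`. It assumes it sits in package `pb` next to the generated code; the `heightServer` type and its `newHeights` channel are hypothetical stand-ins for the hub's real new-block notifications.

```go
// heightServer illustrates only the streaming RPC; embedding
// UnimplementedHubServer satisfies the rest of the HubServer interface.
type heightServer struct {
	UnimplementedHubServer
	newHeights <-chan uint32 // hypothetical source of new block heights
}

// HeightSubscribe streams every height at or above the requested starting
// height until the source closes or the client disconnects.
func (s *heightServer) HeightSubscribe(start *UInt32Value, stream Hub_HeightSubscribeServer) error {
	for h := range s.newHeights {
		if h < start.GetValue() {
			continue
		}
		// Send marshals the message onto the underlying grpc.ServerStream.
		if err := stream.Send(&UInt32Value{Value: h}); err != nil {
			return err
		}
	}
	return nil
}
```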
|
||||
|
||||
func _Hub_Resolve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(StringArray)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HubServer).Resolve(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pb.Hub/Resolve",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HubServer).Resolve(ctx, req.(*StringArray))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// Hub_ServiceDesc is the grpc.ServiceDesc for Hub service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
|
@ -95,7 +445,49 @@ var Hub_ServiceDesc = grpc.ServiceDesc{
|
|||
MethodName: "Search",
|
||||
Handler: _Hub_Search_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Ping",
|
||||
Handler: _Hub_Ping_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Hello",
|
||||
Handler: _Hub_Hello_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AddPeer",
|
||||
Handler: _Hub_AddPeer_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "PeerSubscribe",
|
||||
Handler: _Hub_PeerSubscribe_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Version",
|
||||
Handler: _Hub_Version_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Features",
|
||||
Handler: _Hub_Features_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Broadcast",
|
||||
Handler: _Hub_Broadcast_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Height",
|
||||
Handler: _Hub_Height_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Resolve",
|
||||
Handler: _Hub_Resolve_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "HeightSubscribe",
|
||||
Handler: _Hub_HeightSubscribe_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "hub.proto",
|
||||
}
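For orientation, a minimal sketch of how a server built on this generated code is typically wired up: a concrete type embeds `UnimplementedHubServer` (so any RPC it does not override keeps returning `codes.Unimplemented`) and is registered against `Hub_ServiceDesc` via the generated `RegisterHubServer` helper. The `heraldServer` type, the `pb` import path, and the listen address below are assumptions for illustration, not the repository's actual wiring.

```go
package main

import (
	"context"
	"log"
	"net"

	pb "github.com/lbryio/herald.go/protobuf/go" // assumed import path for the generated package
	"google.golang.org/grpc"
)

// heraldServer is a hypothetical HubServer implementation; embedding
// UnimplementedHubServer keeps it forward compatible when new RPCs are
// added to hub.proto.
type heraldServer struct {
	pb.UnimplementedHubServer
}

// Version overrides the generated stub, which would otherwise return
// codes.Unimplemented.
func (s *heraldServer) Version(ctx context.Context, _ *pb.EmptyMessage) (*pb.StringValue, error) {
	return &pb.StringValue{Value: "dev"}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalln(err)
	}
	s := grpc.NewServer()
	// RegisterHubServer registers Hub_ServiceDesc (above) on the server.
	pb.RegisterHubServer(s, &heraldServer{})
	log.Fatalln(s.Serve(lis))
}
```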
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc v3.17.1
|
||||
// source: result.proto
|
||||
|
||||
|
@ -281,10 +281,7 @@ type ClaimMeta struct {
|
|||
Reposted uint32 `protobuf:"varint,11,opt,name=reposted,proto3" json:"reposted"`
|
||||
EffectiveAmount uint64 `protobuf:"varint,20,opt,name=effective_amount,json=effectiveAmount,proto3" json:"effective_amount"`
|
||||
SupportAmount uint64 `protobuf:"varint,21,opt,name=support_amount,json=supportAmount,proto3" json:"support_amount"`
|
||||
TrendingGroup uint32 `protobuf:"varint,22,opt,name=trending_group,json=trendingGroup,proto3" json:"trending_group"`
|
||||
TrendingMixed float32 `protobuf:"fixed32,23,opt,name=trending_mixed,json=trendingMixed,proto3" json:"trending_mixed"`
|
||||
TrendingLocal float32 `protobuf:"fixed32,24,opt,name=trending_local,json=trendingLocal,proto3" json:"trending_local"`
|
||||
TrendingGlobal float32 `protobuf:"fixed32,25,opt,name=trending_global,json=trendingGlobal,proto3" json:"trending_global"`
|
||||
TrendingScore float64 `protobuf:"fixed64,22,opt,name=trending_score,json=trendingScore,proto3" json:"trending_score"`
|
||||
}
|
||||
|
||||
func (x *ClaimMeta) Reset() {
|
||||
|
@ -410,30 +407,9 @@ func (x *ClaimMeta) GetSupportAmount() uint64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (x *ClaimMeta) GetTrendingGroup() uint32 {
|
||||
func (x *ClaimMeta) GetTrendingScore() float64 {
|
||||
if x != nil {
|
||||
return x.TrendingGroup
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ClaimMeta) GetTrendingMixed() float32 {
|
||||
if x != nil {
|
||||
return x.TrendingMixed
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ClaimMeta) GetTrendingLocal() float32 {
|
||||
if x != nil {
|
||||
return x.TrendingLocal
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ClaimMeta) GetTrendingGlobal() float32 {
|
||||
if x != nil {
|
||||
return x.TrendingGlobal
|
||||
return x.TrendingScore
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
@ -583,7 +559,7 @@ var file_result_proto_rawDesc = []byte{
|
|||
0x61, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x21, 0x0a, 0x05, 0x65, 0x72,
|
||||
0x72, 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45,
|
||||
0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x06, 0x0a,
|
||||
0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xa3, 0x05, 0x0a, 0x09, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x4d,
|
||||
0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xac, 0x04, 0x0a, 0x09, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x4d,
|
||||
0x65, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74,
|
||||
0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x22, 0x0a, 0x06, 0x72, 0x65, 0x70,
|
||||
|
@ -616,34 +592,27 @@ var file_result_proto_rawDesc = []byte{
|
|||
0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72,
|
||||
0x74, 0x5f, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d,
|
||||
0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a,
|
||||
0x0e, 0x74, 0x72, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18,
|
||||
0x16, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x74, 0x72, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x47,
|
||||
0x72, 0x6f, 0x75, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
|
||||
0x5f, 0x6d, 0x69, 0x78, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0d, 0x74, 0x72,
|
||||
0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4d, 0x69, 0x78, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x74,
|
||||
0x72, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x18, 0x20,
|
||||
0x01, 0x28, 0x02, 0x52, 0x0d, 0x74, 0x72, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x63,
|
||||
0x61, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x67,
|
||||
0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x19, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x74, 0x72, 0x65,
|
||||
0x6e, 0x64, 0x69, 0x6e, 0x67, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x22, 0xa9, 0x01, 0x0a, 0x05,
|
||||
0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x22, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43,
|
||||
0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78,
|
||||
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x25, 0x0a,
|
||||
0x07, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b,
|
||||
0x2e, 0x70, 0x62, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x07, 0x62, 0x6c, 0x6f,
|
||||
0x63, 0x6b, 0x65, 0x64, 0x22, 0x41, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x0c,
|
||||
0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x00, 0x12, 0x0d,
|
||||
0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a,
|
||||
0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4c,
|
||||
0x4f, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x03, 0x22, 0x45, 0x0a, 0x07, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
|
||||
0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e,
|
||||
0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x4f,
|
||||
0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x26,
|
||||
0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x62, 0x72,
|
||||
0x79, 0x69, 0x6f, 0x2f, 0x68, 0x75, 0x62, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
||||
0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x0e, 0x74, 0x72, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18,
|
||||
0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x74, 0x72, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53,
|
||||
0x63, 0x6f, 0x72, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x22,
|
||||
0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x70,
|
||||
0x62, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x25, 0x0a, 0x07, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65,
|
||||
0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x6c, 0x6f,
|
||||
0x63, 0x6b, 0x65, 0x64, 0x52, 0x07, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x22, 0x41, 0x0a,
|
||||
0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
|
||||
0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46,
|
||||
0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49,
|
||||
0x44, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x03,
|
||||
0x22, 0x45, 0x0a, 0x07, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63,
|
||||
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e,
|
||||
0x74, 0x12, 0x24, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x07,
|
||||
0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x62, 0x72, 0x79, 0x69, 0x6f, 0x2f, 0x68, 0x65, 0x72,
|
||||
0x61, 0x6c, 0x64, 0x2e, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
|
||||
0x67, 0x6f, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
173
protobuf/python/claim_pb2.py
Normal file
173
protobuf/python/claim_pb2.py
Normal file
File diff suppressed because one or more lines are too long
4
protobuf/python/claim_pb2_grpc.py
Normal file
4
protobuf/python/claim_pb2_grpc.py
Normal file
|
@ -0,0 +1,4 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
||||
|
132
protobuf/python/hub_pb2.py
Normal file
132
protobuf/python/hub_pb2.py
Normal file
|
@ -0,0 +1,132 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: hub.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import message as _message
|
||||
from google.protobuf import reflection as _reflection
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
import result_pb2 as result__pb2
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"\x0e\n\x0c\x45mptyMessage\".\n\rServerMessage\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\"N\n\x0cHelloMessage\x12\x0c\n\x04port\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\"\n\x07servers\x18\x03 \x03(\x0b\x32\x11.pb.ServerMessage\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1c\n\x0bStringArray\x12\r\n\x05value\x18\x01 \x03(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\x05\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x8e\x0c\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\x05\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x03(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x03(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x03(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x03(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x03(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x03(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x03(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_score\x18\' \x03(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. 
\x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\x05\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x12\x0f\n\x07sd_hash\x18; \x01(\t2\x9b\x04\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x12+\n\x04Ping\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12-\n\x05Hello\x12\x10.pb.HelloMessage\x1a\x10.pb.HelloMessage\"\x00\x12/\n\x07\x41\x64\x64Peer\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12\x35\n\rPeerSubscribe\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12.\n\x07Version\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12/\n\x08\x46\x65\x61tures\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12\x30\n\tBroadcast\x12\x10.pb.EmptyMessage\x1a\x0f.pb.UInt32Value\"\x00\x12-\n\x06Height\x12\x10.pb.EmptyMessage\x1a\x0f.pb.UInt32Value\"\x00\x12\x37\n\x0fHeightSubscribe\x12\x0f.pb.UInt32Value\x1a\x0f.pb.UInt32Value\"\x00\x30\x01\x12)\n\x07Resolve\x12\x0f.pb.StringArray\x1a\x0b.pb.Outputs\"\x00\x42,Z*github.com/lbryio/herald.go/protobuf/go/pbb\x06proto3')
|
||||
|
||||
|
||||
|
||||
_EMPTYMESSAGE = DESCRIPTOR.message_types_by_name['EmptyMessage']
|
||||
_SERVERMESSAGE = DESCRIPTOR.message_types_by_name['ServerMessage']
|
||||
_HELLOMESSAGE = DESCRIPTOR.message_types_by_name['HelloMessage']
|
||||
_INVERTIBLEFIELD = DESCRIPTOR.message_types_by_name['InvertibleField']
|
||||
_STRINGVALUE = DESCRIPTOR.message_types_by_name['StringValue']
|
||||
_STRINGARRAY = DESCRIPTOR.message_types_by_name['StringArray']
|
||||
_BOOLVALUE = DESCRIPTOR.message_types_by_name['BoolValue']
|
||||
_UINT32VALUE = DESCRIPTOR.message_types_by_name['UInt32Value']
|
||||
_RANGEFIELD = DESCRIPTOR.message_types_by_name['RangeField']
|
||||
_SEARCHREQUEST = DESCRIPTOR.message_types_by_name['SearchRequest']
|
||||
_RANGEFIELD_OP = _RANGEFIELD.enum_types_by_name['Op']
|
||||
EmptyMessage = _reflection.GeneratedProtocolMessageType('EmptyMessage', (_message.Message,), {
|
||||
'DESCRIPTOR' : _EMPTYMESSAGE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.EmptyMessage)
|
||||
})
|
||||
_sym_db.RegisterMessage(EmptyMessage)
|
||||
|
||||
ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), {
|
||||
'DESCRIPTOR' : _SERVERMESSAGE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.ServerMessage)
|
||||
})
|
||||
_sym_db.RegisterMessage(ServerMessage)
|
||||
|
||||
HelloMessage = _reflection.GeneratedProtocolMessageType('HelloMessage', (_message.Message,), {
|
||||
'DESCRIPTOR' : _HELLOMESSAGE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.HelloMessage)
|
||||
})
|
||||
_sym_db.RegisterMessage(HelloMessage)
|
||||
|
||||
InvertibleField = _reflection.GeneratedProtocolMessageType('InvertibleField', (_message.Message,), {
|
||||
'DESCRIPTOR' : _INVERTIBLEFIELD,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.InvertibleField)
|
||||
})
|
||||
_sym_db.RegisterMessage(InvertibleField)
|
||||
|
||||
StringValue = _reflection.GeneratedProtocolMessageType('StringValue', (_message.Message,), {
|
||||
'DESCRIPTOR' : _STRINGVALUE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.StringValue)
|
||||
})
|
||||
_sym_db.RegisterMessage(StringValue)
|
||||
|
||||
StringArray = _reflection.GeneratedProtocolMessageType('StringArray', (_message.Message,), {
|
||||
'DESCRIPTOR' : _STRINGARRAY,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.StringArray)
|
||||
})
|
||||
_sym_db.RegisterMessage(StringArray)
|
||||
|
||||
BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), {
|
||||
'DESCRIPTOR' : _BOOLVALUE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.BoolValue)
|
||||
})
|
||||
_sym_db.RegisterMessage(BoolValue)
|
||||
|
||||
UInt32Value = _reflection.GeneratedProtocolMessageType('UInt32Value', (_message.Message,), {
|
||||
'DESCRIPTOR' : _UINT32VALUE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.UInt32Value)
|
||||
})
|
||||
_sym_db.RegisterMessage(UInt32Value)
|
||||
|
||||
RangeField = _reflection.GeneratedProtocolMessageType('RangeField', (_message.Message,), {
|
||||
'DESCRIPTOR' : _RANGEFIELD,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.RangeField)
|
||||
})
|
||||
_sym_db.RegisterMessage(RangeField)
|
||||
|
||||
SearchRequest = _reflection.GeneratedProtocolMessageType('SearchRequest', (_message.Message,), {
|
||||
'DESCRIPTOR' : _SEARCHREQUEST,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.SearchRequest)
|
||||
})
|
||||
_sym_db.RegisterMessage(SearchRequest)
|
||||
|
||||
_HUB = DESCRIPTOR.services_by_name['Hub']
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
DESCRIPTOR._serialized_options = b'Z*github.com/lbryio/herald.go/protobuf/go/pb'
|
||||
_EMPTYMESSAGE._serialized_start=31
|
||||
_EMPTYMESSAGE._serialized_end=45
|
||||
_SERVERMESSAGE._serialized_start=47
|
||||
_SERVERMESSAGE._serialized_end=93
|
||||
_HELLOMESSAGE._serialized_start=95
|
||||
_HELLOMESSAGE._serialized_end=173
|
||||
_INVERTIBLEFIELD._serialized_start=175
|
||||
_INVERTIBLEFIELD._serialized_end=223
|
||||
_STRINGVALUE._serialized_start=225
|
||||
_STRINGVALUE._serialized_end=253
|
||||
_STRINGARRAY._serialized_start=255
|
||||
_STRINGARRAY._serialized_end=283
|
||||
_BOOLVALUE._serialized_start=285
|
||||
_BOOLVALUE._serialized_end=311
|
||||
_UINT32VALUE._serialized_start=313
|
||||
_UINT32VALUE._serialized_end=341
|
||||
_RANGEFIELD._serialized_start=343
|
||||
_RANGEFIELD._serialized_end=449
|
||||
_RANGEFIELD_OP._serialized_start=403
|
||||
_RANGEFIELD_OP._serialized_end=449
|
||||
_SEARCHREQUEST._serialized_start=452
|
||||
_SEARCHREQUEST._serialized_end=2002
|
||||
_HUB._serialized_start=2005
|
||||
_HUB._serialized_end=2544
|
||||
# @@protoc_insertion_point(module_scope)
|
397
protobuf/python/hub_pb2_grpc.py
Normal file
397
protobuf/python/hub_pb2_grpc.py
Normal file
|
@ -0,0 +1,397 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
||||
|
||||
import hub_pb2 as hub__pb2
|
||||
import result_pb2 as result__pb2
|
||||
|
||||
|
||||
class HubStub(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
def __init__(self, channel):
|
||||
"""Constructor.
|
||||
|
||||
Args:
|
||||
channel: A grpc.Channel.
|
||||
"""
|
||||
self.Search = channel.unary_unary(
|
||||
'/pb.Hub/Search',
|
||||
request_serializer=hub__pb2.SearchRequest.SerializeToString,
|
||||
response_deserializer=result__pb2.Outputs.FromString,
|
||||
)
|
||||
self.Ping = channel.unary_unary(
|
||||
'/pb.Hub/Ping',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.Hello = channel.unary_unary(
|
||||
'/pb.Hub/Hello',
|
||||
request_serializer=hub__pb2.HelloMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.HelloMessage.FromString,
|
||||
)
|
||||
self.AddPeer = channel.unary_unary(
|
||||
'/pb.Hub/AddPeer',
|
||||
request_serializer=hub__pb2.ServerMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.PeerSubscribe = channel.unary_unary(
|
||||
'/pb.Hub/PeerSubscribe',
|
||||
request_serializer=hub__pb2.ServerMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.Version = channel.unary_unary(
|
||||
'/pb.Hub/Version',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.Features = channel.unary_unary(
|
||||
'/pb.Hub/Features',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.Broadcast = channel.unary_unary(
|
||||
'/pb.Hub/Broadcast',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.UInt32Value.FromString,
|
||||
)
|
||||
self.Height = channel.unary_unary(
|
||||
'/pb.Hub/Height',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.UInt32Value.FromString,
|
||||
)
|
||||
self.HeightSubscribe = channel.unary_stream(
|
||||
'/pb.Hub/HeightSubscribe',
|
||||
request_serializer=hub__pb2.UInt32Value.SerializeToString,
|
||||
response_deserializer=hub__pb2.UInt32Value.FromString,
|
||||
)
|
||||
self.Resolve = channel.unary_unary(
|
||||
'/pb.Hub/Resolve',
|
||||
request_serializer=hub__pb2.StringArray.SerializeToString,
|
||||
response_deserializer=result__pb2.Outputs.FromString,
|
||||
)
|
||||
|
||||
|
||||
class HubServicer(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
def Search(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Ping(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Hello(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def AddPeer(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def PeerSubscribe(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Version(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Features(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Broadcast(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Height(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def HeightSubscribe(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Resolve(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
|
||||
def add_HubServicer_to_server(servicer, server):
|
||||
rpc_method_handlers = {
|
||||
'Search': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Search,
|
||||
request_deserializer=hub__pb2.SearchRequest.FromString,
|
||||
response_serializer=result__pb2.Outputs.SerializeToString,
|
||||
),
|
||||
'Ping': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Ping,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'Hello': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Hello,
|
||||
request_deserializer=hub__pb2.HelloMessage.FromString,
|
||||
response_serializer=hub__pb2.HelloMessage.SerializeToString,
|
||||
),
|
||||
'AddPeer': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.AddPeer,
|
||||
request_deserializer=hub__pb2.ServerMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'PeerSubscribe': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.PeerSubscribe,
|
||||
request_deserializer=hub__pb2.ServerMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'Version': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Version,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'Features': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Features,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'Broadcast': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Broadcast,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.UInt32Value.SerializeToString,
|
||||
),
|
||||
'Height': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Height,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.UInt32Value.SerializeToString,
|
||||
),
|
||||
'HeightSubscribe': grpc.unary_stream_rpc_method_handler(
|
||||
servicer.HeightSubscribe,
|
||||
request_deserializer=hub__pb2.UInt32Value.FromString,
|
||||
response_serializer=hub__pb2.UInt32Value.SerializeToString,
|
||||
),
|
||||
'Resolve': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Resolve,
|
||||
request_deserializer=hub__pb2.StringArray.FromString,
|
||||
response_serializer=result__pb2.Outputs.SerializeToString,
|
||||
),
|
||||
}
|
||||
generic_handler = grpc.method_handlers_generic_handler(
|
||||
'pb.Hub', rpc_method_handlers)
|
||||
server.add_generic_rpc_handlers((generic_handler,))
|
||||
|
||||
|
||||
# This class is part of an EXPERIMENTAL API.
|
||||
class Hub(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
@staticmethod
|
||||
def Search(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Search',
|
||||
hub__pb2.SearchRequest.SerializeToString,
|
||||
result__pb2.Outputs.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Ping(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Ping',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Hello(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Hello',
|
||||
hub__pb2.HelloMessage.SerializeToString,
|
||||
hub__pb2.HelloMessage.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def AddPeer(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/AddPeer',
|
||||
hub__pb2.ServerMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def PeerSubscribe(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/PeerSubscribe',
|
||||
hub__pb2.ServerMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Version(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Version',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Features(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Features',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Broadcast(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Broadcast',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.UInt32Value.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Height(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Height',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.UInt32Value.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def HeightSubscribe(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_stream(request, target, '/pb.Hub/HeightSubscribe',
|
||||
hub__pb2.UInt32Value.SerializeToString,
|
||||
hub__pb2.UInt32Value.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Resolve(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Resolve',
|
||||
hub__pb2.StringArray.SerializeToString,
|
||||
result__pb2.Outputs.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
78
protobuf/python/result_pb2.py
Normal file
78
protobuf/python/result_pb2.py
Normal file
|
@ -0,0 +1,78 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: result.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import message as _message
|
||||
from google.protobuf import reflection as _reflection
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xe6\x02\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_score\x18\x16 \x01(\x01\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.OutputB,Z*github.com/lbryio/herald.go/protobuf/go/pbb\x06proto3')
|
||||
|
||||
|
||||
|
||||
_OUTPUTS = DESCRIPTOR.message_types_by_name['Outputs']
|
||||
_OUTPUT = DESCRIPTOR.message_types_by_name['Output']
|
||||
_CLAIMMETA = DESCRIPTOR.message_types_by_name['ClaimMeta']
|
||||
_ERROR = DESCRIPTOR.message_types_by_name['Error']
|
||||
_BLOCKED = DESCRIPTOR.message_types_by_name['Blocked']
|
||||
_ERROR_CODE = _ERROR.enum_types_by_name['Code']
|
||||
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), {
|
||||
'DESCRIPTOR' : _OUTPUTS,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Outputs)
|
||||
})
|
||||
_sym_db.RegisterMessage(Outputs)
|
||||
|
||||
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), {
|
||||
'DESCRIPTOR' : _OUTPUT,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Output)
|
||||
})
|
||||
_sym_db.RegisterMessage(Output)
|
||||
|
||||
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), {
|
||||
'DESCRIPTOR' : _CLAIMMETA,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.ClaimMeta)
|
||||
})
|
||||
_sym_db.RegisterMessage(ClaimMeta)
|
||||
|
||||
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), {
|
||||
'DESCRIPTOR' : _ERROR,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Error)
|
||||
})
|
||||
_sym_db.RegisterMessage(Error)
|
||||
|
||||
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), {
|
||||
'DESCRIPTOR' : _BLOCKED,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Blocked)
|
||||
})
|
||||
_sym_db.RegisterMessage(Blocked)
|
||||
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
DESCRIPTOR._serialized_options = b'Z*github.com/lbryio/herald.go/protobuf/go/pb'
|
||||
_OUTPUTS._serialized_start=21
|
||||
_OUTPUTS._serialized_end=172
|
||||
_OUTPUT._serialized_start=174
|
||||
_OUTPUT._serialized_end=297
|
||||
_CLAIMMETA._serialized_start=300
|
||||
_CLAIMMETA._serialized_end=658
|
||||
_ERROR._serialized_start=661
|
||||
_ERROR._serialized_end=809
|
||||
_ERROR_CODE._serialized_start=744
|
||||
_ERROR_CODE._serialized_end=809
|
||||
_BLOCKED._serialized_start=811
|
||||
_BLOCKED._serialized_end=864
|
||||
# @@protoc_insertion_point(module_scope)
|
4
protobuf/python/result_pb2_grpc.py
Normal file
4
protobuf/python/result_pb2_grpc.py
Normal file
|
@ -0,0 +1,4 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
||||
|
57
readme.md
57
readme.md
|
@ -1,17 +1,16 @@
|
|||
# LBRY Hub
|
||||
# LBRY Herald
|
||||
|
||||
A hub provides back-end services to LBRY clients. Services include
|
||||
herald.go is a not-yet-feature-complete Go rewrite of the [existing implementation in python](https://github.com/lbryio/hub/tree/master/hub/herald). A herald server provides back-end services to LBRY clients. Services include
|
||||
|
||||
- URL resolution
|
||||
- search
|
||||
- wallet services (getting and sending transactions, address subscription)
|
||||
- hub federation and discovery
|
||||
|
||||
This project will eventually subsume and replace the
|
||||
[wallet server](https://github.com/lbryio/lbry-sdk/blob/v0.92.0/docker/Dockerfile.wallet_server)
|
||||
[herald](https://github.com/lbryio/hub/blob/master/docs/docker_examples/hub-compose.yml#L38)
|
||||
and the [lighthouse](https://github.com/lbryio/lighthouse) search provider.
|
||||
|
||||
![](./diagram.png)
|
||||
![](https://raw.githubusercontent.com/lbryio/hub/master/docs/diagram.png)
|
||||
|
||||
## Installation
|
||||
|
||||
|
@ -26,13 +25,18 @@ Follow the instructions [here](https://lbry.tech/resources/wallet-server).
|
|||
### Run this hub
|
||||
|
||||
```bash
|
||||
./hub serve
|
||||
./herald serve
|
||||
```
|
||||
|
||||
```bash
|
||||
# run with remote services disabled so it can run completely solo
|
||||
./herald serve --disable-rocksdb-refresh --disable-load-peers --disable-resolve --disable-es --disable-blocking-and-filtering
|
||||
```
|
||||
|
||||
### Search for stuff
|
||||
|
||||
```bash
|
||||
./hub search text goes here
|
||||
./herald search text goes here
|
||||
```
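
The same operations are available over the hub's gRPC API on port 50051, which the grpcurl-based integration tests in `scripts/integration_tests.sh` also exercise. A minimal Go client sketch (the `pb` import path is assumed from the repository layout; the URL is the one used by the integration tests):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	pb "github.com/lbryio/herald.go/protobuf/go" // assumed import path for the generated bindings
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalln(err)
	}
	defer conn.Close()

	client := pb.NewHubClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Resolve a LBRY URL, the same call the integration tests make
	// against pb.Hub.Resolve with grpcurl.
	out, err := client.Resolve(ctx, &pb.StringArray{Value: []string{"@Styxhexenhammer666:2"}})
	if err != nil {
		log.Fatalln(err)
	}
	for _, txo := range out.GetTxos() {
		fmt.Printf("%x:%d at height %d\n", txo.GetTxHash(), txo.GetNout(), txo.GetHeight())
	}
}
```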
|
||||
|
||||
## Contributing
|
||||
|
@ -41,23 +45,56 @@ Contributions to this project are welcome, encouraged, and compensated. Details
|
|||
|
||||
### Dev Dependencies
|
||||
|
||||
Install Go 1.14+
|
||||
Install Go 1.18+
|
||||
|
||||
- Ubuntu: `sudo add-apt-repository ppa:longsleep/golang-backports && sudo apt install golang-go`
|
||||
- Ubuntu: `sudo snap install go`
|
||||
- OSX: `brew install go`
|
||||
- Windows https://golang.org/doc/install
|
||||
|
||||
Download `protoc` from https://github.com/protocolbuffers/protobuf/releases and make sure it is
|
||||
executable and in your path.
|
||||
|
||||
Install Go plugin for protoc:
|
||||
Install the protoc plugins for Go and Python:
|
||||
|
||||
```
|
||||
go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
|
||||
pip install grpcio grpcio-tools github3.py
|
||||
```
|
||||
|
||||
Lastly, the hub needs protobuf version 3.17.1. It may work with a newer version, but this is what it is built with. On Ubuntu systems you will have to install it from source; see the GitHub actions in `.github/workflows` for an example of this.
|
||||
|
||||
Install rocksdb and its dependencies, and set your CGO flags accordingly. On Ubuntu we use v6.29.5 for feature and static build support.
|
||||
|
||||
```
|
||||
sudo apt-get install libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev libzstd-dev liblz4-dev
|
||||
wget https://github.com/facebook/rocksdb/archive/refs/tags/v6.29.5.tar.gz
|
||||
tar xfzv v6.29.5.tar.gz
|
||||
cd rocksdb-6.29.5
|
||||
make static_lib
|
||||
sudo make install
|
||||
export CGO_CFLAGS="-I/usr/local/include"
|
||||
export CGO_LDFLAGS="-L/usr/local/lib -lrocksdb -lstdc++ -lm -lz -lsnappy -llz4 -lzstd -lbz2"
|
||||
```
|
||||
|
||||
```
|
||||
https://github.com/protocolbuffers/protobuf/releases/download/v3.17.1/protobuf-all-3.17.1.tar.gz
|
||||
```
|
||||
|
||||
If you can run `./protobuf/build.sh` without errors, you have `go` and `protoc` installed correctly.
|
||||
|
||||
On Linux you probably need to increase the open file limits
|
||||
|
||||
```
|
||||
ulimit -n 1000000
|
||||
sysctl -w fs.file-max=1000000
|
||||
```
|
||||
|
||||
and in `/etc/security/limits.conf` or `/etc/sysctl.conf` set:
|
||||
|
||||
```
|
||||
fs.file-max = 1000000
|
||||
```
|
||||
|
||||
Finally, run the block processor as described under Usage.
|
||||
|
||||
### Running from Source
|
||||
|
|
16
requirements.txt
Normal file
16
requirements.txt
Normal file
|
@ -0,0 +1,16 @@
|
|||
certifi==2022.6.15
|
||||
cffi==1.15.1
|
||||
charset-normalizer==2.1.0
|
||||
cryptography==37.0.4
|
||||
github3.py==3.2.0
|
||||
grpcio==1.47.0
|
||||
grpcio-tools==1.47.0
|
||||
idna==3.3
|
||||
protobuf==3.20.1
|
||||
pycparser==2.21
|
||||
PyJWT==2.4.0
|
||||
python-dateutil==2.8.2
|
||||
requests==2.28.1
|
||||
six==1.16.0
|
||||
uritemplate==4.1.1
|
||||
urllib3==1.26.11
|
29
scripts/build_and_push.sh
Executable file
29
scripts/build_and_push.sh
Executable file
|
@ -0,0 +1,29 @@
|
|||
#!/bin/bash
|
||||
|
||||
IMPORT_PATH="github.com/lbryio/herald.go"
|
||||
|
||||
function print_and_die() {
|
||||
echo "$1"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Get new tags from remote
|
||||
git fetch --tags
|
||||
|
||||
# Get latest tag name
|
||||
LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)
|
||||
# Make sure it matches the format vX.XXXX.XX.XX
|
||||
[[ $LATEST_TAG =~ ^v[0-9]+\.[0-9]{4}\.[0-9]{2}\.[0-9]{2}.*$ ]] || print_and_die "bad version ${LATEST_TAG}"
|
||||
VERSION=$LATEST_TAG
|
||||
|
||||
echo "using tag $LATEST_TAG"
|
||||
|
||||
# Checkout latest tag
|
||||
git checkout "$LATEST_TAG"
|
||||
|
||||
# CGO_ENABLED=0 go build -v -ldflags "-X ${IMPORT_PATH}/meta.Version=${VERSION}"
|
||||
go build -o herald -v -ldflags "-X ${IMPORT_PATH}/meta.Version=${VERSION}"
|
||||
docker build . -t lbry/herald.go:latest
|
||||
docker tag lbry/herald.go:latest lbry/herald.go:"$LATEST_TAG"
|
||||
docker push lbry/herald.go:latest
|
||||
docker push lbry/herald.go:"$LATEST_TAG"
|
5
scripts/build_and_test.sh
Executable file
5
scripts/build_and_test.sh
Executable file
|
@ -0,0 +1,5 @@
|
|||
#!/bin/bash
|
||||
./protobuf/build.sh
|
||||
go version
|
||||
go build -o herald .
|
||||
go test -v -race -cover ./...
|
14
scripts/cicd_integration_test_runner.sh
Normal file
14
scripts/cicd_integration_test_runner.sh
Normal file
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# cicd_integration_test_runner.sh
|
||||
#
|
||||
# simple script to kick off herald and call the integration testing
|
||||
# script
|
||||
#
|
||||
# N.B. this currently just works locally until we figure out a way to have
|
||||
# the data in the cicd environment.
|
||||
#
|
||||
|
||||
./herald serve --db-path /mnt/sdb1/wallet_server/_data/lbry-rocksdb &
|
||||
|
||||
./integration_tests.sh
|
|
@ -3,4 +3,4 @@
|
|||
hash reflex 2>/dev/null || go get github.com/cespare/reflex
|
||||
hash reflex 2>/dev/null || { echo >&2 'Make sure '"$(go env GOPATH)"'/bin is in your $PATH'; exit 1; }
|
||||
|
||||
reflex --decoration=none --start-service=true -- sh -c "go run . serve --dev"
|
||||
reflex --decoration=none --start-service=true -- sh -c "go run . serve --debug"
|
235
scripts/integration_tests.sh
Executable file
235
scripts/integration_tests.sh
Executable file
|
@ -0,0 +1,235 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# integration_testing.sh
|
||||
#
|
||||
# GitHub Action CI/CD based integration tests for herald.go
|
||||
# These are smoke / sanity tests for the server behaving correctly on a "live"
|
||||
# system, and look for reasonable response codes, not specific correct
|
||||
# behavior. Those are covered in unit tests.
|
||||
#
|
||||
# N.B.
|
||||
# For the curl based json tests, the `id` field must be present in the request.
|
||||
#
|
||||
|
||||
# global variables
|
||||
|
||||
RES=(0)
|
||||
FINALRES=0
|
||||
# functions
|
||||
|
||||
|
||||
function logical_or {
|
||||
for res in ${RES[@]}; do
|
||||
if [ $res -eq 1 -o $FINALRES -eq 1 ]; then
|
||||
FINALRES=1
|
||||
return
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function want_got {
|
||||
if [ "${WANT}" != "${GOT}" ]; then
|
||||
echo "WANT: ${WANT}"
|
||||
echo "GOT: ${GOT}"
|
||||
RES+=(1)
|
||||
else
|
||||
RES+=(0)
|
||||
fi
|
||||
}
|
||||
|
||||
function want_greater {
|
||||
if [ ${WANT} -ge ${GOT} ]; then
|
||||
echo "WANT: ${WANT}"
|
||||
echo "GOT: ${GOT}"
|
||||
RES+=(1)
|
||||
else
|
||||
RES+=(0)
|
||||
fi
|
||||
}
|
||||
|
||||
function test_command_with_want {
|
||||
echo $CMD
|
||||
GOT=`eval $CMD`
|
||||
|
||||
want_got
|
||||
}
|
||||
|
||||
# grpc endpoint testing
|
||||
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
grpcurl -plaintext -d '{"value": ["@Styxhexenhammer666:2"]}' 127.0.0.1:50051 pb.Hub.Resolve
|
||||
| jq .txos[0].txHash | sed 's/"//g'
|
||||
EOM
|
||||
WANT="VOFP8MQEwps9Oa5NJJQ18WfVzUzlpCjst0Wz3xyOPd4="
|
||||
test_command_with_want
|
||||
|
||||
# GOT=`eval $CMD`
|
||||
|
||||
#want_got
|
||||
|
||||
##
|
||||
## N.B. This is a degenerate case that takes a long time to run.
|
||||
## The runtime should be fixed, but in the meantime, we definitely should
|
||||
## ensure this behaves as expected.
|
||||
##
|
||||
## TODO: Test runtime doesn't exceed worst case.
|
||||
##
|
||||
|
||||
#WANT=806389
|
||||
#read -r -d '' CMD <<- EOM
|
||||
# grpcurl -plaintext -d '{"value": ["foo"]}' 127.0.0.1:50051 pb.Hub.Resolve | jq .txos[0].height
|
||||
#EOM
|
||||
# test_command_with_want
|
||||
|
||||
# json rpc endpoint testing
|
||||
|
||||
## blockchain.block
|
||||
|
||||
### blockchain.block.get_chunk
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.block.get_chunk", "params": [0]}'
|
||||
| jq .result | sed 's/"//g' | head -c 100
|
||||
EOM
|
||||
WANT="010000000000000000000000000000000000000000000000000000000000000000000000cc59e59ff97ac092b55e423aa549"
|
||||
test_command_with_want
|
||||
|
||||
### blockchain.block.get_header
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.block.get_header", "params": []}'
|
||||
| jq .result.timestamp
|
||||
EOM
|
||||
WANT=1446058291
|
||||
test_command_with_want
|
||||
|
||||
### blockchain.block.headers
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.block.headers", "params": []}'
|
||||
| jq .result.count
|
||||
EOM
|
||||
WANT=0
|
||||
test_command_with_want
|
||||
|
||||
## blockchain.claimtrie
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.claimtrie.resolve", "params":[{"Data": ["@Styxhexenhammer666:2"]}]}'
|
||||
| jq .result.txos[0].tx_hash | sed 's/"//g'
|
||||
EOM
|
||||
WANT="VOFP8MQEwps9Oa5NJJQ18WfVzUzlpCjst0Wz3xyOPd4="
|
||||
test_command_with_want
|
||||
|
||||
## blockchain.address
|
||||
|
||||
### blockchain.address.get_balance
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.address.get_balance", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
|
||||
| jq .result.confirmed
|
||||
EOM
|
||||
WANT=44415602186
|
||||
test_command_with_want
|
||||
|
||||
## blockchain.address.get_history
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.address.get_history", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
|
||||
| jq '.result.confirmed | length'
|
||||
EOM
|
||||
WANT=82
|
||||
test_command_with_want
|
||||
|
||||
## blockchain.address.listunspent
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.address.listunspent", "params":[{"Address": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
|
||||
| jq '.result | length'
|
||||
EOM
|
||||
WANT=32
|
||||
test_command_with_want
|
||||
|
||||
# blockchain.scripthash
|
||||
|
||||
## blockchain.scripthash.get_mempool
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.scripthash.get_mempool", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
|
||||
| jq .error | sed 's/"//g'
|
||||
EOM
|
||||
WANT="encoding/hex: invalid byte: U+0047 'G'"
|
||||
test_command_with_want
|
||||
|
||||
## blockchain.scripthash.get_history
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.scripthash.get_history", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
|
||||
| jq .error | sed 's/"//g'
|
||||
EOM
|
||||
WANT="encoding/hex: invalid byte: U+0047 'G'"
|
||||
test_command_with_want
|
||||
|
||||
## blockchain.scripthash.listunspent
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "blockchain.scripthash.listunspent", "params":[{"scripthash": "bGqWuXRVm5bBqLvLPEQQpvsNxJ5ubc6bwN"}]}'
|
||||
| jq .error | sed 's/"//g'
|
||||
EOM
|
||||
WANT="encoding/hex: invalid byte: U+0047 'G'"
|
||||
test_command_with_want
|
||||
|
||||
## server.banner
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "server.banner", "params":[]}'
|
||||
| jq .result | sed 's/"//g'
|
||||
EOM
|
||||
WANT="You are connected to an 0.107.0 server."
|
||||
test_command_with_want
|
||||
|
||||
## server.version
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "server.version", "params":[]}'
|
||||
| jq .result | sed 's/"//g'
|
||||
EOM
|
||||
WANT="0.107.0"
|
||||
test_command_with_want
|
||||
|
||||
## server.features
|
||||
|
||||
read -r -d '' CMD <<- EOM
|
||||
curl http://127.0.0.1:50002/rpc -s -H "Content-Type: application/json"
|
||||
--data '{"id": 1, "method": "server.features", "params":[]}'
|
||||
EOM
|
||||
WANT='{"result":{"hosts":{},"pruning":"","server_version":"0.107.0","protocol_min":"0.54.0","protocol_max":"0.199.0","genesis_hash":"9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463","description":"Herald","payment_address":"","donation_address":"","daily_fee":"1.0","hash_function":"sha256","trending_algorithm":"fast_ar"},"error":null,"id":1}'
|
||||
test_command_with_want
|
||||
|
||||
# metrics endpoint testing
|
||||
|
||||
WANT=0
|
||||
GOT=$(curl http://127.0.0.1:2112/metrics -s | grep requests | grep resolve | awk '{print $NF}')
|
||||
want_greater
|
||||
|
||||
# caclulate return value
|
||||
|
||||
logical_or $RES
|
||||
|
||||
if [ $FINALRES -eq 1 ]; then
|
||||
echo "Failed!"
|
||||
exit 1
|
||||
else
|
||||
echo "Passed!"
|
||||
exit 0
|
||||
fi
|
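The same smoke check can also be driven from Go instead of curl. Below is a minimal, hypothetical sketch (not part of the repository) that assumes the JSON-RPC HTTP endpoint above is listening on 127.0.0.1:50002; it posts a server.version request with the required `id` field and prints the result, which the test above expects to be "0.107.0".

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Same request shape as the curl tests above; note the `id` field must be present.
	body := []byte(`{"id": 1, "method": "server.version", "params": []}`)

	resp, err := http.Post("http://127.0.0.1:50002/rpc", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the fields the smoke test cares about.
	var res struct {
		Result string      `json:"result"`
		Error  interface{} `json:"error"`
		ID     int         `json:"id"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		panic(err)
	}
	fmt.Println("server.version:", res.Result) // the test above expects "0.107.0"
}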
351 scripts/version.py Normal file
@@ -0,0 +1,351 @@
import argparse
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
from datetime import date, datetime
|
||||
from getpass import getpass
|
||||
|
||||
try:
|
||||
import github3
|
||||
except ImportError:
|
||||
print('To run release tool you need to install github3.py:')
|
||||
print('')
|
||||
print(' $ pip install github3.py')
|
||||
print('')
|
||||
sys.exit(1)
|
||||
|
||||
AREA_RENAME = {
|
||||
'api': 'API',
|
||||
'dht': 'DHT'
|
||||
}
|
||||
|
||||
|
||||
def build_upload_binary(release: github3.repos.release.Release) -> None:
|
||||
# os.chdir(absolute_path)
|
||||
# os.system("go build .")
|
||||
cmd = f'go build -o herald -v -ldflags "-X github.com/lbryio/herald.go/meta.Version={release.name}"'
|
||||
print(cmd)
|
||||
# os.system("go build .")
|
||||
os.system(cmd)
|
||||
with open("./herald", "rb") as f:
|
||||
release.upload_asset("binary", "herald", f)
|
||||
|
||||
|
||||
def get_github():
|
||||
config_path = os.path.expanduser('~/.lbry-release-tool.json')
|
||||
if os.path.exists(config_path):
|
||||
with open(config_path, 'r') as config_file:
|
||||
config = json.load(config_file)
|
||||
return github3.github.GitHub(token=config['token'])
|
||||
|
||||
token = os.environ.get("GH_TOKEN")
|
||||
if not token:
|
||||
print('GitHub Credentials')
|
||||
username = input('username: ')
|
||||
password = getpass('password: ')
|
||||
gh = github3.github.GitHub(username, password)
|
||||
token = input('Enter 2FA: ')
|
||||
with open(config_path, 'w') as config_file:
|
||||
json.dump({'token': token}, config_file)
|
||||
gh.login(token=token)
|
||||
|
||||
return gh
|
||||
|
||||
|
||||
def get_labels(pr, prefix):
|
||||
for label in pr.labels:
|
||||
label_name = label['name']
|
||||
if label_name.startswith(f'{prefix}: '):
|
||||
yield label_name[len(f'{prefix}: '):]
|
||||
|
||||
|
||||
def get_label(pr, prefix):
|
||||
for label in get_labels(pr, prefix):
|
||||
return label
|
||||
|
||||
|
||||
BACKWARDS_INCOMPATIBLE = 'backwards-incompatible:'
|
||||
RELEASE_TEXT = 'release-text:'
|
||||
RELEASE_TEXT_LINES = 'release-text-lines:'
|
||||
|
||||
|
||||
def get_backwards_incompatible(desc: str):
|
||||
for line in desc.splitlines():
|
||||
if line.startswith(BACKWARDS_INCOMPATIBLE):
|
||||
yield line[len(BACKWARDS_INCOMPATIBLE):]
|
||||
|
||||
|
||||
def get_release_text(desc: str):
|
||||
in_release_lines = False
|
||||
for line in desc.splitlines():
|
||||
if in_release_lines:
|
||||
yield line.rstrip()
|
||||
elif line.startswith(RELEASE_TEXT_LINES):
|
||||
in_release_lines = True
|
||||
elif line.startswith(RELEASE_TEXT):
|
||||
yield line[len(RELEASE_TEXT):].strip()
|
||||
yield ''
|
||||
|
||||
|
||||
class Version:
|
||||
|
||||
def __init__(self, major=0, date=datetime.now(), micro=0, alphabeta=""):
|
||||
self.major = int(major)
|
||||
self.date = date
|
||||
self.micro = int(micro)
|
||||
self.alphabeta = alphabeta
|
||||
|
||||
@classmethod
|
||||
def from_string(cls, version_string):
|
||||
(major, datemicroalphabeta) = version_string.split('.', 1)
|
||||
parts = datemicroalphabeta.split("-")
|
||||
if len(parts) > 1:
|
||||
datemicro, alphabeta = parts[0], parts[1]
|
||||
else:
|
||||
datemicro, alphabeta = parts[0], ""
|
||||
|
||||
date, micro = datemicro.rsplit('.', 1) if datemicro.count('.') > 2 else (datemicro, "0")
|
||||
return cls(major.replace("v", ""), datetime.strptime(date, "%Y.%m.%d"), int(micro), alphabeta)
|
||||
|
||||
@classmethod
|
||||
def from_content(cls, content):
|
||||
src = content.decoded.decode('utf-8')
|
||||
return cls.from_string(src)
|
||||
|
||||
def increment(self, action):
|
||||
cls = self.__class__
|
||||
|
||||
if action == 'major':
|
||||
return cls(self.major + 1, datetime.now(), self.micro, self.alphabeta)
|
||||
elif action == 'date':
|
||||
return cls(self.major, datetime.now(), self.micro, self.alphabeta)
|
||||
elif action == 'micro':
|
||||
return cls(self.major, datetime.now(), self.micro + 1, self.alphabeta)
|
||||
|
||||
raise ValueError(f'unknown action: {action}')
|
||||
|
||||
@property
|
||||
def tag(self):
|
||||
return f'{self}'
|
||||
|
||||
def __str__(self):
|
||||
arr = ['v', self.major, '.', self.date.strftime("%Y.%m.%d")]
|
||||
if self.micro > 0:
|
||||
arr += [".", self.micro]
|
||||
if self.alphabeta != "":
|
||||
arr += ["-", self.alphabeta]
|
||||
return ''.join(str(p) for p in arr)
|
||||
|
||||
|
||||
def get_draft_prerelease_vars(args) -> (bool, bool):
|
||||
draft = True
|
||||
prerelease = False
|
||||
if args.confirm > 2:
|
||||
draft = False
|
||||
elif args.confirm == 2:
|
||||
draft = False
|
||||
prerelease = True
|
||||
return draft, prerelease
|
||||
|
||||
|
||||
def release(args):
|
||||
gh = get_github()
|
||||
repo = gh.repository('lbryio', 'herald.go')
|
||||
try:
|
||||
version_file = repo.file_contents('version.txt')
|
||||
current_version = Version.from_content(version_file)
|
||||
print(f'Current Version: {current_version}')
|
||||
except:
|
||||
current_version = Version()
|
||||
version_file = repo.create_file("version.txt", message="add version file",
|
||||
content=str(current_version).encode('utf-8'))
|
||||
|
||||
if args.confirm <= 0:
|
||||
print("\nDRY RUN ONLY. RUN WITH --confirm TO DO A REAL RELEASE.\n")
|
||||
|
||||
if args.action == 'current':
|
||||
new_version = current_version
|
||||
else:
|
||||
new_version = current_version.increment(args.action)
|
||||
print(f' New Version: {new_version}')
|
||||
|
||||
tag = args.start_tag if args.start_tag else current_version.tag
|
||||
|
||||
try:
|
||||
previous_release = repo.release_from_tag(tag)
|
||||
except github3.exceptions.NotFoundError:
|
||||
previous_release = list(repo.releases())[-1]
|
||||
print(f' Changelog From: {previous_release.tag_name} ({previous_release.created_at})')
|
||||
print()
|
||||
|
||||
incompats = []
|
||||
release_texts = []
|
||||
unlabeled = []
|
||||
fixups = []
|
||||
areas = {}
|
||||
for pr in gh.search_issues(f"merged:>={previous_release._json_data['created_at']} repo:lbryio/herald.go"):
|
||||
area_labels = list(get_labels(pr, 'area'))
|
||||
type_label = get_label(pr, 'type')
|
||||
if area_labels and type_label:
|
||||
for area_name in area_labels:
|
||||
for incompat in get_backwards_incompatible(pr.body or ""):
|
||||
incompats.append(f' * [{area_name}] {incompat.strip()} ({pr.html_url})')
|
||||
for release_text in get_release_text(pr.body or ""):
|
||||
release_texts.append(release_text)
|
||||
if type_label == 'fixup':
|
||||
fixups.append(f' * {pr.title} ({pr.html_url}) by {pr.user["login"]}')
|
||||
else:
|
||||
area = areas.setdefault(area_name, [])
|
||||
area.append(f' * [{type_label}] {pr.title} ({pr.html_url}) by {pr.user["login"]}')
|
||||
else:
|
||||
unlabeled.append(f' * {pr.title} ({pr.html_url}) by {pr.user["login"]}')
|
||||
|
||||
area_names = list(areas.keys())
|
||||
area_names.sort()
|
||||
|
||||
body = io.StringIO()
|
||||
w = lambda s: body.write(s + '\n')
|
||||
|
||||
w(f'## [{new_version}] - {date.today().isoformat()}')
|
||||
if release_texts:
|
||||
w('')
|
||||
for release_text in release_texts:
|
||||
w(release_text)
|
||||
if incompats:
|
||||
w('')
|
||||
w(f'### Backwards Incompatible Changes')
|
||||
for incompat in incompats:
|
||||
w(incompat)
|
||||
for area in area_names:
|
||||
prs = areas[area]
|
||||
area = AREA_RENAME.get(area.lower(), area.capitalize())
|
||||
w('')
|
||||
w(f'### {area}')
|
||||
for pr in prs:
|
||||
w(pr)
|
||||
|
||||
print(body.getvalue())
|
||||
|
||||
if unlabeled:
|
||||
w('')
|
||||
print('The following PRs were unlabeled and *will* be included in changelog:')
|
||||
for notskipped in unlabeled:
|
||||
print(notskipped)
|
||||
w(notskipped)
|
||||
|
||||
if fixups:
|
||||
print('The following PRs were marked as fixups and not included in changelog:')
|
||||
for skipped in fixups:
|
||||
print(skipped)
|
||||
|
||||
draft, prerelease = get_draft_prerelease_vars(args)
|
||||
if args.confirm > 0:
|
||||
|
||||
commit = version_file.update(
|
||||
new_version.tag,
|
||||
version_file.decoded.decode('utf-8').replace(str(current_version), str(new_version)).encode()
|
||||
)['commit']
|
||||
|
||||
release = None
|
||||
if args.action != "current":
|
||||
repo.create_tag(
|
||||
tag=new_version.tag,
|
||||
message=new_version.tag,
|
||||
sha=commit.sha,
|
||||
obj_type='commit',
|
||||
tagger=commit.committer
|
||||
)
|
||||
|
||||
release = repo.create_release(
|
||||
new_version.tag,
|
||||
name=new_version.tag,
|
||||
body=body.getvalue(),
|
||||
draft=draft,
|
||||
prerelease=prerelease
|
||||
)
|
||||
|
||||
build_upload_binary(release)
|
||||
elif args.action == "current":
|
||||
try:
|
||||
print(new_version.tag)
|
||||
# if we have the tag and release already don't do anything
|
||||
release = repo.release_from_tag(new_version.tag)
|
||||
if release.prerelease:
|
||||
build_upload_binary(release)
|
||||
release.edit(prerelease=False)
|
||||
else:
|
||||
build_upload_binary(release)
|
||||
return
|
||||
except Exception as e:
|
||||
print(e)
|
||||
try:
|
||||
# We need to do this to get draft and prerelease releases
|
||||
release = repo.releases().next()
|
||||
# Case: we have a release and no tag
|
||||
if release.name == new_version.tag:
|
||||
release.edit(prerelease=prerelease, draft=draft)
|
||||
build_upload_binary(release)
|
||||
return
|
||||
else:
|
||||
raise Exception("asdf")
|
||||
except:
|
||||
repo.create_tag(
|
||||
tag=new_version.tag,
|
||||
message=new_version.tag,
|
||||
sha=commit.sha,
|
||||
obj_type='commit',
|
||||
tagger=commit.committer
|
||||
)
|
||||
|
||||
release = repo.create_release(
|
||||
new_version.tag,
|
||||
name=new_version.tag,
|
||||
body=body.getvalue(),
|
||||
draft=draft,
|
||||
prerelease=prerelease
|
||||
)
|
||||
finally:
|
||||
if release:
|
||||
build_upload_binary(release)
|
||||
|
||||
|
||||
class TestReleaseTool(unittest.TestCase):
|
||||
|
||||
def test_version_parsing(self):
|
||||
self.assertEqual(str(Version.from_string('v1.2020.01.01-beta')), 'v1.2020.01.01-beta')
|
||||
self.assertEqual(str(Version.from_string('v1.2020.01.01.10')), 'v1.2020.01.01.10')
|
||||
|
||||
def test_version_increment(self):
|
||||
v = Version.from_string('v1.2020.01.01-beta')
|
||||
self.assertEqual(str(v.increment('major')), f'v2.{datetime.now().strftime("%Y.%m.%d")}-beta')
|
||||
self.assertEqual(str(v.increment('date')), f'v1.{datetime.now().strftime("%Y.%m.%d")}-beta')
|
||||
|
||||
|
||||
def test():
|
||||
runner = unittest.TextTestRunner(verbosity=2)
|
||||
loader = unittest.TestLoader()
|
||||
suite = loader.loadTestsFromTestCase(TestReleaseTool)
|
||||
return 0 if runner.run(suite).wasSuccessful() else 1
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--confirm", default=0, action="count",
|
||||
help="without this flag, it will only print what it will do but will not actually do it")
|
||||
parser.add_argument("--start-tag", help="custom starting tag for changelog generation")
|
||||
parser.add_argument("action", choices=['test', 'current', 'major', 'date', 'micro'])
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.action == "test":
|
||||
code = test()
|
||||
else:
|
||||
code = release(args)
|
||||
|
||||
print()
|
||||
return code
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
415 server/args.go Normal file
@@ -0,0 +1,415 @@
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/akamensky/argparse"
|
||||
pb "github.com/lbryio/herald.go/protobuf/go"
|
||||
"github.com/lbryio/lbcd/chaincfg"
|
||||
)
|
||||
|
||||
const (
|
||||
ServeCmd = iota
|
||||
SearchCmd = iota
|
||||
DBCmd = iota
|
||||
)
|
||||
|
||||
// Args struct contains the arguments to the hub server.
|
||||
type Args struct {
|
||||
CmdType int
|
||||
Host string
|
||||
Port int
|
||||
DBPath string
|
||||
Chain *string
|
||||
DaemonURL *url.URL
|
||||
DaemonCAPath string
|
||||
EsHost string
|
||||
EsPort int
|
||||
PrometheusPort int
|
||||
NotifierPort int
|
||||
JSONRPCPort int
|
||||
JSONRPCHTTPPort int
|
||||
MaxSessions int
|
||||
SessionTimeout int
|
||||
EsIndex string
|
||||
RefreshDelta int
|
||||
CacheTTL int
|
||||
PeerFile string
|
||||
Banner *string
|
||||
Country string
|
||||
BlockingChannelIds []string
|
||||
FilteringChannelIds []string
|
||||
|
||||
GenesisHash string
|
||||
ServerVersion string
|
||||
ProtocolMin string
|
||||
ProtocolMax string
|
||||
ServerDescription string
|
||||
PaymentAddress string
|
||||
DonationAddress string
|
||||
DailyFee string
|
||||
|
||||
Debug bool
|
||||
DisableEs bool
|
||||
DisableLoadPeers bool
|
||||
DisableStartPrometheus bool
|
||||
DisableStartUDP bool
|
||||
DisableWritePeers bool
|
||||
DisableFederation bool
|
||||
DisableRocksDBRefresh bool
|
||||
DisableResolve bool
|
||||
DisableBlockingAndFiltering bool
|
||||
DisableStartNotifier bool
|
||||
DisableStartJSONRPC bool
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultHost = "0.0.0.0"
|
||||
DefaultPort = 50051
|
||||
DefaultDBPath = "/mnt/d/data/snapshot_1072108/lbry-rocksdb/" // FIXME
|
||||
DefaultEsHost = "http://localhost"
|
||||
DefaultEsIndex = "claims"
|
||||
DefaultEsPort = 9200
|
||||
DefaultPrometheusPort = 2112
|
||||
DefaultNotifierPort = 18080
|
||||
DefaultJSONRPCPort = 50001
|
||||
DefaultJSONRPCHTTPPort = 50002
|
||||
DefaultMaxSessions = 10000
|
||||
DefaultSessionTimeout = 300
|
||||
DefaultRefreshDelta = 5
|
||||
DefaultCacheTTL = 5
|
||||
DefaultPeerFile = "peers.txt"
|
||||
DefaultBannerFile = ""
|
||||
DefaultCountry = "US"
|
||||
|
||||
HUB_PROTOCOL_VERSION = "0.107.0"
|
||||
PROTOCOL_MIN = "0.54.0"
|
||||
PROTOCOL_MAX = "0.199.0"
|
||||
DefaultServerDescription = "Herald"
|
||||
DefaultPaymentAddress = ""
|
||||
DefaultDonationAddress = ""
|
||||
DefaultDailyFee = "1.0"
|
||||
|
||||
DefaultDisableLoadPeers = false
|
||||
DefaultDisableStartPrometheus = false
|
||||
DefaultDisableStartUDP = false
|
||||
DefaultDisableWritePeers = false
|
||||
DefaultDisableFederation = false
|
||||
DefaultDisableRockDBRefresh = false
|
||||
DefaultDisableResolve = false
|
||||
DefaultDisableBlockingAndFiltering = false
|
||||
DisableStartNotifier = false
|
||||
DisableStartJSONRPC = false
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultBlockingChannelIds = []string{}
|
||||
DefaultFilteringChannelIds = []string{}
|
||||
)
|
||||
|
||||
func loadBanner(bannerFile *string, serverVersion string) *string {
|
||||
var banner string
|
||||
|
||||
data, err := os.ReadFile(*bannerFile)
|
||||
if err != nil {
|
||||
banner = fmt.Sprintf("You are connected to an %s server.", serverVersion)
|
||||
} else {
|
||||
banner = string(data)
|
||||
}
|
||||
|
||||
/*
|
||||
banner := os.Getenv("BANNER")
|
||||
if banner == "" {
|
||||
return nil
|
||||
}
|
||||
*/
|
||||
|
||||
return &banner
|
||||
}
|
||||
|
||||
// MakeDefaultArgs creates a default set of arguments for testing the server.
|
||||
func MakeDefaultTestArgs() *Args {
|
||||
args := &Args{
|
||||
CmdType: ServeCmd,
|
||||
Host: DefaultHost,
|
||||
Port: DefaultPort,
|
||||
DBPath: DefaultDBPath,
|
||||
EsHost: DefaultEsHost,
|
||||
EsPort: DefaultEsPort,
|
||||
PrometheusPort: DefaultPrometheusPort,
|
||||
NotifierPort: DefaultNotifierPort,
|
||||
JSONRPCPort: DefaultJSONRPCPort,
|
||||
JSONRPCHTTPPort: DefaultJSONRPCHTTPPort,
|
||||
MaxSessions: DefaultMaxSessions,
|
||||
SessionTimeout: DefaultSessionTimeout,
|
||||
EsIndex: DefaultEsIndex,
|
||||
RefreshDelta: DefaultRefreshDelta,
|
||||
CacheTTL: DefaultCacheTTL,
|
||||
PeerFile: DefaultPeerFile,
|
||||
Banner: nil,
|
||||
Country: DefaultCountry,
|
||||
|
||||
GenesisHash: chaincfg.TestNet3Params.GenesisHash.String(),
|
||||
ServerVersion: HUB_PROTOCOL_VERSION,
|
||||
ProtocolMin: PROTOCOL_MIN,
|
||||
ProtocolMax: PROTOCOL_MAX,
|
||||
ServerDescription: DefaultServerDescription,
|
||||
PaymentAddress: DefaultPaymentAddress,
|
||||
DonationAddress: DefaultDonationAddress,
|
||||
DailyFee: DefaultDailyFee,
|
||||
|
||||
DisableEs: true,
|
||||
Debug: true,
|
||||
DisableLoadPeers: true,
|
||||
DisableStartPrometheus: true,
|
||||
DisableStartUDP: true,
|
||||
DisableWritePeers: true,
|
||||
DisableRocksDBRefresh: true,
|
||||
DisableResolve: true,
|
||||
DisableBlockingAndFiltering: true,
|
||||
DisableStartNotifier: true,
|
||||
DisableStartJSONRPC: true,
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
// GetEnvironment takes the environment variables as an array of strings
|
||||
// and a getkeyval function to turn it into a map.
|
||||
func GetEnvironment(data []string, getkeyval func(item string) (key, val string)) map[string]string {
|
||||
items := make(map[string]string)
|
||||
for _, item := range data {
|
||||
key, val := getkeyval(item)
|
||||
items[key] = val
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
// GetEnvironmentStandard gets the environment variables as a map.
|
||||
func GetEnvironmentStandard() map[string]string {
|
||||
return GetEnvironment(os.Environ(), func(item string) (key, val string) {
|
||||
splits := strings.Split(item, "=")
|
||||
key = splits[0]
|
||||
val = splits[1]
|
||||
return
|
||||
})
|
||||
}
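GetEnvironmentStandard splits each KEY=VALUE entry on every "=", so a value that itself contains "=" (for example a base64 token) loses everything after the second "=". If that matters, a caller can pass its own getkeyval; the sketch below is hypothetical (the function name is not part of this file) and uses strings.SplitN to keep the value intact.

// getEnvironmentSplitN is a sketch of an alternative to GetEnvironmentStandard:
// it splits each entry on the first "=" only, so "=" characters inside values survive.
func getEnvironmentSplitN() map[string]string {
	return GetEnvironment(os.Environ(), func(item string) (key, val string) {
		parts := strings.SplitN(item, "=", 2)
		key = parts[0]
		if len(parts) > 1 {
			val = parts[1]
		}
		return
	})
}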
|
||||
|
||||
// ParseArgs parses the command line arguments when starting the hub server.
|
||||
func ParseArgs(searchRequest *pb.SearchRequest) *Args {
|
||||
|
||||
environment := GetEnvironmentStandard()
|
||||
parser := argparse.NewParser("herald", "herald server and client")
|
||||
|
||||
serveCmd := parser.NewCommand("serve", "start the herald server")
|
||||
searchCmd := parser.NewCommand("search", "claim search")
|
||||
dbCmd := parser.NewCommand("db", "db testing")
|
||||
|
||||
defaultDaemonURL := "http://localhost:9245"
|
||||
if url, ok := environment["DAEMON_URL"]; ok {
|
||||
defaultDaemonURL = url
|
||||
}
|
||||
|
||||
validateURL := func(arg []string) error {
|
||||
_, err := url.Parse(arg[0])
|
||||
return err
|
||||
}
|
||||
validatePort := func(arg []string) error {
|
||||
_, err := strconv.ParseUint(arg[0], 10, 16)
|
||||
return err
|
||||
}
|
||||
|
||||
// main server config arguments
|
||||
host := parser.String("", "rpchost", &argparse.Options{Required: false, Help: "RPC host", Default: DefaultHost})
|
||||
port := parser.Int("", "rpcport", &argparse.Options{Required: false, Help: "RPC port", Validate: validatePort, Default: DefaultPort})
|
||||
dbPath := parser.String("", "db-path", &argparse.Options{Required: false, Help: "RocksDB path", Default: DefaultDBPath})
|
||||
chain := parser.Selector("", "chain", []string{chaincfg.MainNetParams.Name, chaincfg.TestNet3Params.Name, chaincfg.RegressionNetParams.Name, "testnet"},
|
||||
&argparse.Options{Required: false, Help: "Which chain to use, default is 'mainnet'. Values 'regtest' and 'testnet' are for testing", Default: chaincfg.MainNetParams.Name})
|
||||
daemonURLStr := parser.String("", "daemon-url", &argparse.Options{Required: false, Help: "URL for rpc to lbrycrd or lbcd, <rpcuser>:<rpcpassword>@<lbcd rpc ip>:<lbcd rpc port>.", Validate: validateURL, Default: defaultDaemonURL})
|
||||
daemonCAPath := parser.String("", "daemon-ca-path", &argparse.Options{Required: false, Help: "Path to the lbcd CA file. Use SSL certificate to verify connection to lbcd."})
|
||||
esHost := parser.String("", "eshost", &argparse.Options{Required: false, Help: "elasticsearch host", Default: DefaultEsHost})
|
||||
esPort := parser.Int("", "esport", &argparse.Options{Required: false, Help: "elasticsearch port", Default: DefaultEsPort})
|
||||
prometheusPort := parser.Int("", "prometheus-port", &argparse.Options{Required: false, Help: "prometheus port", Default: DefaultPrometheusPort})
|
||||
notifierPort := parser.Int("", "notifier-port", &argparse.Options{Required: false, Help: "notifier port", Default: DefaultNotifierPort})
|
||||
jsonRPCPort := parser.Int("", "json-rpc-port", &argparse.Options{Required: false, Help: "JSON RPC port", Validate: validatePort, Default: DefaultJSONRPCPort})
|
||||
jsonRPCHTTPPort := parser.Int("", "json-rpc-http-port", &argparse.Options{Required: false, Help: "JSON RPC over HTTP port", Validate: validatePort, Default: DefaultJSONRPCHTTPPort})
|
||||
maxSessions := parser.Int("", "max-sessions", &argparse.Options{Required: false, Help: "Maximum number of electrum clients that can be connected", Default: DefaultMaxSessions})
|
||||
sessionTimeout := parser.Int("", "session-timeout", &argparse.Options{Required: false, Help: "Session inactivity timeout (seconds)", Default: DefaultSessionTimeout})
|
||||
esIndex := parser.String("", "esindex", &argparse.Options{Required: false, Help: "elasticsearch index name", Default: DefaultEsIndex})
|
||||
refreshDelta := parser.Int("", "refresh-delta", &argparse.Options{Required: false, Help: "elasticsearch index refresh delta in seconds", Default: DefaultRefreshDelta})
|
||||
cacheTTL := parser.Int("", "cachettl", &argparse.Options{Required: false, Help: "Cache TTL in minutes", Default: DefaultCacheTTL})
|
||||
peerFile := parser.String("", "peerfile", &argparse.Options{Required: false, Help: "Initial peer file for federation", Default: DefaultPeerFile})
|
||||
bannerFile := parser.String("", "bannerfile", &argparse.Options{Required: false, Help: "Banner file server.banner", Default: DefaultBannerFile})
|
||||
country := parser.String("", "country", &argparse.Options{Required: false, Help: "Country this node is running in. Default US.", Default: DefaultCountry})
|
||||
blockingChannelIds := parser.StringList("", "blocking-channel-ids", &argparse.Options{Required: false, Help: "Blocking channel ids", Default: DefaultBlockingChannelIds})
|
||||
filteringChannelIds := parser.StringList("", "filtering-channel-ids", &argparse.Options{Required: false, Help: "Filtering channel ids", Default: DefaultFilteringChannelIds})
|
||||
|
||||
// arguments for server features
|
||||
serverDescription := parser.String("", "server-description", &argparse.Options{Required: false, Help: "Server description", Default: DefaultServerDescription})
|
||||
paymentAddress := parser.String("", "payment-address", &argparse.Options{Required: false, Help: "Payment address", Default: DefaultPaymentAddress})
|
||||
donationAddress := parser.String("", "donation-address", &argparse.Options{Required: false, Help: "Donation address", Default: DefaultDonationAddress})
|
||||
dailyFee := parser.String("", "daily-fee", &argparse.Options{Required: false, Help: "Daily fee", Default: DefaultDailyFee})
|
||||
|
||||
// flags for disabling features
|
||||
debug := parser.Flag("", "debug", &argparse.Options{Required: false, Help: "enable debug logging", Default: false})
|
||||
disableEs := parser.Flag("", "disable-es", &argparse.Options{Required: false, Help: "Disable elastic search, for running/testing independently", Default: false})
|
||||
disableLoadPeers := parser.Flag("", "disable-load-peers", &argparse.Options{Required: false, Help: "Disable load peers from disk at startup", Default: DefaultDisableLoadPeers})
|
||||
disableStartPrometheus := parser.Flag("", "disable-start-prometheus", &argparse.Options{Required: false, Help: "Disable start prometheus server", Default: DefaultDisableStartPrometheus})
|
||||
disableStartUdp := parser.Flag("", "disable-start-udp", &argparse.Options{Required: false, Help: "Disable start UDP ping server", Default: DefaultDisableStartUDP})
|
||||
disableWritePeers := parser.Flag("", "disable-write-peers", &argparse.Options{Required: false, Help: "Disable write peer to disk as we learn about them", Default: DefaultDisableWritePeers})
|
||||
disableFederation := parser.Flag("", "disable-federation", &argparse.Options{Required: false, Help: "Disable server federation", Default: DefaultDisableFederation})
|
||||
disableRocksDBRefresh := parser.Flag("", "disable-rocksdb-refresh", &argparse.Options{Required: false, Help: "Disable rocksdb refreshing", Default: DefaultDisableRockDBRefresh})
|
||||
disableResolve := parser.Flag("", "disable-resolve", &argparse.Options{Required: false, Help: "Disable resolve endpoint (and rocksdb loading)", Default: DefaultDisableResolve})
|
||||
disableBlockingAndFiltering := parser.Flag("", "disable-blocking-and-filtering", &argparse.Options{Required: false, Help: "Disable blocking and filtering of channels and streams", Default: DefaultDisableBlockingAndFiltering})
|
||||
disableStartNotifier := parser.Flag("", "disable-start-notifier", &argparse.Options{Required: false, Help: "Disable start notifier", Default: DisableStartNotifier})
|
||||
disableStartJSONRPC := parser.Flag("", "disable-start-jsonrpc", &argparse.Options{Required: false, Help: "Disable start jsonrpc endpoint", Default: DisableStartJSONRPC})
|
||||
|
||||
// search command arguments
|
||||
text := parser.String("", "text", &argparse.Options{Required: false, Help: "text query"})
|
||||
name := parser.String("", "name", &argparse.Options{Required: false, Help: "name"})
|
||||
claimType := parser.String("", "claim_type", &argparse.Options{Required: false, Help: "claim_type"})
|
||||
id := parser.String("", "id", &argparse.Options{Required: false, Help: "id"})
|
||||
author := parser.String("", "author", &argparse.Options{Required: false, Help: "author"})
|
||||
title := parser.String("", "title", &argparse.Options{Required: false, Help: "title"})
|
||||
description := parser.String("", "description", &argparse.Options{Required: false, Help: "description"})
|
||||
channelId := parser.String("", "channel_id", &argparse.Options{Required: false, Help: "channel id"})
|
||||
channelIds := parser.StringList("", "channel_ids", &argparse.Options{Required: false, Help: "channel ids"})
|
||||
|
||||
// Now parse the arguments
|
||||
err := parser.Parse(os.Args)
|
||||
if err != nil {
|
||||
log.Fatalln(parser.Usage(err))
|
||||
}
|
||||
|
||||
// Use default JSON RPC port only if *neither* JSON RPC arg is specified.
|
||||
if *jsonRPCPort == 0 && *jsonRPCHTTPPort == 0 {
|
||||
*jsonRPCPort = DefaultJSONRPCPort
|
||||
}
|
||||
|
||||
daemonURL, err := url.Parse(*daemonURLStr)
|
||||
if err != nil {
|
||||
log.Fatalf("URL parse failed: %v", err)
|
||||
}
|
||||
|
||||
banner := loadBanner(bannerFile, HUB_PROTOCOL_VERSION)
|
||||
|
||||
args := &Args{
|
||||
CmdType: SearchCmd,
|
||||
Host: *host,
|
||||
Port: *port,
|
||||
DBPath: *dbPath,
|
||||
Chain: chain,
|
||||
DaemonURL: daemonURL,
|
||||
DaemonCAPath: *daemonCAPath,
|
||||
EsHost: *esHost,
|
||||
EsPort: *esPort,
|
||||
PrometheusPort: *prometheusPort,
|
||||
NotifierPort: *notifierPort,
|
||||
JSONRPCPort: *jsonRPCPort,
|
||||
JSONRPCHTTPPort: *jsonRPCHTTPPort,
|
||||
MaxSessions: *maxSessions,
|
||||
SessionTimeout: *sessionTimeout,
|
||||
EsIndex: *esIndex,
|
||||
RefreshDelta: *refreshDelta,
|
||||
CacheTTL: *cacheTTL,
|
||||
PeerFile: *peerFile,
|
||||
Banner: banner,
|
||||
Country: *country,
|
||||
BlockingChannelIds: *blockingChannelIds,
|
||||
FilteringChannelIds: *filteringChannelIds,
|
||||
|
||||
GenesisHash: "",
|
||||
ServerVersion: HUB_PROTOCOL_VERSION,
|
||||
ProtocolMin: PROTOCOL_MIN,
|
||||
ProtocolMax: PROTOCOL_MAX,
|
||||
ServerDescription: *serverDescription,
|
||||
PaymentAddress: *paymentAddress,
|
||||
DonationAddress: *donationAddress,
|
||||
DailyFee: *dailyFee,
|
||||
|
||||
Debug: *debug,
|
||||
DisableEs: *disableEs,
|
||||
DisableLoadPeers: *disableLoadPeers,
|
||||
DisableStartPrometheus: *disableStartPrometheus,
|
||||
DisableStartUDP: *disableStartUdp,
|
||||
DisableWritePeers: *disableWritePeers,
|
||||
DisableFederation: *disableFederation,
|
||||
DisableRocksDBRefresh: *disableRocksDBRefresh,
|
||||
DisableResolve: *disableResolve,
|
||||
DisableBlockingAndFiltering: *disableBlockingAndFiltering,
|
||||
DisableStartNotifier: *disableStartNotifier,
|
||||
DisableStartJSONRPC: *disableStartJSONRPC,
|
||||
}
|
||||
|
||||
if esHost, ok := environment["ELASTIC_HOST"]; ok {
|
||||
args.EsHost = esHost
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(args.EsHost, "http") {
|
||||
args.EsHost = "http://" + args.EsHost
|
||||
}
|
||||
|
||||
if esPort, ok := environment["ELASTIC_PORT"]; ok {
|
||||
args.EsPort, err = strconv.Atoi(esPort)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if prometheusPort, ok := environment["GOHUB_PROMETHEUS_PORT"]; ok {
|
||||
args.PrometheusPort, err = strconv.Atoi(prometheusPort)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Verify no invalid argument combinations
|
||||
*/
|
||||
if len(*channelIds) > 0 && *channelId != "" {
|
||||
log.Fatal("Cannot specify both channel_id and channel_ids")
|
||||
}
|
||||
|
||||
if serveCmd.Happened() {
|
||||
args.CmdType = ServeCmd
|
||||
} else if searchCmd.Happened() {
|
||||
args.CmdType = SearchCmd
|
||||
} else if dbCmd.Happened() {
|
||||
args.CmdType = DBCmd
|
||||
}
|
||||
|
||||
if *text != "" {
|
||||
searchRequest.Text = *text
|
||||
}
|
||||
if *name != "" {
|
||||
searchRequest.ClaimName = *name
|
||||
}
|
||||
if *claimType != "" {
|
||||
searchRequest.ClaimType = []string{*claimType}
|
||||
}
|
||||
if *id != "" {
|
||||
searchRequest.ClaimId = &pb.InvertibleField{Invert: false, Value: []string{*id}}
|
||||
}
|
||||
if *author != "" {
|
||||
searchRequest.Author = *author
|
||||
}
|
||||
if *title != "" {
|
||||
searchRequest.Title = *title
|
||||
}
|
||||
if *description != "" {
|
||||
searchRequest.Description = *description
|
||||
}
|
||||
if *channelId != "" {
|
||||
searchRequest.ChannelId = &pb.InvertibleField{Invert: false, Value: []string{*channelId}}
|
||||
}
|
||||
if len(*channelIds) > 0 {
|
||||
searchRequest.ChannelId = &pb.InvertibleField{Invert: false, Value: *channelIds}
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
35 server/args_test.go Normal file
@@ -0,0 +1,35 @@
package server
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
pb "github.com/lbryio/herald.go/protobuf/go"
|
||||
)
|
||||
|
||||
// TestParseArgs
|
||||
func TestParseArgs(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Correctly disables elastic search",
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
os.Args = []string{"serve", "--disable-es"}
|
||||
searchRequest := new(pb.SearchRequest)
|
||||
args := ParseArgs(searchRequest)
|
||||
got := args.DisableEs
|
||||
if got != tt.want {
|
||||
t.Errorf("flags: got: %v, want: %v\n", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
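The table above can be extended to cover other flags defined in args.go. A hypothetical companion test (not part of the file) checking the --debug flag would follow the same pattern:

// TestParseArgsDebug is a hypothetical companion test: it checks that the
// --debug flag defined in args.go is parsed into Args.Debug.
func TestParseArgsDebug(t *testing.T) {
	os.Args = []string{"serve", "--debug"}
	searchRequest := new(pb.SearchRequest)
	args := ParseArgs(searchRequest)
	if !args.Debug {
		t.Errorf("flags: got: %v, want: true\n", args.Debug)
	}
}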
|
428 server/federation.go Normal file
@@ -0,0 +1,428 @@
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"math"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/lbryio/herald.go/internal/metrics"
|
||||
pb "github.com/lbryio/herald.go/protobuf/go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
// Peer holds relevant information about peers that we know about.
|
||||
type Peer struct {
|
||||
Address string
|
||||
Port string
|
||||
LastSeen time.Time
|
||||
}
|
||||
|
||||
var (
|
||||
localHosts = map[string]bool{
|
||||
"127.0.0.1": true,
|
||||
"0.0.0.0": true,
|
||||
"localhost": true,
|
||||
"<nil>": true, // Empty net.IP turned into a string
|
||||
}
|
||||
)
|
||||
|
||||
// peerKey takes a peer and returns the key for that peer
|
||||
// in our peer table.
|
||||
func peerKey(peer *Peer) string {
|
||||
return peer.Address + ":" + peer.Port
|
||||
}
|
||||
|
||||
// peerKey is a method on the Peer struct that returns the key for that
|
||||
// peer in our peer table.
|
||||
func (peer *Peer) peerKey() string {
|
||||
return peer.Address + ":" + peer.Port
|
||||
}
|
||||
|
||||
func (s *Server) incNumPeers() {
|
||||
atomic.AddInt64(s.NumPeerServers, 1)
|
||||
}
|
||||
|
||||
func (s *Server) decNumPeers() {
|
||||
atomic.AddInt64(s.NumPeerServers, -1)
|
||||
}
|
||||
|
||||
func (s *Server) getNumPeers() int64 {
|
||||
return *s.NumPeerServers
|
||||
}
|
||||
|
||||
func (s *Server) incNumSubs() {
|
||||
atomic.AddInt64(s.NumPeerSubs, 1)
|
||||
}
|
||||
|
||||
func (s *Server) decNumSubs() {
|
||||
atomic.AddInt64(s.NumPeerSubs, -1)
|
||||
}
|
||||
|
||||
func (s *Server) getNumSubs() int64 {
|
||||
return *s.NumPeerSubs
|
||||
}
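The counters above are written with sync/atomic but read back with a plain pointer dereference; if strictly race-free reads are wanted, the getters could use atomic.LoadInt64 instead. A minimal sketch, with hypothetical method names, under the assumption that nothing else relies on the non-atomic reads:

// Sketch only: race-free reads of the peer counters using sync/atomic.
func (s *Server) getNumPeersAtomic() int64 {
	return atomic.LoadInt64(s.NumPeerServers)
}

func (s *Server) getNumSubsAtomic() int64 {
	return atomic.LoadInt64(s.NumPeerSubs)
}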
|
||||
|
||||
// getAndSetExternalIp detects the server's external IP and stores it.
|
||||
func (s *Server) getAndSetExternalIp(ip, port string) error {
|
||||
pong, err := UDPPing(ip, port)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
myIp := pong.DecodeAddress()
|
||||
log.Println("my ip: ", myIp)
|
||||
s.ExternalIP = myIp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadPeers takes the arguments given to the hub at startup and loads the
|
||||
// previously known peers from disk and verifies their existence before
|
||||
// storing them as known peers. Returns a map of peerKey -> object
|
||||
func (s *Server) loadPeers() error {
|
||||
peerFile := s.Args.PeerFile
|
||||
port := strconv.Itoa(s.Args.Port)
|
||||
|
||||
// First we make sure our server has come up, so we can answer back to peers.
|
||||
var failures = 0
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
||||
// log.Println("loadPeers #### waiting for server to come up")
|
||||
retry:
|
||||
time.Sleep(time.Second * time.Duration(math.Pow(float64(failures), 2)))
|
||||
conn, err := grpc.DialContext(ctx,
|
||||
"0.0.0.0:"+port,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
if failures > 3 {
|
||||
log.Println("Warning! Our endpoint doesn't seem to have come up, didn't load peers")
|
||||
return err
|
||||
}
|
||||
failures += 1
|
||||
goto retry
|
||||
}
|
||||
if err = conn.Close(); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
cancel()
|
||||
// log.Println("loadPeers #### Past checking for server to come up")
|
||||
|
||||
f, err := os.Open(peerFile)
|
||||
if err != nil {
|
||||
// log.Println(err)
|
||||
return err
|
||||
}
|
||||
scanner := bufio.NewScanner(f)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
var text []string
|
||||
for scanner.Scan() {
|
||||
text = append(text, scanner.Text())
|
||||
}
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
log.Println("peer file failed to close: ", err)
|
||||
}
|
||||
|
||||
for _, line := range text {
|
||||
ipPort := strings.Split(line, ":")
|
||||
if len(ipPort) != 2 {
|
||||
log.Println("Malformed entry in peer file")
|
||||
continue
|
||||
}
|
||||
// If the peer is us, skip
|
||||
log.Println(ipPort)
|
||||
if ipPort[1] == port &&
|
||||
(localHosts[ipPort[0]] || ipPort[0] == s.ExternalIP.String()) {
|
||||
log.Println("Self peer, skipping ...")
|
||||
continue
|
||||
}
|
||||
|
||||
newPeer := &Peer{
|
||||
Address: ipPort[0],
|
||||
Port: ipPort[1],
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
log.Printf("pinging peer %+v\n", newPeer)
|
||||
err = s.addPeer(newPeer, true, true)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
log.Println("Returning from loadPeers")
|
||||
return nil
|
||||
}
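For reference, loadPeers expects the peer file (peers.txt by default) to contain exactly one address:port entry per line; lines that do not split into exactly two parts on ":" are logged as malformed and skipped, and entries matching our own address and port are ignored. A hypothetical example file (addresses are placeholders):

1.2.3.4:50051
example-hub.example.com:50051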
|
||||
|
||||
// subscribeToPeer subscribes us to a peer to we'll get updates about their
|
||||
// known peers.
|
||||
func (s *Server) subscribeToPeer(peer *Peer) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn, err := grpc.DialContext(ctx,
|
||||
peer.Address+":"+peer.Port,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
msg := &pb.ServerMessage{
|
||||
Address: s.ExternalIP.String(),
|
||||
Port: strconv.Itoa(s.Args.Port),
|
||||
}
|
||||
|
||||
c := pb.NewHubClient(conn)
|
||||
|
||||
log.Printf("%s:%d subscribing to %+v\n", s.ExternalIP, s.Args.Port, peer)
|
||||
_, err = c.PeerSubscribe(ctx, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// helloPeer takes a peer to say hello to and sends a hello message
|
||||
// containing all the peers we know about and information about us.
|
||||
// This is used to confirm existence of peers on start and let them
|
||||
// know about us. Returns the response from the server on success,
|
||||
// nil otherwise.
|
||||
func (s *Server) helloPeer(peer *Peer) (*pb.HelloMessage, error) {
|
||||
log.Println("In helloPeer")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn, err := grpc.DialContext(ctx,
|
||||
peer.Address+":"+peer.Port,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
c := pb.NewHubClient(conn)
|
||||
|
||||
msg := &pb.HelloMessage{
|
||||
Port: strconv.Itoa(s.Args.Port),
|
||||
Host: s.ExternalIP.String(),
|
||||
Servers: []*pb.ServerMessage{},
|
||||
}
|
||||
|
||||
log.Printf("%s:%d saying hello to %+v\n", s.ExternalIP, s.Args.Port, peer)
|
||||
res, err := c.Hello(ctx, msg)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Println(res)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// writePeers writes our current known peers to disk.
|
||||
func (s *Server) writePeers() {
|
||||
if s.Args.DisableWritePeers {
|
||||
return
|
||||
}
|
||||
f, err := os.Create(s.Args.PeerFile)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
writer := bufio.NewWriter(f)
|
||||
|
||||
for key := range s.PeerServers {
|
||||
line := key + "\n"
|
||||
_, err := writer.WriteString(line)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
err = writer.Flush()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
// notifyPeer takes a peer to notify and a new peer we just learned about
|
||||
// and informs the already known peer about the new peer.
|
||||
func (s *Server) notifyPeer(peerToNotify *Peer, newPeer *Peer) error {
|
||||
if s.Args.DisableFederation {
|
||||
return nil
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn, err := grpc.DialContext(ctx,
|
||||
peerToNotify.Address+":"+peerToNotify.Port,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
msg := &pb.ServerMessage{
|
||||
Address: newPeer.Address,
|
||||
Port: newPeer.Port,
|
||||
}
|
||||
|
||||
c := pb.NewHubClient(conn)
|
||||
|
||||
_, err = c.AddPeer(ctx, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// notifyPeerSubs takes a new peer server we just learned about and notifies
|
||||
// all the peers that have subscribed to us about it.
|
||||
func (s *Server) notifyPeerSubs(newPeer *Peer) {
|
||||
var unsubscribe []string
|
||||
s.PeerSubsMut.RLock()
|
||||
for key, peer := range s.PeerSubs {
|
||||
log.Printf("Notifying peer %s of new node %+v\n", key, newPeer)
|
||||
err := s.notifyPeer(peer, newPeer)
|
||||
if err != nil {
|
||||
log.Println("Failed to send data to ", key)
|
||||
log.Println(err)
|
||||
unsubscribe = append(unsubscribe, key)
|
||||
}
|
||||
}
|
||||
s.PeerSubsMut.RUnlock()
|
||||
|
||||
s.PeerSubsMut.Lock()
|
||||
for _, key := range unsubscribe {
|
||||
if _, ok := s.PeerSubs[key]; ok {
|
||||
delete(s.PeerSubs, key)
|
||||
s.decNumSubs()
|
||||
metrics.PeersSubscribed.Dec()
|
||||
}
|
||||
}
|
||||
s.PeerSubsMut.Unlock()
|
||||
}
|
||||
|
||||
// addPeer takes a new peer, optionally checks to see if they're online, and
|
||||
// adds them to our list of peers. It will also optionally subscribe to it.
|
||||
func (s *Server) addPeer(newPeer *Peer, ping bool, subscribe bool) error {
|
||||
if s.Args.DisableFederation {
|
||||
return nil
|
||||
}
|
||||
// First thing we get our external ip if we don't have it, otherwise we
|
||||
// could end up subscribed to ourselves, which is silly.
|
||||
nilIP := net.IP{}
|
||||
localIP1 := net.IPv4(127, 0, 0, 1)
|
||||
if s.ExternalIP.Equal(nilIP) || s.ExternalIP.Equal(localIP1) {
|
||||
err := s.getAndSetExternalIp(newPeer.Address, newPeer.Port)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
log.Println("WARNING: can't determine external IP, continuing with ", s.Args.Host)
|
||||
}
|
||||
}
|
||||
|
||||
if strconv.Itoa(s.Args.Port) == newPeer.Port &&
|
||||
(localHosts[newPeer.Address] || newPeer.Address == s.ExternalIP.String()) {
|
||||
log.Printf("%s:%d addPeer: Self peer, skipping...\n", s.ExternalIP, s.Args.Port)
|
||||
return nil
|
||||
}
|
||||
|
||||
k := peerKey(newPeer)
|
||||
|
||||
log.Printf("%s:%d adding peer %+v\n", s.ExternalIP, s.Args.Port, newPeer)
|
||||
if oldServer, loaded := s.PeerServersLoadOrStore(newPeer); !loaded {
|
||||
if ping {
|
||||
_, err := s.helloPeer(newPeer)
|
||||
if err != nil {
|
||||
s.PeerServersMut.Lock()
|
||||
delete(s.PeerServers, k)
|
||||
s.PeerServersMut.Unlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
s.incNumPeers()
|
||||
metrics.PeersKnown.Inc()
|
||||
s.writePeers()
|
||||
s.notifyPeerSubs(newPeer)
|
||||
// This is weird because we're doing grpc and jsonrpc here.
|
||||
// Do we still want the custom gRPC path here?
|
||||
log.Warn("Sending peer to NotifierChan")
|
||||
s.NotifierChan <- peerNotification{newPeer.Address, newPeer.Port}
|
||||
|
||||
// Subscribe to all our peers for now
|
||||
if subscribe {
|
||||
err := s.subscribeToPeer(newPeer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
oldServer.LastSeen = time.Now()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mergePeers is an internal convenience function to add a list of
|
||||
// peers.
|
||||
func (s *Server) mergePeers(servers []*pb.ServerMessage) {
|
||||
for _, srvMsg := range servers {
|
||||
newPeer := &Peer{
|
||||
Address: srvMsg.Address,
|
||||
Port: srvMsg.Port,
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
err := s.addPeer(newPeer, false, true)
|
||||
// This shouldn't happen because we're not pinging them.
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// makeHelloMessage makes a message for this hub to call the Hello endpoint
|
||||
// on another hub.
|
||||
func (s *Server) makeHelloMessage() *pb.HelloMessage {
|
||||
servers := make([]*pb.ServerMessage, 0, 10)
|
||||
|
||||
s.PeerServersMut.RLock()
|
||||
for _, peer := range s.PeerServers {
|
||||
servers = append(servers, &pb.ServerMessage{
|
||||
Address: peer.Address,
|
||||
Port: peer.Port,
|
||||
})
|
||||
}
|
||||
s.PeerServersMut.RUnlock()
|
||||
|
||||
return &pb.HelloMessage{
|
||||
Port: strconv.Itoa(s.Args.Port),
|
||||
Host: s.ExternalIP.String(),
|
||||
Servers: servers,
|
||||
}
|
||||
}
|
478 server/federation_test.go Normal file
@@ -0,0 +1,478 @@
package server_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/lbryio/herald.go/internal/metrics"
|
||||
pb "github.com/lbryio/herald.go/protobuf/go"
|
||||
"github.com/lbryio/herald.go/server"
|
||||
"github.com/lbryio/lbry.go/v3/extras/stop"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
// lineCountFile takes a fileName and counts the number of lines in it.
|
||||
func lineCountFile(fileName string) int {
|
||||
f, err := os.Open(fileName)
|
||||
defer func() {
|
||||
err := f.Close()
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return 0
|
||||
}
|
||||
scanner := bufio.NewScanner(f)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
var lineCount = 0
|
||||
for scanner.Scan() {
|
||||
scanner.Text()
|
||||
lineCount = lineCount + 1
|
||||
}
|
||||
|
||||
return lineCount
|
||||
}
|
||||
|
||||
// removeFile removes a file.
|
||||
func removeFile(fileName string) {
|
||||
err := os.Remove(fileName)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddPeer tests the ability to add peers
|
||||
func TestAddPeer(t *testing.T) {
|
||||
// ctx := context.Background()
|
||||
ctx := stop.NewDebug()
|
||||
args := server.MakeDefaultTestArgs()
|
||||
args.DisableStartNotifier = false
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
want int
|
||||
}{
|
||||
{
|
||||
name: "Add 10 peers",
|
||||
want: 10,
|
||||
},
|
||||
{
|
||||
name: "Add 10 peers, 1 unique",
|
||||
want: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hubServer := server.MakeHubServer(ctx, args)
|
||||
hubServer.ExternalIP = net.IPv4(0, 0, 0, 0)
|
||||
metrics.PeersKnown.Set(0)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
var peer *server.Peer
|
||||
if strings.Contains(tt.name, "1 unique") {
|
||||
peer = &server.Peer{
|
||||
Address: "1.1.1.1",
|
||||
Port: "50051",
|
||||
}
|
||||
} else {
|
||||
x := i + 1
|
||||
peer = &server.Peer{
|
||||
Address: fmt.Sprintf("%d.%d.%d.%d", x, x, x, x),
|
||||
Port: "50051",
|
||||
}
|
||||
}
|
||||
//log.Printf("Adding peer %+v\n", msg)
|
||||
err := hubServer.AddPeerExported()(peer, false, false)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
var m = &dto.Metric{}
|
||||
if err := metrics.PeersKnown.Write(m); err != nil {
|
||||
t.Errorf("Error getting metrics %+v\n", err)
|
||||
}
|
||||
got := int(*m.Gauge.Value)
|
||||
if got != tt.want {
|
||||
t.Errorf("len(server.PeerServers) = %d, want %d\n", got, tt.want)
|
||||
}
|
||||
hubServer.Stop()
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestPeerWriter tests that peers get written properly
|
||||
func TestPeerWriter(t *testing.T) {
|
||||
ctx := stop.NewDebug()
|
||||
args := server.MakeDefaultTestArgs()
|
||||
args.DisableWritePeers = false
|
||||
args.DisableStartNotifier = false
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
want int
|
||||
}{
|
||||
{
|
||||
name: "Add 10 peers",
|
||||
want: 10,
|
||||
},
|
||||
{
|
||||
name: "Add 10 peers, 1 unique",
|
||||
want: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hubServer := server.MakeHubServer(ctx, args)
|
||||
hubServer.ExternalIP = net.IPv4(0, 0, 0, 0)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
var peer *server.Peer
|
||||
if strings.Contains(tt.name, "1 unique") {
|
||||
peer = &server.Peer{
|
||||
Address: "1.1.1.1",
|
||||
Port: "50051",
|
||||
}
|
||||
} else {
|
||||
x := i + 1
|
||||
peer = &server.Peer{
|
||||
Address: fmt.Sprintf("%d.%d.%d.%d", x, x, x, x),
|
||||
Port: "50051",
|
||||
}
|
||||
}
|
||||
err := hubServer.AddPeerExported()(peer, false, false)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
got := lineCountFile(hubServer.Args.PeerFile)
|
||||
if got != tt.want {
|
||||
t.Errorf("lineCountFile(peers.txt) = %d, want %d", got, tt.want)
|
||||
}
|
||||
hubServer.Stop()
|
||||
})
|
||||
}
|
||||
|
||||
removeFile(args.PeerFile)
|
||||
}
|
||||
|
||||
// TestAddPeerEndpoint tests the ability to add peers
|
||||
func TestAddPeerEndpoint(t *testing.T) {
|
||||
ctx := stop.NewDebug()
|
||||
args := server.MakeDefaultTestArgs()
|
||||
args.DisableStartNotifier = false
|
||||
args2 := server.MakeDefaultTestArgs()
|
||||
args2.DisableStartNotifier = false
|
||||
args2.Port = 50052
|
||||
args2.NotifierPort = 18081
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
wantServerOne int64
|
||||
wantServerTwo int64
|
||||
}{
|
||||
{
|
||||
// outside -> server1.AddPeer(server2, ping=true) : server1 = 1, server2 = 0
|
||||
// server1 -> server2.Hello(server1) : server1 = 1, server2 = 0
|
||||
// server2 -> server2.addPeer(server1, ping=false) : server1 = 1, server2 = 1
|
||||
// server2 -> server1.PeerSubscribe(server2) : server1 = 1, server2 = 1
|
||||
// server1 <- server2.makeHelloMessage() : server1 = 1, server2 = 1
|
||||
// server1.notifyPeer() : server1 = 1, server2 = 1
|
||||
// server1 -> server2.AddPeer(server2) : server1 = 1, server2 = 1
|
||||
// server2 self peer, skipping : server1 = 1, server2 = 1
|
||||
// server1 -> server2.PeerSubscribe(server1) : server1 = 1, server2 = 1
|
||||
name: "Add 1 peer",
|
||||
wantServerOne: 1,
|
||||
wantServerTwo: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hubServer := server.MakeHubServer(ctx, args)
|
||||
hubServer2 := server.MakeHubServer(ctx, args2)
|
||||
metrics.PeersKnown.Set(0)
|
||||
go hubServer.Run()
|
||||
go hubServer2.Run()
|
||||
conn, err := grpc.Dial("localhost:"+strconv.Itoa(args.Port),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("did not connect: %v", err)
|
||||
}
|
||||
|
||||
c := pb.NewHubClient(conn)
|
||||
|
||||
msg := &pb.ServerMessage{
|
||||
Address: "0.0.0.0",
|
||||
Port: "50052",
|
||||
}
|
||||
|
||||
_, err = c.AddPeer(context.Background(), msg)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
got1 := hubServer.GetNumPeersExported()()
|
||||
got2 := hubServer2.GetNumPeersExported()()
|
||||
if got1 != tt.wantServerOne {
|
||||
t.Errorf("len(hubServer.PeerServers) = %d, want %d\n", got1, tt.wantServerOne)
|
||||
}
|
||||
if got2 != tt.wantServerTwo {
|
||||
t.Errorf("len(hubServer2.PeerServers) = %d, want %d\n", got2, tt.wantServerTwo)
|
||||
}
|
||||
hubServer.Stop()
|
||||
hubServer2.Stop()
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestAddPeerEndpoint2 tests the ability to add peers
|
||||
func TestAddPeerEndpoint2(t *testing.T) {
|
||||
ctx := stop.NewDebug()
|
||||
args := server.MakeDefaultTestArgs()
|
||||
args2 := server.MakeDefaultTestArgs()
|
||||
args3 := server.MakeDefaultTestArgs()
|
||||
args2.Port = 50052
|
||||
args3.Port = 50053
|
||||
args.DisableStartNotifier = false
|
||||
args2.DisableStartNotifier = false
|
||||
args3.DisableStartNotifier = false
|
||||
args2.NotifierPort = 18081
|
||||
args3.NotifierPort = 18082
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
wantServerOne int64
|
||||
wantServerTwo int64
|
||||
wantServerThree int64
|
||||
}{
|
||||
{
|
||||
name: "Add 2 peers",
|
||||
wantServerOne: 2,
|
||||
wantServerTwo: 2,
|
||||
wantServerThree: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hubServer := server.MakeHubServer(ctx, args)
|
||||
hubServer2 := server.MakeHubServer(ctx, args2)
|
||||
hubServer3 := server.MakeHubServer(ctx, args3)
|
||||
metrics.PeersKnown.Set(0)
|
||||
go hubServer.Run()
|
||||
go hubServer2.Run()
|
||||
go hubServer3.Run()
|
||||
conn, err := grpc.Dial("localhost:"+strconv.Itoa(args.Port),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("did not connect: %v", err)
|
||||
}
|
||||
|
||||
c := pb.NewHubClient(conn)
|
||||
|
||||
msg := &pb.ServerMessage{
|
||||
Address: "0.0.0.0",
|
||||
Port: "50052",
|
||||
}
|
||||
|
||||
msg2 := &pb.ServerMessage{
|
||||
Address: "0.0.0.0",
|
||||
Port: "50053",
|
||||
}
|
||||
|
||||
_, err = c.AddPeer(context.Background(), msg)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
_, err = c.AddPeer(context.Background(), msg2)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
got1 := hubServer.GetNumPeersExported()()
|
||||
got2 := hubServer2.GetNumPeersExported()()
|
||||
got3 := hubServer3.GetNumPeersExported()()
|
||||
if got1 != tt.wantServerOne {
|
||||
t.Errorf("len(hubServer.PeerServers) = %d, want %d\n", got1, tt.wantServerOne)
|
||||
}
|
||||
if got2 != tt.wantServerTwo {
|
||||
t.Errorf("len(hubServer2.PeerServers) = %d, want %d\n", got2, tt.wantServerTwo)
|
||||
}
|
||||
if got3 != tt.wantServerThree {
|
||||
t.Errorf("len(hubServer3.PeerServers) = %d, want %d\n", got3, tt.wantServerThree)
|
||||
}
|
||||
hubServer.Stop()
|
||||
hubServer2.Stop()
|
||||
hubServer3.Stop()
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestAddPeerEndpoint3 tests the ability to add peers
|
||||
func TestAddPeerEndpoint3(t *testing.T) {
|
||||
ctx := stop.NewDebug()
|
||||
args := server.MakeDefaultTestArgs()
|
||||
args2 := server.MakeDefaultTestArgs()
|
||||
args3 := server.MakeDefaultTestArgs()
|
||||
args2.Port = 50052
|
||||
args3.Port = 50053
|
||||
args.DisableStartNotifier = false
|
||||
args2.DisableStartNotifier = false
|
||||
args3.DisableStartNotifier = false
|
||||
args2.NotifierPort = 18081
|
||||
args3.NotifierPort = 18082
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
wantServerOne int64
|
||||
wantServerTwo int64
|
||||
wantServerThree int64
|
||||
}{
|
||||
{
|
||||
name: "Add 1 peer to each",
|
||||
wantServerOne: 2,
|
||||
wantServerTwo: 2,
|
||||
wantServerThree: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hubServer := server.MakeHubServer(ctx, args)
|
||||
hubServer2 := server.MakeHubServer(ctx, args2)
|
||||
hubServer3 := server.MakeHubServer(ctx, args3)
|
||||
metrics.PeersKnown.Set(0)
|
||||
go hubServer.Run()
|
||||
go hubServer2.Run()
|
||||
go hubServer3.Run()
|
||||
conn, err := grpc.Dial("localhost:"+strconv.Itoa(args.Port),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("did not connect: %v", err)
|
||||
}
|
||||
conn2, err := grpc.Dial("localhost:50052",
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("did not connect: %v", err)
|
||||
}
|
||||
|
||||
c := pb.NewHubClient(conn)
|
||||
c2 := pb.NewHubClient(conn2)
|
||||
|
||||
msg := &pb.ServerMessage{
|
||||
Address: "0.0.0.0",
|
||||
Port: "50052",
|
||||
}
|
||||
|
||||
msg2 := &pb.ServerMessage{
|
||||
Address: "0.0.0.0",
|
||||
Port: "50053",
|
||||
}
|
||||
|
||||
_, err = c.AddPeer(context.Background(), msg)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
_, err = c2.AddPeer(context.Background(), msg2)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
hubServer.Stop()
|
||||
hubServer2.Stop()
|
||||
hubServer3.Stop()
|
||||
got1 := hubServer.GetNumPeersExported()()
|
||||
got2 := hubServer2.GetNumPeersExported()()
|
||||
got3 := hubServer3.GetNumPeersExported()()
|
||||
if got1 != tt.wantServerOne {
|
||||
t.Errorf("len(hubServer.PeerServers) = %d, want %d\n", got1, tt.wantServerOne)
|
||||
}
|
||||
if got2 != tt.wantServerTwo {
|
||||
t.Errorf("len(hubServer2.PeerServers) = %d, want %d\n", got2, tt.wantServerTwo)
|
||||
}
|
||||
if got3 != tt.wantServerThree {
|
||||
t.Errorf("len(hubServer3.PeerServers) = %d, want %d\n", got3, tt.wantServerThree)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestAddPeer tests the ability to add peers
|
||||
func TestUDPServer(t *testing.T) {
|
||||
ctx := stop.NewDebug()
|
||||
args := server.MakeDefaultTestArgs()
|
||||
args2 := server.MakeDefaultTestArgs()
|
||||
args2.Port = 50052
|
||||
args.DisableStartUDP = false
|
||||
args2.DisableStartUDP = false
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "hubs hubServer external ip",
|
||||
want: "127.0.0.1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hubServer := server.MakeHubServer(ctx, args)
|
||||
hubServer2 := server.MakeHubServer(ctx, args2)
|
||||
go hubServer.Run()
|
||||
go hubServer2.Run()
|
||||
metrics.PeersKnown.Set(0)
|
||||
|
||||
peer := &server.Peer{
|
||||
Address: "0.0.0.0",
|
||||
Port: "50052",
|
||||
}
|
||||
|
||||
err := hubServer.AddPeerExported()(peer, true, true)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
hubServer.Stop()
|
||||
hubServer2.Stop()
|
||||
|
||||
got1 := hubServer.ExternalIP.String()
|
||||
if got1 != tt.want {
|
||||
t.Errorf("hubServer.ExternalIP = %s, want %s\n", got1, tt.want)
|
||||
t.Errorf("hubServer.Args.Port = %d\n", hubServer.Args.Port)
|
||||
}
|
||||
got2 := hubServer2.ExternalIP.String()
|
||||
if got2 != tt.want {
|
||||
t.Errorf("hubServer2.ExternalIP = %s, want %s\n", got2, tt.want)
|
||||
t.Errorf("hubServer2.Args.Port = %d\n", hubServer2.Args.Port)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
924
server/jsonrpc_blockchain.go
Normal file
924
server/jsonrpc_blockchain.go
Normal file
|
@ -0,0 +1,924 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/lbryio/herald.go/db"
|
||||
"github.com/lbryio/herald.go/internal"
|
||||
"github.com/lbryio/lbcd/chaincfg"
|
||||
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||
"github.com/lbryio/lbcd/txscript"
|
||||
"github.com/lbryio/lbcd/wire"
|
||||
"github.com/lbryio/lbcutil"
|
||||
"golang.org/x/exp/constraints"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// BlockchainBlockService methods handle "blockchain.block.*" RPCs
|
||||
type BlockchainBlockService struct {
|
||||
DB *db.ReadOnlyDBColumnFamily
|
||||
Chain *chaincfg.Params
|
||||
}
|
||||
|
||||
// BlockchainBlockService methods handle "blockchain.headers.*" RPCs
|
||||
type BlockchainHeadersService struct {
|
||||
DB *db.ReadOnlyDBColumnFamily
|
||||
Chain *chaincfg.Params
|
||||
// needed for subscribe/unsubscribe
|
||||
sessionMgr *sessionManager
|
||||
session *session
|
||||
}
|
||||
|
||||
// BlockchainAddressService methods handle "blockchain.address.*" RPCs
|
||||
type BlockchainAddressService struct {
|
||||
DB *db.ReadOnlyDBColumnFamily
|
||||
Chain *chaincfg.Params
|
||||
// needed for subscribe/unsubscribe
|
||||
sessionMgr *sessionManager
|
||||
session *session
|
||||
}
|
||||
|
||||
// BlockchainScripthashService methods handle "blockchain.scripthash.*" RPCs
|
||||
type BlockchainScripthashService struct {
|
||||
DB *db.ReadOnlyDBColumnFamily
|
||||
Chain *chaincfg.Params
|
||||
// needed for subscribe/unsubscribe
|
||||
sessionMgr *sessionManager
|
||||
session *session
|
||||
}
|
||||
|
||||
// BlockchainTransactionService methods handle "blockchain.transaction.*" RPCs
|
||||
type BlockchainTransactionService struct {
|
||||
DB *db.ReadOnlyDBColumnFamily
|
||||
Chain *chaincfg.Params
|
||||
// needed for broadcast TX
|
||||
sessionMgr *sessionManager
|
||||
}
|
||||
|
||||
const CHUNK_SIZE = 96
|
||||
const MAX_CHUNK_SIZE = 40960
|
||||
const HEADER_SIZE = wire.MaxBlockHeaderPayload
|
||||
const HASHX_LEN = 11
|
||||
|
||||
func min[Ord constraints.Ordered](x, y Ord) Ord {
|
||||
if x < y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
func max[Ord constraints.Ordered](x, y Ord) Ord {
|
||||
if x > y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
type BlockHeaderElectrum struct {
|
||||
Version uint32 `json:"version"`
|
||||
PrevBlockHash string `json:"prev_block_hash"`
|
||||
MerkleRoot string `json:"merkle_root"`
|
||||
ClaimTrieRoot string `json:"claim_trie_root"`
|
||||
Timestamp uint32 `json:"timestamp"`
|
||||
Bits uint32 `json:"bits"`
|
||||
Nonce uint32 `json:"nonce"`
|
||||
BlockHeight uint32 `json:"block_height"`
|
||||
}
|
||||
|
||||
func newBlockHeaderElectrum(header *[HEADER_SIZE]byte, height uint32) *BlockHeaderElectrum {
|
||||
var h1, h2, h3 chainhash.Hash
|
||||
h1.SetBytes(header[4:36])
|
||||
h2.SetBytes(header[36:68])
|
||||
h3.SetBytes(header[68:100])
|
||||
return &BlockHeaderElectrum{
|
||||
Version: binary.LittleEndian.Uint32(header[0:]),
|
||||
PrevBlockHash: h1.String(),
|
||||
MerkleRoot: h2.String(),
|
||||
ClaimTrieRoot: h3.String(),
|
||||
Timestamp: binary.LittleEndian.Uint32(header[100:]),
|
||||
Bits: binary.LittleEndian.Uint32(header[104:]),
|
||||
Nonce: binary.LittleEndian.Uint32(header[108:]),
|
||||
BlockHeight: height,
|
||||
}
|
||||
}
|
||||
|
||||
type BlockGetServerHeightReq struct{}
|
||||
type BlockGetServerHeightResp uint32
|
||||
|
||||
// blockchain.block.get_server_height
|
||||
func (s *BlockchainBlockService) Get_server_height(req *BlockGetServerHeightReq, resp **BlockGetServerHeightResp) error {
|
||||
if s.DB == nil || s.DB.LastState == nil {
|
||||
return fmt.Errorf("unknown height")
|
||||
}
|
||||
result := BlockGetServerHeightResp(s.DB.LastState.Height)
|
||||
*resp = &result
|
||||
return nil
|
||||
}
|
||||
|
||||
type BlockGetChunkReq uint32
|
||||
type BlockGetChunkResp string
|
||||
|
||||
// 'blockchain.block.get_chunk'
|
||||
func (s *BlockchainBlockService) Get_chunk(req *BlockGetChunkReq, resp **BlockGetChunkResp) error {
|
||||
index := uint32(*req)
|
||||
db_headers, err := s.DB.GetHeaders(index*CHUNK_SIZE, CHUNK_SIZE)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
raw := make([]byte, 0, HEADER_SIZE*len(db_headers))
|
||||
for _, h := range db_headers {
|
||||
raw = append(raw, h[:]...)
|
||||
}
|
||||
headers := BlockGetChunkResp(hex.EncodeToString(raw))
|
||||
*resp = &headers
|
||||
return err
|
||||
}
|
||||
|
||||
type BlockGetHeaderReq uint32
|
||||
type BlockGetHeaderResp struct {
|
||||
BlockHeaderElectrum
|
||||
}
|
||||
|
||||
// 'blockchain.block.get_header'
|
||||
func (s *BlockchainBlockService) Get_header(req *BlockGetHeaderReq, resp **BlockGetHeaderResp) error {
|
||||
height := uint32(*req)
|
||||
headers, err := s.DB.GetHeaders(height, 1)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
if len(headers) < 1 {
|
||||
return errors.New("not found")
|
||||
}
|
||||
*resp = &BlockGetHeaderResp{*newBlockHeaderElectrum(&headers[0], height)}
|
||||
return err
|
||||
}
|
||||
|
||||
type BlockHeadersReq struct {
|
||||
StartHeight uint32 `json:"start_height"`
|
||||
Count uint32 `json:"count"`
|
||||
CpHeight uint32 `json:"cp_height"`
|
||||
B64 bool `json:"b64"`
|
||||
}
|
||||
|
||||
func (req *BlockHeadersReq) UnmarshalJSON(b []byte) error {
|
||||
var params [4]interface{}
|
||||
err := json.Unmarshal(b, ¶ms)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch params[0].(type) {
|
||||
case float64:
|
||||
req.StartHeight = uint32(params[0].(float64))
|
||||
default:
|
||||
return fmt.Errorf("expected numeric argument #0 (start_height)")
|
||||
}
|
||||
switch params[1].(type) {
|
||||
case float64:
|
||||
req.Count = uint32(params[1].(float64))
|
||||
default:
|
||||
return fmt.Errorf("expected numeric argument #1 (count)")
|
||||
}
|
||||
switch params[2].(type) {
|
||||
case float64:
|
||||
req.CpHeight = uint32(params[2].(float64))
|
||||
default:
|
||||
return fmt.Errorf("expected numeric argument #2 (cp_height)")
|
||||
}
|
||||
switch params[3].(type) {
|
||||
case bool:
|
||||
req.B64 = params[3].(bool)
|
||||
default:
|
||||
return fmt.Errorf("expected boolean argument #3 (b64)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type BlockHeadersResp struct {
|
||||
Base64 string `json:"base64,omitempty"`
|
||||
Hex string `json:"hex"`
|
||||
Count uint32 `json:"count"`
|
||||
Max uint32 `json:"max"`
|
||||
Branch string `json:"branch,omitempty"`
|
||||
Root string `json:"root,omitempty"`
|
||||
}
|
||||
|
||||
// 'blockchain.block.headers'
|
||||
func (s *BlockchainBlockService) Headers(req *BlockHeadersReq, resp **BlockHeadersResp) error {
|
||||
count := min(req.Count, MAX_CHUNK_SIZE)
|
||||
db_headers, err := s.DB.GetHeaders(req.StartHeight, count)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
count = uint32(len(db_headers))
|
||||
raw := make([]byte, 0, HEADER_SIZE*count)
|
||||
for _, h := range db_headers {
|
||||
raw = append(raw, h[:]...)
|
||||
}
|
||||
result := &BlockHeadersResp{
|
||||
Count: count,
|
||||
Max: MAX_CHUNK_SIZE,
|
||||
}
|
||||
if req.B64 {
|
||||
zipped := bytes.Buffer{}
|
||||
w := zlib.NewWriter(&zipped)
|
||||
w.Write(raw)
|
||||
w.Close()
|
||||
result.Base64 = base64.StdEncoding.EncodeToString(zipped.Bytes())
|
||||
} else {
|
||||
result.Hex = hex.EncodeToString(raw)
|
||||
}
|
||||
if count > 0 && req.CpHeight > 0 {
|
||||
// TODO
|
||||
//last_height := height + count - 1
|
||||
}
|
||||
*resp = result
|
||||
return err
|
||||
}
|
||||
|
||||
type HeadersSubscribeReq struct {
|
||||
Raw bool `json:"raw"`
|
||||
}
|
||||
|
||||
func (req *HeadersSubscribeReq) UnmarshalJSON(b []byte) error {
|
||||
var params [1]interface{}
|
||||
err := json.Unmarshal(b, ¶ms)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch params[0].(type) {
|
||||
case bool:
|
||||
req.Raw = params[0].(bool)
|
||||
default:
|
||||
return fmt.Errorf("expected bool argument #0 (raw)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type HeadersSubscribeResp struct {
|
||||
BlockHeaderElectrum
|
||||
}
|
||||
type HeadersSubscribeRawResp struct {
|
||||
Hex string `json:"hex"`
|
||||
Height uint32 `json:"height"`
|
||||
}
|
||||
|
||||
// 'blockchain.headers.subscribe'
|
||||
func (s *BlockchainHeadersService) Subscribe(req *HeadersSubscribeReq, resp *interface{}) error {
|
||||
if s.sessionMgr == nil || s.session == nil {
|
||||
return errors.New("no session, rpc not supported")
|
||||
}
|
||||
s.sessionMgr.headersSubscribe(s.session, req.Raw, true /*subscribe*/)
|
||||
height := s.DB.Height
|
||||
if s.DB.LastState != nil {
|
||||
height = s.DB.LastState.Height
|
||||
}
|
||||
headers, err := s.DB.GetHeaders(height, 1)
|
||||
if err != nil {
|
||||
s.sessionMgr.headersSubscribe(s.session, req.Raw, false /*subscribe*/)
|
||||
return err
|
||||
}
|
||||
if len(headers) < 1 {
|
||||
return errors.New("not found")
|
||||
}
|
||||
if req.Raw {
|
||||
*resp = &HeadersSubscribeRawResp{
|
||||
Hex: hex.EncodeToString(headers[0][:]),
|
||||
Height: height,
|
||||
}
|
||||
} else {
|
||||
*resp = &HeadersSubscribeResp{*newBlockHeaderElectrum(&headers[0], height)}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func decodeScriptHash(scripthash string) ([]byte, error) {
|
||||
sh, err := hex.DecodeString(scripthash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(sh) != chainhash.HashSize {
|
||||
return nil, fmt.Errorf("invalid scripthash: %v (length %v)", scripthash, len(sh))
|
||||
}
|
||||
internal.ReverseBytesInPlace(sh)
|
||||
return sh, nil
|
||||
}
|
||||
|
||||
func hashX(scripthash []byte) []byte {
|
||||
return scripthash[:HASHX_LEN]
|
||||
}
|
||||
|
||||
func hashXScript(script []byte, coin *chaincfg.Params) []byte {
|
||||
if _, err := txscript.ExtractClaimScript(script); err == nil {
|
||||
baseScript := txscript.StripClaimScriptPrefix(script)
|
||||
if class, addrs, _, err := txscript.ExtractPkScriptAddrs(baseScript, coin); err == nil {
|
||||
switch class {
|
||||
case txscript.PubKeyHashTy, txscript.ScriptHashTy, txscript.PubKeyTy:
|
||||
script, _ := txscript.PayToAddrScript(addrs[0])
|
||||
return hashXScript(script, coin)
|
||||
}
|
||||
}
|
||||
}
|
||||
sum := sha256.Sum256(script)
|
||||
return sum[:HASHX_LEN]
|
||||
}
|
||||
|
||||
type AddressGetBalanceReq struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
type AddressGetBalanceResp struct {
|
||||
Confirmed uint64 `json:"confirmed"`
|
||||
Unconfirmed uint64 `json:"unconfirmed"`
|
||||
}
|
||||
|
||||
// 'blockchain.address.get_balance'
|
||||
func (s *BlockchainAddressService) Get_balance(req *AddressGetBalanceReq, resp **AddressGetBalanceResp) error {
|
||||
address, err := lbcutil.DecodeAddress(req.Address, s.Chain)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
script, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
hashX := hashXScript(script, s.Chain)
|
||||
confirmed, unconfirmed, err := s.DB.GetBalance(hashX)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
*resp = &AddressGetBalanceResp{confirmed, unconfirmed}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type scripthashGetBalanceReq struct {
|
||||
ScriptHash string `json:"scripthash"`
|
||||
}
|
||||
type ScripthashGetBalanceResp struct {
|
||||
Confirmed uint64 `json:"confirmed"`
|
||||
Unconfirmed uint64 `json:"unconfirmed"`
|
||||
}
|
||||
|
||||
// 'blockchain.scripthash.get_balance'
|
||||
func (s *BlockchainScripthashService) Get_balance(req *scripthashGetBalanceReq, resp **ScripthashGetBalanceResp) error {
|
||||
scripthash, err := decodeScriptHash(req.ScriptHash)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
hashX := hashX(scripthash)
|
||||
confirmed, unconfirmed, err := s.DB.GetBalance(hashX)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
*resp = &ScripthashGetBalanceResp{confirmed, unconfirmed}
|
||||
return err
|
||||
}
|
||||
|
||||
type AddressGetHistoryReq struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
|
||||
func (req *AddressGetHistoryReq) UnmarshalJSON(b []byte) error {
|
||||
var params [1]interface{}
|
||||
json.Unmarshal(b, ¶ms)
|
||||
err := json.Unmarshal(b, ¶ms)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch params[0].(type) {
|
||||
case string:
|
||||
req.Address = params[0].(string)
|
||||
default:
|
||||
return fmt.Errorf("expected string argument #0 (address)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type TxInfo struct {
|
||||
TxHash string `json:"tx_hash"`
|
||||
Height uint32 `json:"height"`
|
||||
}
|
||||
type TxInfoFee struct {
|
||||
TxInfo
|
||||
Fee uint64 `json:"fee"`
|
||||
}
|
||||
type AddressGetHistoryResp []TxInfoFee
|
||||
|
||||
// 'blockchain.address.get_history'
|
||||
func (s *BlockchainAddressService) Get_history(req *AddressGetHistoryReq, resp **AddressGetHistoryResp) error {
|
||||
address, err := lbcutil.DecodeAddress(req.Address, s.Chain)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
script, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
hashX := hashXScript(script, s.Chain)
|
||||
dbTXs, err := s.DB.GetHistory(hashX)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
confirmed := make([]TxInfo, 0, len(dbTXs))
|
||||
for _, tx := range dbTXs {
|
||||
confirmed = append(confirmed,
|
||||
TxInfo{
|
||||
TxHash: tx.TxHash.String(),
|
||||
Height: tx.Height,
|
||||
})
|
||||
}
|
||||
unconfirmed := []TxInfoFee{} // TODO
|
||||
result := make(AddressGetHistoryResp, len(confirmed)+len(unconfirmed))
|
||||
i := 0
|
||||
for _, tx := range confirmed {
|
||||
result[i].TxInfo = tx
|
||||
i += 1
|
||||
}
|
||||
for _, tx := range unconfirmed {
|
||||
result[i] = tx
|
||||
i += 1
|
||||
}
|
||||
*resp = &result
|
||||
return err
|
||||
}
|
||||
|
||||
type ScripthashGetHistoryReq struct {
|
||||
ScriptHash string `json:"scripthash"`
|
||||
}
|
||||
type ScripthashGetHistoryResp struct {
|
||||
Confirmed []TxInfo `json:"confirmed"`
|
||||
Unconfirmed []TxInfoFee `json:"unconfirmed"`
|
||||
}
|
||||
|
||||
// 'blockchain.scripthash.get_history'
|
||||
func (s *BlockchainScripthashService) Get_history(req *ScripthashGetHistoryReq, resp **ScripthashGetHistoryResp) error {
|
||||
scripthash, err := decodeScriptHash(req.ScriptHash)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
hashX := hashX(scripthash)
|
||||
dbTXs, err := s.DB.GetHistory(hashX)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
confirmed := make([]TxInfo, 0, len(dbTXs))
|
||||
for _, tx := range dbTXs {
|
||||
confirmed = append(confirmed,
|
||||
TxInfo{
|
||||
TxHash: tx.TxHash.String(),
|
||||
Height: tx.Height,
|
||||
})
|
||||
}
|
||||
result := &ScripthashGetHistoryResp{
|
||||
Confirmed: confirmed,
|
||||
Unconfirmed: []TxInfoFee{}, // TODO
|
||||
}
|
||||
*resp = result
|
||||
return err
|
||||
}
|
||||
|
||||
type AddressGetMempoolReq struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
type AddressGetMempoolResp []TxInfoFee
|
||||
|
||||
// 'blockchain.address.get_mempool'
|
||||
func (s *BlockchainAddressService) Get_mempool(req *AddressGetMempoolReq, resp **AddressGetMempoolResp) error {
|
||||
address, err := lbcutil.DecodeAddress(req.Address, s.Chain)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
script, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
hashX := hashXScript(script, s.Chain)
|
||||
// TODO...
|
||||
internal.ReverseBytesInPlace(hashX)
|
||||
unconfirmed := make([]TxInfoFee, 0, 100)
|
||||
result := AddressGetMempoolResp(unconfirmed)
|
||||
*resp = &result
|
||||
return err
|
||||
}
|
||||
|
||||
type ScripthashGetMempoolReq struct {
|
||||
ScriptHash string `json:"scripthash"`
|
||||
}
|
||||
type ScripthashGetMempoolResp []TxInfoFee
|
||||
|
||||
// 'blockchain.scripthash.get_mempool'
|
||||
func (s *BlockchainScripthashService) Get_mempool(req *ScripthashGetMempoolReq, resp **ScripthashGetMempoolResp) error {
|
||||
scripthash, err := decodeScriptHash(req.ScriptHash)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
hashX := hashX(scripthash)
|
||||
// TODO...
|
||||
internal.ReverseBytesInPlace(hashX)
|
||||
unconfirmed := make([]TxInfoFee, 0, 100)
|
||||
result := ScripthashGetMempoolResp(unconfirmed)
|
||||
*resp = &result
|
||||
return err
|
||||
}
|
||||
|
||||
type AddressListUnspentReq struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
type TXOInfo struct {
|
||||
TxHash string `json:"tx_hash"`
|
||||
TxPos uint16 `json:"tx_pos"`
|
||||
Height uint32 `json:"height"`
|
||||
Value uint64 `json:"value"`
|
||||
}
|
||||
type AddressListUnspentResp []TXOInfo
|
||||
|
||||
// 'blockchain.address.listunspent'
|
||||
func (s *BlockchainAddressService) Listunspent(req *AddressListUnspentReq, resp **AddressListUnspentResp) error {
|
||||
address, err := lbcutil.DecodeAddress(req.Address, s.Chain)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
script, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
hashX := hashXScript(script, s.Chain)
|
||||
dbTXOs, err := s.DB.GetUnspent(hashX)
|
||||
unspent := make([]TXOInfo, 0, len(dbTXOs))
|
||||
for _, txo := range dbTXOs {
|
||||
unspent = append(unspent,
|
||||
TXOInfo{
|
||||
TxHash: txo.TxHash.String(),
|
||||
TxPos: txo.TxPos,
|
||||
Height: txo.Height,
|
||||
Value: txo.Value,
|
||||
})
|
||||
}
|
||||
result := AddressListUnspentResp(unspent)
|
||||
*resp = &result
|
||||
return err
|
||||
}
|
||||
|
||||
type ScripthashListUnspentReq struct {
|
||||
ScriptHash string `json:"scripthash"`
|
||||
}
|
||||
type ScripthashListUnspentResp []TXOInfo
|
||||
|
||||
// 'blockchain.scripthash.listunspent'
|
||||
func (s *BlockchainScripthashService) Listunspent(req *ScripthashListUnspentReq, resp **ScripthashListUnspentResp) error {
|
||||
scripthash, err := decodeScriptHash(req.ScriptHash)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return err
|
||||
}
|
||||
hashX := hashX(scripthash)
|
||||
dbTXOs, err := s.DB.GetUnspent(hashX)
|
||||
unspent := make([]TXOInfo, 0, len(dbTXOs))
|
||||
for _, txo := range dbTXOs {
|
||||
unspent = append(unspent,
|
||||
TXOInfo{
|
||||
TxHash: txo.TxHash.String(),
|
||||
TxPos: txo.TxPos,
|
||||
Height: txo.Height,
|
||||
Value: txo.Value,
|
||||
})
|
||||
}
|
||||
result := ScripthashListUnspentResp(unspent)
|
||||
*resp = &result
|
||||
return err
|
||||
}
|
||||
|
||||
type AddressSubscribeReq []string
|
||||
type AddressSubscribeResp []string
|
||||
|
||||
// 'blockchain.address.subscribe'
|
||||
func (s *BlockchainAddressService) Subscribe(req *AddressSubscribeReq, resp **AddressSubscribeResp) error {
|
||||
if s.sessionMgr == nil || s.session == nil {
|
||||
return errors.New("no session, rpc not supported")
|
||||
}
|
||||
result := make([]string, 0, len(*req))
|
||||
for _, addr := range *req {
|
||||
address, err := lbcutil.DecodeAddress(addr, s.Chain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
script, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hashX := hashXScript(script, s.Chain)
|
||||
s.sessionMgr.hashXSubscribe(s.session, hashX, addr, true /*subscribe*/)
|
||||
status, err := s.DB.GetStatus(hashX)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result = append(result, hex.EncodeToString(status))
|
||||
}
|
||||
*resp = (*AddressSubscribeResp)(&result)
|
||||
return nil
|
||||
}
|
||||
|
||||
// 'blockchain.address.unsubscribe'
|
||||
func (s *BlockchainAddressService) Unsubscribe(req *AddressSubscribeReq, resp **AddressSubscribeResp) error {
|
||||
if s.sessionMgr == nil || s.session == nil {
|
||||
return errors.New("no session, rpc not supported")
|
||||
}
|
||||
for _, addr := range *req {
|
||||
address, err := lbcutil.DecodeAddress(addr, s.Chain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
script, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hashX := hashXScript(script, s.Chain)
|
||||
s.sessionMgr.hashXSubscribe(s.session, hashX, addr, false /*subscribe*/)
|
||||
}
|
||||
*resp = (*AddressSubscribeResp)(nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
type ScripthashSubscribeReq string
|
||||
type ScripthashSubscribeResp string
|
||||
|
||||
// 'blockchain.scripthash.subscribe'
|
||||
func (s *BlockchainScripthashService) Subscribe(req *ScripthashSubscribeReq, resp **ScripthashSubscribeResp) error {
|
||||
if s.sessionMgr == nil || s.session == nil {
|
||||
return errors.New("no session, rpc not supported")
|
||||
}
|
||||
var result string
|
||||
scripthash, err := decodeScriptHash(string(*req))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hashX := hashX(scripthash)
|
||||
s.sessionMgr.hashXSubscribe(s.session, hashX, string(*req), true /*subscribe*/)
|
||||
|
||||
status, err := s.DB.GetStatus(hashX)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result = hex.EncodeToString(status)
|
||||
*resp = (*ScripthashSubscribeResp)(&result)
|
||||
return nil
|
||||
}
|
||||
|
||||
// 'blockchain.scripthash.unsubscribe'
|
||||
func (s *BlockchainScripthashService) Unsubscribe(req *ScripthashSubscribeReq, resp **ScripthashSubscribeResp) error {
|
||||
if s.sessionMgr == nil || s.session == nil {
|
||||
return errors.New("no session, rpc not supported")
|
||||
}
|
||||
scripthash, err := decodeScriptHash(string(*req))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hashX := hashX(scripthash)
|
||||
s.sessionMgr.hashXSubscribe(s.session, hashX, string(*req), false /*subscribe*/)
|
||||
*resp = (*ScripthashSubscribeResp)(nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
type TransactionBroadcastReq [1]string
|
||||
type TransactionBroadcastResp string
|
||||
|
||||
// 'blockchain.transaction.broadcast'
|
||||
func (s *BlockchainTransactionService) Broadcast(req *TransactionBroadcastReq, resp **TransactionBroadcastResp) error {
|
||||
if s.sessionMgr == nil {
|
||||
return errors.New("no session manager, rpc not supported")
|
||||
}
|
||||
strTx := string((*req)[0])
|
||||
rawTx, err := hex.DecodeString(strTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txhash, err := s.sessionMgr.broadcastTx(rawTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result := txhash.String()
|
||||
*resp = (*TransactionBroadcastResp)(&result)
|
||||
return nil
|
||||
}
|
||||
|
||||
type TransactionGetReq string
|
||||
type TXFullDetail struct {
|
||||
Height int `json:"block_height"`
|
||||
Pos uint32 `json:"pos"`
|
||||
Merkle []string `json:"merkle"`
|
||||
}
|
||||
type TXDetail struct {
|
||||
Height int `json:"block_height"`
|
||||
}
|
||||
|
||||
type TXGetItem struct {
|
||||
TxHash string
|
||||
TxRaw string
|
||||
Detail interface{} // TXFullDetail or TXDetail struct
|
||||
}
|
||||
type TransactionGetResp TXGetItem
|
||||
|
||||
// 'blockchain.transaction.get'
|
||||
func (s *BlockchainTransactionService) Get(req *TransactionGetReq, resp **TransactionGetResp) error {
|
||||
txids := [1]string{string(*req)}
|
||||
request := TransactionGetBatchReq(txids[:])
|
||||
var response *TransactionGetBatchResp
|
||||
err := s.Get_batch(&request, &response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(*response) < 1 {
|
||||
return errors.New("tx not found")
|
||||
}
|
||||
switch (*response)[0].Detail.(type) {
|
||||
case TXFullDetail:
|
||||
break
|
||||
case TXDetail:
|
||||
default:
|
||||
return errors.New("tx not confirmed")
|
||||
}
|
||||
*resp = (*TransactionGetResp)(&(*response)[0])
|
||||
return err
|
||||
}
|
||||
|
||||
type TransactionGetBatchReq []string
|
||||
|
||||
func (req *TransactionGetBatchReq) UnmarshalJSON(b []byte) error {
|
||||
var params []interface{}
|
||||
json.Unmarshal(b, ¶ms)
|
||||
if len(params) > 100 {
|
||||
return fmt.Errorf("too many tx hashes in request: %v", len(params))
|
||||
}
|
||||
for i, txhash := range params {
|
||||
switch params[0].(type) {
|
||||
case string:
|
||||
*req = append(*req, txhash.(string))
|
||||
default:
|
||||
return fmt.Errorf("expected string argument #%d (tx_hash)", i)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type TransactionGetBatchResp []TXGetItem
|
||||
|
||||
func (resp *TransactionGetBatchResp) MarshalJSON() ([]byte, error) {
|
||||
// encode key/value pairs as variable length JSON object
|
||||
var buf bytes.Buffer
|
||||
enc := json.NewEncoder(&buf)
|
||||
buf.WriteString("{")
|
||||
for i, r := range *resp {
|
||||
if i > 0 {
|
||||
buf.WriteString(",")
|
||||
}
|
||||
txhash, raw, detail := r.TxHash, r.TxRaw, r.Detail
|
||||
err := enc.Encode(txhash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf.WriteString(":[")
|
||||
err = enc.Encode(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf.WriteString(",")
|
||||
err = enc.Encode(detail)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf.WriteString("]")
|
||||
}
|
||||
buf.WriteString("}")
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// 'blockchain.transaction.get_batch'
|
||||
func (s *BlockchainTransactionService) Get_batch(req *TransactionGetBatchReq, resp **TransactionGetBatchResp) error {
|
||||
if len(*req) > 100 {
|
||||
return fmt.Errorf("too many tx hashes in request: %v", len(*req))
|
||||
}
|
||||
tx_hashes := make([]chainhash.Hash, 0, len(*req))
|
||||
for i, txid := range *req {
|
||||
tx_hashes = append(tx_hashes, chainhash.Hash{})
|
||||
if err := chainhash.Decode(&tx_hashes[i], txid); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
dbResult, err := s.DB.GetTxMerkle(tx_hashes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result := make([]TXGetItem, 0, len(dbResult))
|
||||
for _, r := range dbResult {
|
||||
merkles := make([]string, len(r.Merkle))
|
||||
for i, m := range r.Merkle {
|
||||
merkles[i] = m.String()
|
||||
}
|
||||
detail := TXFullDetail{
|
||||
Height: r.Height,
|
||||
Pos: r.Pos,
|
||||
Merkle: merkles,
|
||||
}
|
||||
result = append(result, TXGetItem{r.TxHash.String(), hex.EncodeToString(r.RawTx), &detail})
|
||||
}
|
||||
*resp = (*TransactionGetBatchResp)(&result)
|
||||
return err
|
||||
}
|
||||
|
||||
type TransactionGetMerkleReq struct {
|
||||
TxHash string `json:"tx_hash"`
|
||||
Height uint32 `json:"height"`
|
||||
}
|
||||
type TransactionGetMerkleResp TXGetItem
|
||||
|
||||
// 'blockchain.transaction.get_merkle'
|
||||
func (s *BlockchainTransactionService) Get_merkle(req *TransactionGetMerkleReq, resp **TransactionGetMerkleResp) error {
|
||||
txids := [1]string{string(req.TxHash)}
|
||||
request := TransactionGetBatchReq(txids[:])
|
||||
var response *TransactionGetBatchResp
|
||||
err := s.Get_batch(&request, &response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(*response) < 1 {
|
||||
return errors.New("tx not found")
|
||||
}
|
||||
switch (*response)[0].Detail.(type) {
|
||||
case TXFullDetail:
|
||||
break
|
||||
case TXDetail:
|
||||
default:
|
||||
return errors.New("tx not confirmed")
|
||||
}
|
||||
*resp = (*TransactionGetMerkleResp)(&(*response)[0])
|
||||
return err
|
||||
}
|
||||
|
||||
type TransactionGetHeightReq string
|
||||
type TransactionGetHeightResp uint32
|
||||
|
||||
// 'blockchain.transaction.get_height'
|
||||
func (s *BlockchainTransactionService) Get_height(req *TransactionGetHeightReq, resp **TransactionGetHeightResp) error {
|
||||
txid := string(*(req))
|
||||
txhash, err := chainhash.NewHashFromStr(txid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
height, err := s.DB.GetTxHeight(txhash)
|
||||
*resp = (*TransactionGetHeightResp)(&height)
|
||||
return err
|
||||
}
|
||||
|
||||
type TransactionInfoReq string
|
||||
type TransactionInfoResp TXGetItem
|
||||
|
||||
// 'blockchain.transaction.info'
|
||||
func (s *BlockchainTransactionService) Info(req *TransactionInfoReq, resp **TransactionInfoResp) error {
|
||||
txids := [1]string{string(*req)}
|
||||
request := TransactionGetBatchReq(txids[:])
|
||||
var response *TransactionGetBatchResp
|
||||
err := s.Get_batch(&request, &response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(*response) < 1 {
|
||||
return errors.New("tx not found")
|
||||
}
|
||||
switch (*response)[0].Detail.(type) {
|
||||
case TXFullDetail:
|
||||
break
|
||||
case TXDetail:
|
||||
default:
|
||||
if (*response)[0].TxHash == "" {
|
||||
return errors.New("no such mempool or blockchain transaction")
|
||||
}
|
||||
}
|
||||
*resp = (*TransactionInfoResp)(&(*response)[0])
|
||||
return err
|
||||
}
|
469
server/jsonrpc_blockchain_test.go
Normal file
469
server/jsonrpc_blockchain_test.go
Normal file
|
@ -0,0 +1,469 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/lbryio/herald.go/db"
|
||||
"github.com/lbryio/herald.go/internal"
|
||||
"github.com/lbryio/lbcd/chaincfg"
|
||||
"github.com/lbryio/lbcd/txscript"
|
||||
"github.com/lbryio/lbcutil"
|
||||
"github.com/lbryio/lbry.go/v3/extras/stop"
|
||||
)
|
||||
|
||||
// Source: test_variety_of_transactions_and_longish_history (lbry-sdk/tests/integration/transactions)
|
||||
const regTestDBPath = "../testdata/test_variety_of_transactions/lbry-rocksdb"
|
||||
const regTestHeight = 502
|
||||
|
||||
var regTestAddrs = [30]string{
|
||||
"mtgiQkd35xpx3TaZ4RBNirf3uSMQ8tXQ7z",
|
||||
"mqMjBtzGTtRty7Y54RqeNLk9QE8rYUfpm3",
|
||||
"n2q8ASDZmib4adu2eU4dPvVvjYeU97pks4",
|
||||
"mzxYWTJogAtduNaeyH9pSSmBSPkJj33HDJ",
|
||||
"mweCKeZkeUUi8RQdHry3Mziphb87vCwiiW",
|
||||
"mp7ZuiZgBNJHFX6DVmeZrCj8SuzVQNDLwb",
|
||||
"n2zZoBocGCcxe6jFo1anbbAsUFMPXdYfnY",
|
||||
"msps28KwRJF77DxhzqD98prdwCrZwdUxJc",
|
||||
"mjvkjuss63pq2mpsRn4Q5tsNKVMLG9qUt7",
|
||||
"miF9cJn8HiX6vsorRDXtZEgcW7BeWowqkX",
|
||||
"mx87wRYFchYaLjXyNaboMuEMRLRboFSPDD",
|
||||
"mhvb94idtQvTSCQk9EB16wLLkSrbWizPRG",
|
||||
"mx3Fu8FDM4nKR9VYtHWPtSGKVt1D588Ay1",
|
||||
"mhqvhX7kLNQ2bUNWZxMhE1z6QEJKrqdV8T",
|
||||
"mgekw8L4xEezFtkYdSarL4sk5Sc8n9UtzG",
|
||||
"myhFrTz99ZHwbGo7qV4D7fJKfji7YJ3vZ8",
|
||||
"mnf8UCVoo6DBq6Tg4QpnFFdV1mFVHi43TF",
|
||||
"mn7hKyh6EA8oLAPkvTd9vPEgzLRejLxkj2",
|
||||
"msfarwFff7LX6DkXk295x3YMnJtR5Yw8uy",
|
||||
"mn8sUv6ryiLn4kzssBTqNaB1oL6qcKDzJ4",
|
||||
"mhwgeQFyi1z1RxNR1CphE8PcwG2xBWcxDp",
|
||||
"n2jKpDXhVaQHiKqhdQYwwykhoYtKtbh8P1",
|
||||
"mhnt4btqpAuiNwjAfFxPEaA4ekCE8faRYN",
|
||||
"mmTFCt6Du1VsdxSKc7f21vYsT75KnRy7NM",
|
||||
"mm1nx1xSmgRponM5tmdq15KREa7f6M36La",
|
||||
"mxMXmMKUqoj19hxEA5r3hZJgirT6nCQh14",
|
||||
"mx2L4iqNGzpuNNsDmjvCpcomefDWLAjdv1",
|
||||
"mohJcUzQdCYL7nEySKNQC8PUzowNS5gGvo",
|
||||
"mjv1vErZiDXsh9TvBDGCBpzobZx7aVYuy7",
|
||||
"mwDPTZzHsM6p1DfDnBeojDLRCDceTcejkT",
|
||||
}
|
||||
|
||||
// const dbPath := "/Users/swdev1/hub/scribe_db.599529/lbry-rocksdb"
|
||||
// const dbPath := "/mnt/d/data/snapshot_1072108/lbry-rocksdb"
|
||||
|
||||
func TestServerGetHeight(t *testing.T) {
|
||||
secondaryPath := "asdf"
|
||||
grp := stop.NewDebug()
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
s := &BlockchainBlockService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
}
|
||||
|
||||
req := BlockGetServerHeightReq{}
|
||||
var resp *BlockGetServerHeightResp
|
||||
err = s.Get_server_height(&req, &resp)
|
||||
if err != nil {
|
||||
t.Errorf("handler err: %v", err)
|
||||
}
|
||||
marshalled, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("unmarshal err: %v", err)
|
||||
}
|
||||
t.Logf("resp: %v", string(marshalled))
|
||||
if string(marshalled) != strconv.FormatInt(regTestHeight, 10) {
|
||||
t.Errorf("bad height: %v", string(marshalled))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetChunk(t *testing.T) {
|
||||
secondaryPath := "asdf"
|
||||
grp := stop.NewDebug()
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
s := &BlockchainBlockService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
}
|
||||
|
||||
for index := 0; index < 10; index++ {
|
||||
req := BlockGetChunkReq(index)
|
||||
var resp *BlockGetChunkResp
|
||||
err := s.Get_chunk(&req, &resp)
|
||||
if err != nil {
|
||||
t.Errorf("index: %v handler err: %v", index, err)
|
||||
}
|
||||
marshalled, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("index: %v unmarshal err: %v", index, err)
|
||||
}
|
||||
t.Logf("index: %v resp: %v", index, string(marshalled))
|
||||
switch index {
|
||||
case 0, 1, 2, 3, 4:
|
||||
if len(*resp) != (CHUNK_SIZE * HEADER_SIZE * 2) {
|
||||
t.Errorf("index: %v bad length: %v", index, len(*resp))
|
||||
}
|
||||
case 5:
|
||||
if len(*resp) != 23*112*2 {
|
||||
t.Errorf("index: %v bad length: %v", index, len(*resp))
|
||||
}
|
||||
default:
|
||||
if len(*resp) != 0 {
|
||||
t.Errorf("index: %v bad length: %v", index, len(*resp))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHeader(t *testing.T) {
|
||||
secondaryPath := "asdf"
|
||||
grp := stop.NewDebug()
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
s := &BlockchainBlockService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
}
|
||||
|
||||
for height := 0; height < 700; height += 100 {
|
||||
req := BlockGetHeaderReq(height)
|
||||
var resp *BlockGetHeaderResp
|
||||
err := s.Get_header(&req, &resp)
|
||||
if err != nil && height <= 500 {
|
||||
t.Errorf("height: %v handler err: %v", height, err)
|
||||
}
|
||||
marshalled, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("height: %v unmarshal err: %v", height, err)
|
||||
}
|
||||
t.Logf("height: %v resp: %v", height, string(marshalled))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaders(t *testing.T) {
|
||||
secondaryPath := "asdf"
|
||||
grp := stop.NewDebug()
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
s := &BlockchainBlockService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
}
|
||||
|
||||
for height := uint32(0); height < 700; height += 100 {
|
||||
req := BlockHeadersReq{
|
||||
StartHeight: height,
|
||||
Count: 1,
|
||||
CpHeight: 0,
|
||||
B64: false,
|
||||
}
|
||||
var resp *BlockHeadersResp
|
||||
err := s.Headers(&req, &resp)
|
||||
if err != nil {
|
||||
t.Errorf("Headers: %v", err)
|
||||
}
|
||||
marshalled, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("height: %v unmarshal err: %v", height, err)
|
||||
}
|
||||
t.Logf("height: %v resp: %v", height, string(marshalled))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadersSubscribe(t *testing.T) {
|
||||
args := MakeDefaultTestArgs()
|
||||
grp := stop.NewDebug()
|
||||
secondaryPath := "asdf"
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
sm := newSessionManager(nil, db, args, grp, &chaincfg.RegressionNetParams, nil)
|
||||
sm.start()
|
||||
defer sm.stop()
|
||||
|
||||
client1, server1 := net.Pipe()
|
||||
sess1 := sm.addSession(server1)
|
||||
client2, server2 := net.Pipe()
|
||||
sess2 := sm.addSession(server2)
|
||||
|
||||
// Set up logic to read a notification.
|
||||
var received sync.WaitGroup
|
||||
recv := func(client net.Conn) {
|
||||
defer received.Done()
|
||||
buf := make([]byte, 1024)
|
||||
len, err := client.Read(buf)
|
||||
if err != nil {
|
||||
t.Errorf("read err: %v", err)
|
||||
}
|
||||
t.Logf("len: %v notification: %v", len, string(buf))
|
||||
}
|
||||
received.Add(2)
|
||||
go recv(client1)
|
||||
go recv(client2)
|
||||
|
||||
s1 := &BlockchainHeadersService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
sessionMgr: sm,
|
||||
session: sess1,
|
||||
}
|
||||
s2 := &BlockchainHeadersService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
sessionMgr: sm,
|
||||
session: sess2,
|
||||
}
|
||||
|
||||
// Subscribe with Raw: false.
|
||||
req1 := HeadersSubscribeReq{Raw: false}
|
||||
var r any
|
||||
err = s1.Subscribe(&req1, &r)
|
||||
if err != nil {
|
||||
t.Errorf("handler err: %v", err)
|
||||
}
|
||||
resp1 := r.(*HeadersSubscribeResp)
|
||||
marshalled1, err := json.MarshalIndent(resp1, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("unmarshal err: %v", err)
|
||||
}
|
||||
// Subscribe with Raw: true.
|
||||
t.Logf("resp: %v", string(marshalled1))
|
||||
req2 := HeadersSubscribeReq{Raw: true}
|
||||
err = s2.Subscribe(&req2, &r)
|
||||
if err != nil {
|
||||
t.Errorf("handler err: %v", err)
|
||||
}
|
||||
resp2 := r.(*HeadersSubscribeRawResp)
|
||||
marshalled2, err := json.MarshalIndent(resp2, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("unmarshal err: %v", err)
|
||||
}
|
||||
t.Logf("resp: %v", string(marshalled2))
|
||||
|
||||
// Now send a notification.
|
||||
header500, err := hex.DecodeString("00000020e9537f98ae80a3aa0936dd424439b2b9305e5e9d9d5c7aa571b4422c447741e739b3109304ed4f0330d6854271db17da221559a46b68db4ceecfebd9f0c75dbe0100000000000000000000000000000000000000000000000000000000000000b3e02063ffff7f2001000000")
|
||||
if err != nil {
|
||||
t.Errorf("decode err: %v", err)
|
||||
}
|
||||
note1 := headerNotification{
|
||||
HeightHash: internal.HeightHash{Height: 500, BlockHeader: header500},
|
||||
blockHeaderElectrum: nil,
|
||||
blockHeaderStr: "",
|
||||
}
|
||||
t.Logf("sending notification")
|
||||
sm.doNotify(note1)
|
||||
|
||||
t.Logf("waiting to receive notification(s)...")
|
||||
received.Wait()
|
||||
}
|
||||
|
||||
func TestGetBalance(t *testing.T) {
|
||||
secondaryPath := "asdf"
|
||||
grp := stop.NewDebug()
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
s := &BlockchainAddressService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
}
|
||||
|
||||
for _, addr := range regTestAddrs {
|
||||
req := AddressGetBalanceReq{addr}
|
||||
var resp *AddressGetBalanceResp
|
||||
err := s.Get_balance(&req, &resp)
|
||||
if err != nil {
|
||||
t.Errorf("address: %v handler err: %v", addr, err)
|
||||
}
|
||||
marshalled, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("address: %v unmarshal err: %v", addr, err)
|
||||
}
|
||||
t.Logf("address: %v resp: %v", addr, string(marshalled))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHistory(t *testing.T) {
|
||||
secondaryPath := "asdf"
|
||||
grp := stop.NewDebug()
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
s := &BlockchainAddressService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
}
|
||||
|
||||
for _, addr := range regTestAddrs {
|
||||
req := AddressGetHistoryReq{addr}
|
||||
var resp *AddressGetHistoryResp
|
||||
err := s.Get_history(&req, &resp)
|
||||
if err != nil {
|
||||
t.Errorf("address: %v handler err: %v", addr, err)
|
||||
}
|
||||
marshalled, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("address: %v unmarshal err: %v", addr, err)
|
||||
}
|
||||
t.Logf("address: %v resp: %v", addr, string(marshalled))
|
||||
}
|
||||
}
|
||||
|
||||
func TestListUnspent(t *testing.T) {
|
||||
secondaryPath := "asdf"
|
||||
grp := stop.NewDebug()
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
s := &BlockchainAddressService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
}
|
||||
|
||||
for _, addr := range regTestAddrs {
|
||||
req := AddressListUnspentReq{addr}
|
||||
var resp *AddressListUnspentResp
|
||||
err := s.Listunspent(&req, &resp)
|
||||
if err != nil {
|
||||
t.Errorf("address: %v handler err: %v", addr, err)
|
||||
}
|
||||
marshalled, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("address: %v unmarshal err: %v", addr, err)
|
||||
}
|
||||
t.Logf("address: %v resp: %v", addr, string(marshalled))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressSubscribe(t *testing.T) {
|
||||
args := MakeDefaultTestArgs()
|
||||
grp := stop.NewDebug()
|
||||
secondaryPath := "asdf"
|
||||
db, err := db.GetProdDB(regTestDBPath, secondaryPath, grp)
|
||||
defer db.Shutdown()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
sm := newSessionManager(nil, db, args, grp, &chaincfg.RegressionNetParams, nil)
|
||||
sm.start()
|
||||
defer sm.stop()
|
||||
|
||||
client1, server1 := net.Pipe()
|
||||
sess1 := sm.addSession(server1)
|
||||
client2, server2 := net.Pipe()
|
||||
sess2 := sm.addSession(server2)
|
||||
|
||||
// Set up logic to read a notification.
|
||||
var received sync.WaitGroup
|
||||
recv := func(client net.Conn) {
|
||||
buf := make([]byte, 1024)
|
||||
len, err := client.Read(buf)
|
||||
if err != nil {
|
||||
t.Errorf("read err: %v", err)
|
||||
}
|
||||
t.Logf("len: %v notification: %v", len, string(buf))
|
||||
received.Done()
|
||||
}
|
||||
received.Add(2)
|
||||
go recv(client1)
|
||||
go recv(client2)
|
||||
|
||||
s1 := &BlockchainAddressService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
sessionMgr: sm,
|
||||
session: sess1,
|
||||
}
|
||||
s2 := &BlockchainAddressService{
|
||||
DB: db,
|
||||
Chain: &chaincfg.RegressionNetParams,
|
||||
sessionMgr: sm,
|
||||
session: sess2,
|
||||
}
|
||||
|
||||
addr1, addr2 := regTestAddrs[1], regTestAddrs[2]
|
||||
// Subscribe to addr1 and addr2.
|
||||
req1 := AddressSubscribeReq{addr1, addr2}
|
||||
var resp1 *AddressSubscribeResp
|
||||
err = s1.Subscribe(&req1, &resp1)
|
||||
if err != nil {
|
||||
t.Errorf("handler err: %v", err)
|
||||
}
|
||||
marshalled1, err := json.MarshalIndent(resp1, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("unmarshal err: %v", err)
|
||||
}
|
||||
// Subscribe to addr2 only.
|
||||
t.Logf("resp: %v", string(marshalled1))
|
||||
req2 := AddressSubscribeReq{addr2}
|
||||
var resp2 *AddressSubscribeResp
|
||||
err = s2.Subscribe(&req2, &resp2)
|
||||
if err != nil {
|
||||
t.Errorf("handler err: %v", err)
|
||||
}
|
||||
marshalled2, err := json.MarshalIndent(resp2, "", " ")
|
||||
if err != nil {
|
||||
t.Errorf("unmarshal err: %v", err)
|
||||
}
|
||||
t.Logf("resp: %v", string(marshalled2))
|
||||
|
||||
// Now send a notification for addr2.
|
||||
address, _ := lbcutil.DecodeAddress(addr2, sm.chain)
|
||||
script, _ := txscript.PayToAddrScript(address)
|
||||
note := hashXNotification{}
|
||||
copy(note.hashX[:], hashXScript(script, sm.chain))
|
||||
status, err := hex.DecodeString((*resp1)[1])
|
||||
if err != nil {
|
||||
t.Errorf("decode err: %v", err)
|
||||
}
|
||||
note.status = append(note.status, []byte(status)...)
|
||||
t.Logf("sending notification")
|
||||
sm.doNotify(note)
|
||||
|
||||
t.Logf("waiting to receive notification(s)...")
|
||||
received.Wait()
|
||||
}
|
108
server/jsonrpc_claimtrie.go
Normal file
108
server/jsonrpc_claimtrie.go
Normal file
|
@ -0,0 +1,108 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/lbryio/herald.go/db"
|
||||
"github.com/lbryio/herald.go/internal/metrics"
|
||||
pb "github.com/lbryio/herald.go/protobuf/go"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type ClaimtrieService struct {
|
||||
DB *db.ReadOnlyDBColumnFamily
|
||||
Server *Server
|
||||
}
|
||||
|
||||
type ResolveData struct {
|
||||
Data []string `json:"data"`
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
type GetClaimByIDData struct {
|
||||
ClaimID string `json:"claim_id"`
|
||||
}
|
||||
|
||||
// Resolve is the json rpc endpoint for 'blockchain.claimtrie.resolve'.
|
||||
func (t *ClaimtrieService) Resolve(args *ResolveData, result **pb.Outputs) error {
|
||||
log.Println("Resolve")
|
||||
res, err := InternalResolve(args.Data, t.DB)
|
||||
*result = res
|
||||
return err
|
||||
}
|
||||
|
||||
// Search is the json rpc endpoint for 'blockchain.claimtrie.search'.
|
||||
func (t *ClaimtrieService) Search(args *pb.SearchRequest, result **pb.Outputs) error {
|
||||
log.Println("Search")
|
||||
if t.Server == nil {
|
||||
log.Warnln("Server is nil in Search")
|
||||
*result = nil
|
||||
return nil
|
||||
}
|
||||
ctx := context.Background()
|
||||
res, err := t.Server.Search(ctx, args)
|
||||
*result = res
|
||||
return err
|
||||
}
|
||||
|
||||
// GetClaimByID is the json rpc endpoint for 'blockchain.claimtrie.getclaimbyid'.
|
||||
func (t *ClaimtrieService) GetClaimByID(args *GetClaimByIDData, result **pb.Outputs) error {
|
||||
log.Println("GetClaimByID")
|
||||
if len(args.ClaimID) != 40 {
|
||||
*result = nil
|
||||
return fmt.Errorf("len(claim_id) != 40")
|
||||
}
|
||||
|
||||
rows, extras, err := t.DB.GetClaimByID(args.ClaimID)
|
||||
if err != nil {
|
||||
*result = nil
|
||||
return err
|
||||
}
|
||||
|
||||
metrics.RequestsCount.With(prometheus.Labels{"method": "blockchain.claimtrie.getclaimbyid"}).Inc()
|
||||
|
||||
// FIXME: this has txos and extras and so does GetClaimById?
|
||||
allTxos := make([]*pb.Output, 0)
|
||||
allExtraTxos := make([]*pb.Output, 0)
|
||||
|
||||
for _, row := range rows {
|
||||
txos, extraTxos, err := row.ToOutputs()
|
||||
if err != nil {
|
||||
*result = nil
|
||||
return err
|
||||
}
|
||||
// TODO: there may be a more efficient way to do this.
|
||||
allTxos = append(allTxos, txos...)
|
||||
allExtraTxos = append(allExtraTxos, extraTxos...)
|
||||
}
|
||||
|
||||
for _, extra := range extras {
|
||||
txos, extraTxos, err := extra.ToOutputs()
|
||||
if err != nil {
|
||||
*result = nil
|
||||
return err
|
||||
}
|
||||
// TODO: there may be a more efficient way to do this.
|
||||
allTxos = append(allTxos, txos...)
|
||||
allExtraTxos = append(allExtraTxos, extraTxos...)
|
||||
}
|
||||
|
||||
res := &pb.Outputs{
|
||||
Txos: allTxos,
|
||||
ExtraTxos: allExtraTxos,
|
||||
Total: uint32(len(allTxos) + len(allExtraTxos)),
|
||||
Offset: 0, //TODO
|
||||
Blocked: nil, //TODO
|
||||
BlockedTotal: 0, //TODO
|
||||
}
|
||||
|
||||
log.Warn(res)
|
||||
|
||||
*result = res
|
||||
return nil
|
||||
}
|
39
server/jsonrpc_federation.go
Normal file
39
server/jsonrpc_federation.go
Normal file
|
@ -0,0 +1,39 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type PeersService struct {
|
||||
Server *Server
|
||||
// needed for subscribe/unsubscribe
|
||||
sessionMgr *sessionManager
|
||||
session *session
|
||||
}
|
||||
|
||||
type PeersSubscribeReq struct {
|
||||
Ip string `json:"ip"`
|
||||
Host string `json:"host"`
|
||||
Details []string `json:"details"`
|
||||
}
|
||||
|
||||
type PeersSubscribeResp string
|
||||
|
||||
// Features is the json rpc endpoint for 'server.peers.subcribe'.
|
||||
func (t *PeersService) Subscribe(req *PeersSubscribeReq, res **PeersSubscribeResp) error {
|
||||
log.Println("PeersSubscribe")
|
||||
// var port = "50001"
|
||||
|
||||
// FIXME: Get the actual port from the request details
|
||||
|
||||
if t.sessionMgr == nil || t.session == nil {
|
||||
*res = nil
|
||||
return errors.New("no session, rpc not supported")
|
||||
}
|
||||
t.sessionMgr.peersSubscribe(t.session, true /*subscribe*/)
|
||||
|
||||
*res = nil
|
||||
return nil
|
||||
}
|
88
server/jsonrpc_server.go
Normal file
88
server/jsonrpc_server.go
Normal file
|
@ -0,0 +1,88 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type ServerService struct {
|
||||
Args *Args
|
||||
}
|
||||
|
||||
type ServerFeatureService struct {
|
||||
Args *Args
|
||||
}
|
||||
|
||||
type ServerFeaturesReq struct{}
|
||||
|
||||
type ServerFeaturesRes struct {
|
||||
Hosts map[string]string `json:"hosts"`
|
||||
Pruning string `json:"pruning"`
|
||||
ServerVersion string `json:"server_version"`
|
||||
ProtocolMin string `json:"protocol_min"`
|
||||
ProtocolMax string `json:"protocol_max"`
|
||||
GenesisHash string `json:"genesis_hash"`
|
||||
Description string `json:"description"`
|
||||
PaymentAddress string `json:"payment_address"`
|
||||
DonationAddress string `json:"donation_address"`
|
||||
DailyFee string `json:"daily_fee"`
|
||||
HashFunction string `json:"hash_function"`
|
||||
TrendingAlgorithm string `json:"trending_algorithm"`
|
||||
}
|
||||
|
||||
// Features is the json rpc endpoint for 'server.features'.
|
||||
func (t *ServerService) Features(req *ServerFeaturesReq, res **ServerFeaturesRes) error {
|
||||
log.Println("Features")
|
||||
|
||||
features := &ServerFeaturesRes{
|
||||
Hosts: map[string]string{},
|
||||
Pruning: "",
|
||||
ServerVersion: HUB_PROTOCOL_VERSION,
|
||||
ProtocolMin: PROTOCOL_MIN,
|
||||
ProtocolMax: PROTOCOL_MAX,
|
||||
GenesisHash: t.Args.GenesisHash,
|
||||
Description: t.Args.ServerDescription,
|
||||
PaymentAddress: t.Args.PaymentAddress,
|
||||
DonationAddress: t.Args.DonationAddress,
|
||||
DailyFee: t.Args.DailyFee,
|
||||
HashFunction: "sha256",
|
||||
TrendingAlgorithm: "fast_ar",
|
||||
}
|
||||
*res = features
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ServerBannerService struct {
|
||||
Args *Args
|
||||
}
|
||||
|
||||
type ServerBannerReq struct{}
|
||||
|
||||
type ServerBannerRes string
|
||||
|
||||
// Banner is the json rpc endpoint for 'server.banner'.
|
||||
func (t *ServerService) Banner(req *ServerBannerReq, res **ServerBannerRes) error {
|
||||
log.Println("Banner")
|
||||
|
||||
*res = (*ServerBannerRes)(t.Args.Banner)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ServerVersionService struct {
|
||||
Args *Args
|
||||
}
|
||||
|
||||
type ServerVersionReq [2]string // [client_name, client_version]
|
||||
|
||||
type ServerVersionRes [2]string // [version, protocol_version]
|
||||
|
||||
// Version is the json rpc endpoint for 'server.version'.
|
||||
func (t *ServerService) Version(req *ServerVersionReq, res **ServerVersionRes) error {
|
||||
// FIXME: We may need to do the computation of a negotiated version here.
|
||||
// Also return an error if client is not supported?
|
||||
result := [2]string{t.Args.ServerVersion, t.Args.ServerVersion}
|
||||
*res = (*ServerVersionRes)(&result)
|
||||
log.Printf("Version(%v) -> %v", *req, **res)
|
||||
return nil
|
||||
}
|
154 server/jsonrpc_service.go Normal file
@@ -0,0 +1,154 @@
package server

import (
    "fmt"
    "net"
    "net/http"
    "strconv"
    "strings"

    gorilla_mux "github.com/gorilla/mux"
    gorilla_rpc "github.com/gorilla/rpc"
    gorilla_json "github.com/gorilla/rpc/json"
    log "github.com/sirupsen/logrus"
    "golang.org/x/net/netutil"
)

type gorillaRpcCodec struct {
    gorilla_rpc.Codec
}

func (c *gorillaRpcCodec) NewRequest(r *http.Request) gorilla_rpc.CodecRequest {
    return &gorillaRpcCodecRequest{c.Codec.NewRequest(r)}
}

// gorillaRpcCodecRequest provides ability to rewrite the incoming
// request "method" field. For example:
//   blockchain.block.get_header -> blockchain_block.Get_header
//   blockchain.address.listunspent -> blockchain_address.Listunspent
// This makes the "method" string compatible with Gorilla/RPC
// requirements.
type gorillaRpcCodecRequest struct {
    gorilla_rpc.CodecRequest
}

func (cr *gorillaRpcCodecRequest) Method() (string, error) {
    rawMethod, err := cr.CodecRequest.Method()
    if err != nil {
        return rawMethod, err
    }
    parts := strings.Split(rawMethod, ".")
    if len(parts) < 2 {
        return rawMethod, fmt.Errorf("blockchain rpc: service/method ill-formed: %q", rawMethod)
    }
    service := strings.Join(parts[0:len(parts)-1], "_")
    method := parts[len(parts)-1]
    if len(method) < 1 {
        return rawMethod, fmt.Errorf("blockchain rpc: method ill-formed: %q", method)
    }
    method = strings.ToUpper(string(method[0])) + string(method[1:])
    return service + "." + method, err
}

// StartJsonRPC starts the json rpc server and registers the endpoints.
func (s *Server) StartJsonRPC() error {
    // Set up the pure JSONRPC server with persistent connections/sessions.
    if s.Args.JSONRPCPort != 0 {
        port := ":" + strconv.Itoa(s.Args.JSONRPCPort)
        laddr, err := net.ResolveTCPAddr("tcp4", port)
        if err != nil {
            log.Errorf("ResolveTCPAddr: %v\n", err)
            goto fail1
        }
        listener, err := net.ListenTCP("tcp4", laddr)
        if err != nil {
            log.Errorf("ListenTCP: %v\n", err)
            goto fail1
        }
        log.Infof("JSONRPC server listening on %s", listener.Addr().String())
        s.sessionManager.start()
        acceptConnections := func(listener net.Listener) {
            defer s.sessionManager.stop()
            for {
                conn, err := listener.Accept()
                if err != nil {
                    log.Errorf("Accept: %v\n", err)
                    break
                }
                log.Infof("Accepted: %v", conn.RemoteAddr())
                s.sessionManager.addSession(conn)

            }
        }
        go acceptConnections(netutil.LimitListener(listener, s.sessionManager.sessionsMax))
    }

fail1:
    // Set up the JSONRPC over HTTP server.
    if s.Args.JSONRPCHTTPPort != 0 {
        s1 := gorilla_rpc.NewServer() // Create a new RPC server
        // Register the type of data requested as JSON, with custom codec.
        s1.RegisterCodec(&gorillaRpcCodec{gorilla_json.NewCodec()}, "application/json")

        // Register "blockchain.claimtrie.*" handlers.
        claimtrieSvc := &ClaimtrieService{s.DB, s}
        err := s1.RegisterTCPService(claimtrieSvc, "blockchain_claimtrie")
        if err != nil {
            log.Errorf("RegisterTCPService: %v\n", err)
            goto fail2
        }

        // Register "blockchain.{block,address,scripthash,transaction}.*" handlers.
        blockchainSvc := &BlockchainBlockService{s.DB, s.Chain}
        err = s1.RegisterTCPService(blockchainSvc, "blockchain_block")
        if err != nil {
            log.Errorf("RegisterTCPService: %v\n", err)
            goto fail2
        }
        err = s1.RegisterTCPService(&BlockchainHeadersService{s.DB, s.Chain, s.sessionManager, nil}, "blockchain_headers")
        if err != nil {
            log.Errorf("RegisterTCPService: %v\n", err)
            goto fail2
        }
        err = s1.RegisterTCPService(&BlockchainAddressService{s.DB, s.Chain, s.sessionManager, nil}, "blockchain_address")
        if err != nil {
            log.Errorf("RegisterTCPService: %v\n", err)
            goto fail2
        }
        err = s1.RegisterTCPService(&BlockchainScripthashService{s.DB, s.Chain, s.sessionManager, nil}, "blockchain_scripthash")
        if err != nil {
            log.Errorf("RegisterTCPService: %v\n", err)
            goto fail2
        }
        err = s1.RegisterTCPService(&BlockchainTransactionService{s.DB, s.Chain, s.sessionManager}, "blockchain_transaction")
        if err != nil {
            log.Errorf("RegisterTCPService: %v\n", err)
            goto fail2
        }

        // Register "server.{features,banner,version}" handlers.
        serverSvc := &ServerService{s.Args}
        err = s1.RegisterTCPService(serverSvc, "server")
        if err != nil {
            log.Errorf("RegisterTCPService: %v\n", err)
            goto fail2
        }

        // Register "server.peers" handlers.
        peersSvc := &PeersService{Server: s}
        err = s1.RegisterTCPService(peersSvc, "server_peers")
        if err != nil {
            log.Errorf("RegisterTCPService: %v\n", err)
            goto fail2
        }

        r := gorilla_mux.NewRouter()
        r.Handle("/rpc", s1)
        port := ":" + strconv.FormatUint(uint64(s.Args.JSONRPCHTTPPort), 10)
        log.Infof("HTTP JSONRPC server listening on %s", port)
        log.Fatal(http.ListenAndServe(port, r))
    }

fail2:
    return nil
}
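For reference, the method rewriting described in the gorillaRpcCodecRequest comment can be reproduced in isolation; this is a minimal sketch (the name rewriteMethod is invented for illustration and is not part of the diff):

package main

import (
    "fmt"
    "strings"
)

// rewriteMethod mirrors the transformation performed by
// gorillaRpcCodecRequest.Method: the service portion is joined with "_"
// and the method portion is capitalized, e.g.
// "blockchain.block.get_header" -> "blockchain_block.Get_header".
func rewriteMethod(raw string) (string, error) {
    parts := strings.Split(raw, ".")
    if len(parts) < 2 {
        return raw, fmt.Errorf("service/method ill-formed: %q", raw)
    }
    service := strings.Join(parts[:len(parts)-1], "_")
    method := parts[len(parts)-1]
    if len(method) < 1 {
        return raw, fmt.Errorf("method ill-formed: %q", method)
    }
    method = strings.ToUpper(method[:1]) + method[1:]
    return service + "." + method, nil
}

func main() {
    out, _ := rewriteMethod("blockchain.address.listunspent")
    fmt.Println(out) // blockchain_address.Listunspent
}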
126 server/notifier.go Normal file
@@ -0,0 +1,126 @@
package server

import (
    "encoding/binary"
    "fmt"
    "net"

    "github.com/lbryio/herald.go/internal"
    "github.com/sirupsen/logrus"
)

const NotifierResponseLength = 40

// AddHeightSub adds a new height subscriber
func (s *Server) AddHeightSub(addr net.Addr, conn net.Conn) {
    s.HeightSubsMut.Lock()
    defer s.HeightSubsMut.Unlock()
    s.HeightSubs[addr] = conn
}

// DoNotify sends a notification to all height subscribers
func (s *Server) DoNotify(heightHash *internal.HeightHash) error {
    buff := make([]byte, NotifierResponseLength)
    toDelete := make([]net.Addr, 0)

    s.HeightSubsMut.RLock()
    for addr, conn := range s.HeightSubs {
        // struct.pack(b'>Q32s', height, block_hash)
        binary.BigEndian.PutUint64(buff, heightHash.Height)
        copy(buff[8:], heightHash.BlockHash[:32])
        logrus.Tracef("notifying %s", addr)
        n, err := conn.Write(buff)
        if err != nil {
            logrus.Warn(err)
            toDelete = append(toDelete, addr)
        }
        if n != NotifierResponseLength {
            logrus.Warn("not all bytes written")
        }
    }
    s.HeightSubsMut.RUnlock()

    if len(toDelete) > 0 {
        s.HeightSubsMut.Lock()
        for _, v := range toDelete {
            delete(s.HeightSubs, v)
        }
        s.HeightSubsMut.Unlock()
    }

    return nil
}

// RunNotifier runs the notifying action forever
func (s *Server) RunNotifier() error {
    for notification := range s.NotifierChan {
        switch note := notification.(type) {
        case internal.HeightHash:
            heightHash := note
            s.DoNotify(&heightHash)
        // Do we need this?
        // case peerNotification:
        //	peer, _ := notification.(peerNotification)
        //	s.notifyPeerSubs(&Peer{Address: peer.address, Port: peer.port})
        default:
            logrus.Warn("unknown notification type")
        }
        s.sessionManager.doNotify(notification)
    }
    return nil
}

// NotifierServer implements the TCP protocol for height/blockheader notifications
func (s *Server) NotifierServer() error {
    s.Grp.Add(1)
    address := ":" + fmt.Sprintf("%d", s.Args.NotifierPort)
    addr, err := net.ResolveTCPAddr("tcp", address)
    if err != nil {
        return err
    }

    listen, err := net.ListenTCP("tcp", addr)
    if err != nil {
        return err
    }

    defer listen.Close()
    rdyCh := make(chan bool)

    for {
        var conn net.Conn
        var err error

        logrus.Info("Waiting for connection")

        go func() {
            conn, err = listen.Accept()
            rdyCh <- true
        }()

        select {
        case <-s.Grp.Ch():
            s.Grp.Done()
            return nil
        case <-rdyCh:
            logrus.Info("Connection accepted")
        }

        if err != nil {
            logrus.Warn(err)
            continue
        }

        addr := conn.RemoteAddr()

        logrus.Println(addr)

        // _, err = conn.Write([]byte(addr.String()))
        // if err != nil {
        //	logrus.Warn(err)
        //	continue
        // }

        go s.AddHeightSub(addr, conn)
    }
}
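A subscriber on the other end of this TCP stream would read NotifierResponseLength-sized frames and decode them as 8 bytes of big-endian height followed by a 32-byte block hash, i.e. the struct.pack(b'>Q32s', ...) layout noted in DoNotify. A self-contained sketch, with decodeHeightNotification as a hypothetical helper:

package main

import (
    "encoding/binary"
    "fmt"
)

const notifierResponseLength = 40 // mirrors NotifierResponseLength above

// decodeHeightNotification is the illustrative counterpart to DoNotify's
// encoding: 8 bytes of big-endian height followed by a 32-byte block hash.
func decodeHeightNotification(buf []byte) (uint64, []byte, error) {
    if len(buf) != notifierResponseLength {
        return 0, nil, fmt.Errorf("expected %d bytes, got %d", notifierResponseLength, len(buf))
    }
    height := binary.BigEndian.Uint64(buf[:8])
    blockHash := buf[8:40]
    return height, blockHash, nil
}

func main() {
    frame := make([]byte, notifierResponseLength)
    binary.BigEndian.PutUint64(frame, 42) // example height
    height, hash, _ := decodeHeightNotification(frame)
    fmt.Println(height, len(hash)) // 42 32
}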
110 server/notifier_test.go Normal file
@@ -0,0 +1,110 @@
package server_test

import (
    "encoding/hex"
    "fmt"
    "net"
    "testing"
    "time"

    "github.com/lbryio/herald.go/internal"
    "github.com/lbryio/herald.go/server"
    "github.com/lbryio/lbry.go/v3/extras/stop"
    "github.com/sirupsen/logrus"
)

const defaultBufferSize = 1024

func subReady(s *server.Server) error {
    sleepTime := time.Millisecond * 100
    for {
        if sleepTime > time.Second {
            return fmt.Errorf("timeout")
        }
        s.HeightSubsMut.RLock()
        if len(s.HeightSubs) > 0 {
            s.HeightSubsMut.RUnlock()
            return nil
        }
        s.HeightSubsMut.RUnlock()

        logrus.Warn("waiting for subscriber")
        time.Sleep(sleepTime)
        sleepTime = sleepTime * 2

    }
}

func tcpConnReady(addr string) (net.Conn, error) {
    sleepTime := time.Millisecond * 100
    for {
        if sleepTime > time.Second {
            return nil, fmt.Errorf("timeout")
        }

        conn, err := net.Dial("tcp", addr)
        if err != nil {
            logrus.Warn(err)
            time.Sleep(sleepTime)
            sleepTime = sleepTime * 2
            continue
        }
        return conn, nil
    }
}

func tcpRead(conn net.Conn) ([]byte, error) {
    buf := make([]byte, defaultBufferSize)
    n, err := conn.Read(buf)
    if err != nil {
        return nil, err
    }
    if n != server.NotifierResponseLength {
        return nil, fmt.Errorf("not all bytes read")
    }

    return buf[:n], nil
}

func TestNotifierServer(t *testing.T) {
    args := server.MakeDefaultTestArgs()
    ctx := stop.NewDebug()
    hub := server.MakeHubServer(ctx, args)

    go hub.NotifierServer()
    go hub.RunNotifier()

    addr := fmt.Sprintf(":%d", args.NotifierPort)
    logrus.Info(addr)
    conn, err := tcpConnReady(addr)
    if err != nil {
        t.Fatal(err)
    }

    resCh := make(chan []byte)

    go func() {
        logrus.Warn("waiting for response")
        res, err := tcpRead(conn)
        logrus.Warn("got response")
        if err != nil {
            logrus.Warn(err)
            return
        }
        resCh <- res
    }()

    // Hacky but needed because if the reader isn't ready
    // before the writer sends it won't get the data
    err = subReady(hub)
    if err != nil {
        t.Fatal(err)
    }

    hash, _ := hex.DecodeString("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
    logrus.Warn("sending hash")
    hub.NotifierChan <- internal.HeightHash{Height: 1, BlockHash: hash}

    res := <-resCh
    logrus.Info(string(res))
}
760 server/search.go (file diff suppressed because it is too large)
71 server/search_test.go Normal file
@@ -0,0 +1,71 @@
package server_test

import (
    "context"
    "fmt"
    "log"
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"

    pb "github.com/lbryio/herald.go/protobuf/go"
    server "github.com/lbryio/herald.go/server"
    "github.com/lbryio/lbry.go/v3/extras/stop"
    "github.com/olivere/elastic/v7"
)

func TestInt32ArrToInterface(t *testing.T) {
    want := []int32{0, 10, 100}
    got := server.Int32ArrToInterface(want)
    for i, x := range got {
        if x.(int32) != want[i] {
            t.Errorf("flags: got: %v, want: %v\n", x, want[i])
        }
    }
}

func TestStrArrToInterface(t *testing.T) {
    want := []string{"asdf", "qwer", "xczv"}
    got := server.StrArrToInterface(want)
    for i, x := range got {
        if strings.Compare(x.(string), want[i]) != 0 {
            t.Errorf("flags: got: %v, want: %v\n", x, want[i])
        }
    }
}

func TestAddTermsField(t *testing.T) {
    name := "qwer"
    arr := []string{"a", "b", "c"}
    var query *elastic.BoolQuery = elastic.NewBoolQuery()
    query = server.AddTermsField(query, arr, name)
    fmt.Printf("query: %v\n", query)
}

func TestSearch(t *testing.T) {
    handler := http.NotFound
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        handler(w, r)
    }))
    defer ts.Close()

    handler = func(w http.ResponseWriter, r *http.Request) {
        resp := `{}`

        w.Write([]byte(resp))
    }

    ctx := context.Background()
    stopGroup := stop.NewDebug()
    args := server.MakeDefaultTestArgs()
    hubServer := server.MakeHubServer(stopGroup, args)
    req := &pb.SearchRequest{
        Text: "asdf",
    }
    out, err := hubServer.Search(ctx, req)
    if err != nil {
        log.Println(err)
    }
    log.Println(out)
}
581 server/server.go
@@ -1,30 +1,70 @@
package server

import (
    "log"
    "context"
    "crypto/sha256"
    "encoding/hex"
    "errors"
    "fmt"
    "hash"
    "io/ioutil"
    golog "log"
    "net"
    "net/http"
    "os"
    "regexp"
    "strconv"
    "sync"
    "time"

    pb "github.com/lbryio/hub/protobuf/go"
    "github.com/ReneKroon/ttlcache/v2"
    "github.com/lbryio/herald.go/db"
    "github.com/lbryio/herald.go/internal/metrics"
    "github.com/lbryio/herald.go/meta"
    pb "github.com/lbryio/herald.go/protobuf/go"
    "github.com/lbryio/lbcd/chaincfg"
    lbcd "github.com/lbryio/lbcd/rpcclient"
    "github.com/lbryio/lbry.go/v3/extras/stop"
    "github.com/olivere/elastic/v7"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    log "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
    "google.golang.org/grpc/reflection"
)

type Server struct {
    GrpcServer *grpc.Server
    Args *Args
    MultiSpaceRe *regexp.Regexp
    WeirdCharsRe *regexp.Regexp
    EsClient *elastic.Client
    GrpcServer *grpc.Server
    Args *Args
    MultiSpaceRe *regexp.Regexp
    WeirdCharsRe *regexp.Regexp
    DB *db.ReadOnlyDBColumnFamily
    Chain *chaincfg.Params
    DaemonClient *lbcd.Client
    EsClient *elastic.Client
    QueryCache *ttlcache.Cache
    S256 *hash.Hash
    LastRefreshCheck time.Time
    RefreshDelta time.Duration
    NumESRefreshes int64
    PeerServers map[string]*Peer
    PeerServersMut sync.RWMutex
    NumPeerServers *int64
    PeerSubs map[string]*Peer
    PeerSubsMut sync.RWMutex
    NumPeerSubs *int64
    ExternalIP net.IP
    HeightSubs map[net.Addr]net.Conn
    HeightSubsMut sync.RWMutex
    NotifierChan chan interface{}
    Grp *stop.Group
    notiferListener *net.TCPListener
    sessionManager *sessionManager
    pb.UnimplementedHubServer
}

type Args struct {
    Serve bool
    Host string
    Port string
    EsHost string
    EsPort string
    Dev bool
func getVersion() string {
    return meta.Version
}

/*
@@ -66,10 +106,147 @@ type Args struct {
	'blockchain.address.unsubscribe'
*/

func MakeHubServer(args *Args) *Server {
    grpcServer := grpc.NewServer()
// PeerSubsLoadOrStore thread safe load or store for peer subs
func (s *Server) PeerSubsLoadOrStore(peer *Peer) (actual *Peer, loaded bool) {
    key := peer.peerKey()
    s.PeerSubsMut.RLock()
    if actual, ok := s.PeerSubs[key]; ok {
        s.PeerSubsMut.RUnlock()
        return actual, true
    } else {
        s.PeerSubsMut.RUnlock()
        s.PeerSubsMut.Lock()
        s.PeerSubs[key] = peer
        s.PeerSubsMut.Unlock()
        return peer, false
    }
}

    multiSpaceRe, err := regexp.Compile("\\s{2,}")
// PeerServersLoadOrStore thread safe load or store for peer servers
func (s *Server) PeerServersLoadOrStore(peer *Peer) (actual *Peer, loaded bool) {
    key := peer.peerKey()
    s.PeerServersMut.RLock()
    if actual, ok := s.PeerServers[key]; ok {
        s.PeerServersMut.RUnlock()
        return actual, true
    } else {
        s.PeerServersMut.RUnlock()
        s.PeerServersMut.Lock()
        s.PeerServers[key] = peer
        s.PeerServersMut.Unlock()
        return peer, false
    }
}

// Run "main" function for starting the server. This blocks.
func (s *Server) Run() {
    address := ":" + strconv.Itoa(s.Args.Port)
    l, err := net.Listen("tcp", address)
    if err != nil {
        log.Fatalf("failed to listen: %v", err)
    }

    pb.RegisterHubServer(s.GrpcServer, s)
    reflection.Register(s.GrpcServer)

    log.Printf("Server.Run() #### listening on %s\n", l.Addr().String())
    log.Printf("%#v\n", s.Args)
    if err := s.GrpcServer.Serve(l); err != nil {
        log.Fatalf("failed to serve: %v", err)
    }
}

func (s *Server) Stop() {
    log.Println("Shutting down server...")

    if s.EsClient != nil {
        log.Println("Stopping es client...")
        s.EsClient.Stop()
    }
    if s.GrpcServer != nil {
        log.Println("Stopping grpc server...")
        s.GrpcServer.GracefulStop()
    }
    log.Println("Stopping other server threads...")
    s.Grp.StopAndWait()
    if s.DB != nil {
        log.Println("Stopping database connection...")
        s.DB.Shutdown()
    }

    log.Println("Returning from Stop...")
}

func LoadDatabase(args *Args, grp *stop.Group) (*db.ReadOnlyDBColumnFamily, error) {
    tmpName, err := os.MkdirTemp("", "go-lbry-hub")
    if err != nil {
        log.Info(err)
        log.Fatal(err)
    }
    log.Info("tmpName", tmpName)
    if err != nil {
        log.Info(err)
    }
    myDB, err := db.GetProdDB(args.DBPath, tmpName, grp)
    if err != nil {
        // Can't load the db, fail loudly
        log.Info(err)
        log.Fatalln(err)
    }

    if myDB.LastState != nil {
        log.Infof("DB version: %v", myDB.LastState.DBVersion)
        log.Infof("height: %v", myDB.LastState.Height)
        log.Infof("genesis: %v", myDB.LastState.Genesis.String())
        log.Infof("tip: %v", myDB.LastState.Tip.String())
        log.Infof("tx count: %v", myDB.LastState.TxCount)
    }

    blockingChannelHashes := make([][]byte, 0, 10)
    blockingIds := make([]string, 0, 10)
    filteringChannelHashes := make([][]byte, 0, 10)
    filteringIds := make([]string, 0, 10)

    for _, id := range args.BlockingChannelIds {
        hash, err := hex.DecodeString(id)
        if err != nil {
            log.Warn("Invalid channel id: ", id)
            continue
        }
        blockingChannelHashes = append(blockingChannelHashes, hash)
        blockingIds = append(blockingIds, id)
    }

    for _, id := range args.FilteringChannelIds {
        hash, err := hex.DecodeString(id)
        if err != nil {
            log.Warn("Invalid channel id: ", id)
            continue
        }
        filteringChannelHashes = append(filteringChannelHashes, hash)
        filteringIds = append(filteringIds, id)
    }

    myDB.BlockingChannelHashes = blockingChannelHashes
    myDB.FilteringChannelHashes = filteringChannelHashes

    if len(filteringIds) > 0 {
        log.Infof("filtering claims reposted by channels: %+s", filteringIds)
    }
    if len(blockingIds) > 0 {
        log.Infof("blocking claims reposted by channels: %+s", blockingIds)
    }

    return myDB, nil
}

// MakeHubServer takes the arguments given to a hub when it's started and
// initializes everything. It loads information about previously known peers,
// creates needed internal data structures, and initializes goroutines.
func MakeHubServer(grp *stop.Group, args *Args) *Server {
    grpcServer := grpc.NewServer(grpc.NumStreamWorkers(0))

    multiSpaceRe, err := regexp.Compile(`\s{2,}`)
    if err != nil {
        log.Fatal(err)
    }
@@ -79,12 +256,370 @@ func MakeHubServer(args *Args) *Server {
        log.Fatal(err)
    }

    s := &Server {
        GrpcServer: grpcServer,
        Args: args,
        MultiSpaceRe: multiSpaceRe,
        WeirdCharsRe: weirdCharsRe,
    var lbcdClient *lbcd.Client = nil
    if args.DaemonURL != nil && args.DaemonURL.Host != "" {
        var rpcCertificate []byte
        if args.DaemonCAPath != "" {
            rpcCertificate, err = ioutil.ReadFile(args.DaemonCAPath)
            if err != nil {
                log.Fatalf("failed to read SSL certificate from path: %v", args.DaemonCAPath)
            }
        }
        log.Warnf("connecting to lbcd daemon at %v...", args.DaemonURL.Host)
        password, _ := args.DaemonURL.User.Password()
        cfg := &lbcd.ConnConfig{
            Host: args.DaemonURL.Host,
            User: args.DaemonURL.User.Username(),
            Pass: password,
            HTTPPostMode: true,
            DisableTLS: rpcCertificate == nil,
            Certificates: rpcCertificate,
        }
        lbcdClient, err = lbcd.New(cfg, nil)
        if err != nil {
            log.Fatalf("lbcd daemon connection failed: %v", err)
        }
    }

    var esClient *elastic.Client = nil
    if !args.DisableEs {
        esUrl := args.EsHost + ":" + fmt.Sprintf("%d", args.EsPort)
        opts := []elastic.ClientOptionFunc{
            elastic.SetSniff(true),
            elastic.SetSnifferTimeoutStartup(time.Second * 60),
            elastic.SetSnifferTimeout(time.Second * 60),
            elastic.SetURL(esUrl),
        }
        if args.Debug {
            opts = append(opts, elastic.SetTraceLog(golog.New(os.Stderr, "[[ELASTIC]]", 0)))
        }
        esClient, err = elastic.NewClient(opts...)
        if err != nil {
            log.Fatal(err)
        }
    }

    cache := ttlcache.NewCache()
    err = cache.SetTTL(time.Duration(args.CacheTTL) * time.Minute)
    if err != nil {
        log.Fatal(err)
    }
    s256 := sha256.New()
    var refreshDelta = time.Second * time.Duration(args.RefreshDelta)
    if args.Debug {
        refreshDelta = time.Second * 0
    }

    numPeers := new(int64)
    *numPeers = 0
    numSubs := new(int64)
    *numSubs = 0

    //TODO: is this the right place to load the db?
    var myDB *db.ReadOnlyDBColumnFamily
    if !args.DisableResolve {
        myDB, err = LoadDatabase(args, grp)
        if err != nil {
            log.Warning(err)
        }
    }

    // Determine which chain to use based on db and cli values
    dbChain := (*chaincfg.Params)(nil)
    if myDB != nil && myDB.LastState != nil {
        // The chain params can be inferred from DBStateValue.
        switch myDB.LastState.Genesis.Hash {
        case *chaincfg.MainNetParams.GenesisHash:
            dbChain = &chaincfg.MainNetParams
        case *chaincfg.TestNet3Params.GenesisHash:
            dbChain = &chaincfg.TestNet3Params
        case *chaincfg.RegressionNetParams.GenesisHash:
            dbChain = &chaincfg.RegressionNetParams
        }
    }
    cliChain := (*chaincfg.Params)(nil)
    if args.Chain != nil {
        switch *args.Chain {
        case chaincfg.MainNetParams.Name:
            cliChain = &chaincfg.MainNetParams
        case chaincfg.TestNet3Params.Name, "testnet":
            cliChain = &chaincfg.TestNet3Params
        case chaincfg.RegressionNetParams.Name:
            cliChain = &chaincfg.RegressionNetParams
        }
    }
    chain := chaincfg.MainNetParams
    if dbChain != nil && cliChain != nil {
        if dbChain != cliChain {
            log.Warnf("network: %v (from db) conflicts with %v (from cli)", dbChain.Name, cliChain.Name)
        }
        chain = *dbChain
    } else if dbChain != nil {
        chain = *dbChain
    } else if cliChain != nil {
        chain = *cliChain
    }
    log.Infof("network: %v", chain.Name)

    args.GenesisHash = chain.GenesisHash.String()

    sessionGrp := stop.New(grp)

    s := &Server{
        GrpcServer: grpcServer,
        Args: args,
        MultiSpaceRe: multiSpaceRe,
        WeirdCharsRe: weirdCharsRe,
        DB: myDB,
        Chain: &chain,
        DaemonClient: lbcdClient,
        EsClient: esClient,
        QueryCache: cache,
        S256: &s256,
        LastRefreshCheck: time.Now(),
        RefreshDelta: refreshDelta,
        NumESRefreshes: 0,
        PeerServers: make(map[string]*Peer),
        PeerServersMut: sync.RWMutex{},
        NumPeerServers: numPeers,
        PeerSubs: make(map[string]*Peer),
        PeerSubsMut: sync.RWMutex{},
        NumPeerSubs: numSubs,
        ExternalIP: net.IPv4(127, 0, 0, 1),
        HeightSubs: make(map[net.Addr]net.Conn),
        HeightSubsMut: sync.RWMutex{},
        NotifierChan: make(chan interface{}, 1),
        Grp: grp,
        sessionManager: nil,
    }
    // FIXME: HACK
    s.sessionManager = newSessionManager(s, myDB, args, sessionGrp, &chain, lbcdClient)

    // Start up our background services
    if !args.DisableResolve && !args.DisableRocksDBRefresh {
        log.Info("Running detect changes")
        myDB.RunDetectChanges(s.NotifierChan)
    }
    if !args.DisableBlockingAndFiltering {
        myDB.RunGetBlocksAndFilters()
    }
    if !args.DisableStartPrometheus {
        go s.prometheusEndpoint(fmt.Sprintf("%d", s.Args.PrometheusPort), "metrics")
    }
    if !args.DisableStartUDP {
        go func() {
            err := s.UDPServer(s.Args.Port)
            if err != nil {
                log.Errorf("UDP Server (%d) failed! %v", s.Args.Port, err)
            }
        }()
        if s.Args.JSONRPCPort != 0 {
            go func() {
                err := s.UDPServer(s.Args.JSONRPCPort)
                if err != nil {
                    log.Errorf("UDP Server (%d) failed! %v", s.Args.JSONRPCPort, err)
                }
            }()
        }
    }
    if !args.DisableStartNotifier {
        go func() {
            err := s.NotifierServer()
            if err != nil {
                log.Println("Notifier Server failed!", err)
            }
        }()
        go func() {
            err := s.RunNotifier()
            if err != nil {
                log.Println("RunNotifier failed!", err)
            }
        }()
    }
    if !args.DisableStartJSONRPC {
        go func() {
            err := s.StartJsonRPC()
            if err != nil {
                log.Println("JSONRPC Server failed!", err)
            }
        }()
    }
    // Load peers from disk and subscribe to one if there are any
    if !args.DisableLoadPeers {
        go func() {
            err := s.loadPeers()
            if err != nil {
                log.Println(err)
            }
        }()
    }

    return s
}
}

// prometheusEndpoint is a goroutine which start up a prometheus endpoint
// for this hub to allow for metric tracking.
func (s *Server) prometheusEndpoint(port string, endpoint string) {
    http.Handle("/"+endpoint, promhttp.Handler())
    log.Printf("listening on :%s /%s\n", port, endpoint)
    err := http.ListenAndServe(":"+port, nil)
    log.Fatalln("Shouldn't happen??!?!", err)
}

// Hello is a grpc endpoint to allow another hub to tell us about itself.
// The passed message includes information about the other hub, and all
// of its peers which are added to the knowledge of this hub.
func (s *Server) Hello(ctx context.Context, args *pb.HelloMessage) (*pb.HelloMessage, error) {
    metrics.RequestsCount.With(prometheus.Labels{"method": "hello"}).Inc()
    port := args.Port
    host := args.Host
    newPeer := &Peer{
        Address: host,
        Port: port,
        LastSeen: time.Now(),
    }
    log.Println(newPeer)

    err := s.addPeer(newPeer, false, true)
    // They just contacted us, so this shouldn't happen
    if err != nil {
        log.Println(err)
    }
    s.mergePeers(args.Servers)
    s.writePeers()
    s.notifyPeerSubs(newPeer)

    return s.makeHelloMessage(), nil
}

// PeerSubscribe adds a peer hub to the list of subscribers to update about
// new peers.
func (s *Server) PeerSubscribe(ctx context.Context, in *pb.ServerMessage) (*pb.StringValue, error) {
    metrics.RequestsCount.With(prometheus.Labels{"method": "peer_subscribe"}).Inc()
    var msg = "Success"
    peer := &Peer{
        Address: in.Address,
        Port: in.Port,
        LastSeen: time.Now(),
    }

    if _, loaded := s.PeerSubsLoadOrStore(peer); !loaded {
        s.incNumSubs()
        metrics.PeersSubscribed.Inc()
    } else {
        msg = "Already subscribed"
    }

    return &pb.StringValue{Value: msg}, nil
}

// AddPeer is a grpc endpoint to tell this hub about another hub in the network.
func (s *Server) AddPeer(ctx context.Context, args *pb.ServerMessage) (*pb.StringValue, error) {
    metrics.RequestsCount.With(prometheus.Labels{"method": "add_peer"}).Inc()
    var msg = "Success"
    newPeer := &Peer{
        Address: args.Address,
        Port: args.Port,
        LastSeen: time.Now(),
    }
    err := s.addPeer(newPeer, true, true)
    if err != nil {
        log.Println(err)
        msg = "Failed"
    }
    return &pb.StringValue{Value: msg}, err
}

// Ping is a grpc endpoint that returns a short message.
func (s *Server) Ping(ctx context.Context, args *pb.EmptyMessage) (*pb.StringValue, error) {
    metrics.RequestsCount.With(prometheus.Labels{"method": "ping"}).Inc()
    return &pb.StringValue{Value: "Hello, world!"}, nil
}

// Version is a grpc endpoint to get this hub's version.
func (s *Server) Version(ctx context.Context, args *pb.EmptyMessage) (*pb.StringValue, error) {
    metrics.RequestsCount.With(prometheus.Labels{"method": "version"}).Inc()
    return &pb.StringValue{Value: getVersion()}, nil
}

func (s *Server) Height(ctx context.Context, args *pb.EmptyMessage) (*pb.UInt32Value, error) {
    metrics.RequestsCount.With(prometheus.Labels{"method": "height"}).Inc()
    if s.DB != nil {
        return &pb.UInt32Value{Value: s.DB.LastState.Height}, nil
    } else {
        return &pb.UInt32Value{Value: 0}, nil
    }
}

// HeightSubscribe takes a height to wait for the server to reach and waits until it reaches that
// height or higher and returns the current height. If the db is off it will return 0.
func (s *Server) HeightSubscribe(arg *pb.UInt32Value, stream pb.Hub_HeightSubscribeServer) error {
    metrics.RequestsCount.With(prometheus.Labels{"method": "height"}).Inc()
    if s.DB != nil {
        want := arg.Value
        for s.DB.LastState.Height < want {
            if s.DB.LastState.Height >= want {
                err := stream.Send(&pb.UInt32Value{Value: s.DB.LastState.Height})
                if err != nil {
                    return err
                }
                return nil
            }
            time.Sleep(time.Millisecond * 100)
        }
    } else {
        if err := stream.Send(&pb.UInt32Value{Value: 0}); err != nil {
            return err
        }
    }
    return nil
}

// HeightHashSubscribe takes a height to wait for the server to reach and waits until it reaches that
// height or higher and returns the current height. If the db is off it will return 0.
func (s *Server) HeightHashSubscribe() error {
    metrics.RequestsCount.With(prometheus.Labels{"method": "height_hash"}).Inc()

    return nil
}

// Resolve is the gRPC endpoint for resolve.
func (s *Server) Resolve(ctx context.Context, args *pb.StringArray) (*pb.Outputs, error) {
    return InternalResolve(args.Value, s.DB)
}

// InternalResolve takes an array of urls and resolves them to their transactions.
func InternalResolve(urls []string, DB *db.ReadOnlyDBColumnFamily) (*pb.Outputs, error) {
    if DB == nil {
        return nil, errors.New("db is nil")
        // return nil, nil
    }
    metrics.RequestsCount.With(prometheus.Labels{"method": "resolve"}).Inc()

    allTxos := make([]*pb.Output, 0)
    allExtraTxos := make([]*pb.Output, 0)

    for _, url := range urls {
        res := DB.Resolve(url)
        txos, extraTxos, err := res.ToOutputs()
        if err != nil {
            return nil, err
        }
        // TODO: there may be a more efficient way to do this.
        allTxos = append(allTxos, txos...)
        allExtraTxos = append(allExtraTxos, extraTxos...)
    }

    res := &pb.Outputs{
        Txos: allTxos,
        ExtraTxos: allExtraTxos,
        Total: uint32(len(allTxos) + len(allExtraTxos)),
        Offset: 0, //TODO
        Blocked: nil, //TODO
        BlockedTotal: 0, //TODO
    }

    log.Warn(res)

    return res, nil
}
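Since Run registers the Hub service with gRPC reflection, these endpoints can be exercised by any gRPC client. A hedged client sketch follows; it assumes the standard protoc-generated pb.NewHubClient constructor for the Hub service, and the address and the lbry:// URL are placeholders chosen for illustration:

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    pb "github.com/lbryio/herald.go/protobuf/go"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // Placeholder address; use the host:port the hub's gRPC server listens on.
    conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := pb.NewHubClient(conn) // assumed generated constructor
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // Ping returns a short canned message ("Hello, world!").
    pong, err := client.Ping(ctx, &pb.EmptyMessage{})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(pong.Value)

    // Resolve maps URLs to transaction outputs via InternalResolve.
    outputs, err := client.Resolve(ctx, &pb.StringArray{Value: []string{"lbry://example"}})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(outputs.Total)
}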
19 server/server_test_pkg.go Normal file
@@ -0,0 +1,19 @@
package server

import (
    "github.com/lbryio/herald.go/db"
    "github.com/lbryio/lbcd/chaincfg"
    "github.com/lbryio/lbry.go/v3/extras/stop"
)

func (s *Server) AddPeerExported() func(*Peer, bool, bool) error {
    return s.addPeer
}

func (s *Server) GetNumPeersExported() func() int64 {
    return s.getNumPeers
}

func NewSessionManagerExported(server *Server, db *db.ReadOnlyDBColumnFamily, args *Args, grp *stop.Group, chain *chaincfg.Params) *sessionManager {
    return newSessionManager(server, db, args, grp, chain, nil)
}
631 server/session.go Normal file
@@ -0,0 +1,631 @@
package server

import (
    "bytes"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "net"
    "net/rpc"
    "net/rpc/jsonrpc"
    "strings"
    "sync"
    "time"
    "unsafe"

    "github.com/lbryio/herald.go/db"
    "github.com/lbryio/herald.go/internal"
    "github.com/lbryio/lbcd/chaincfg"
    "github.com/lbryio/lbcd/chaincfg/chainhash"
    lbcd "github.com/lbryio/lbcd/rpcclient"
    "github.com/lbryio/lbcd/wire"
    "github.com/lbryio/lbry.go/v3/extras/stop"
    log "github.com/sirupsen/logrus"
)

type headerNotification struct {
    internal.HeightHash
    blockHeaderElectrum *BlockHeaderElectrum
    blockHeaderStr string
}

type hashXNotification struct {
    hashX [HASHX_LEN]byte
    status []byte
    statusStr string
}

type peerNotification struct {
    address string
    port string
}

type session struct {
    id uintptr
    addr net.Addr
    conn net.Conn
    // hashXSubs maps hashX to the original subscription key (address or scripthash)
    hashXSubs map[[HASHX_LEN]byte]string
    // headersSub indicates header subscription
    headersSub bool
    // peersSub indicates peer subscription
    peersSub bool
    // headersSubRaw indicates the header subscription mode
    headersSubRaw bool
    // client provides the ability to send notifications
    client rpc.ClientCodec
    clientSeq uint64
    // lastRecv records time of last incoming data
    lastRecv time.Time
    // lastSend records time of last outgoing data
    lastSend time.Time
}

func (s *session) doNotify(notification interface{}) {
    var method string
    var params interface{}
    switch note := notification.(type) {
    case headerNotification:
        if !s.headersSub {
            return
        }
        heightHash := note.HeightHash
        method = "blockchain.headers.subscribe"
        if s.headersSubRaw {
            header := note.blockHeaderStr
            if len(header) == 0 {
                header = hex.EncodeToString(note.BlockHeader[:])
            }
            params = &HeadersSubscribeRawResp{
                Hex: header,
                Height: uint32(heightHash.Height),
            }
        } else {
            header := note.blockHeaderElectrum
            if header == nil { // not initialized
                header = newBlockHeaderElectrum((*[HEADER_SIZE]byte)(note.BlockHeader), uint32(heightHash.Height))
            }
            params = header
        }
    case hashXNotification:
        orig, ok := s.hashXSubs[note.hashX]
        if !ok {
            return
        }
        if len(orig) == 64 {
            method = "blockchain.scripthash.subscribe"
        } else {
            method = "blockchain.address.subscribe"
        }
        status := note.statusStr
        if len(status) == 0 {
            status = hex.EncodeToString(note.status)
        }
        params = []string{orig, status}
    case peerNotification:
        if !s.peersSub {
            return
        }
        method = "server.peers.subscribe"
        params = []string{note.address, note.port}

    default:
        log.Warnf("unknown notification type: %v", notification)
        return
    }
    // Send the notification.
    s.clientSeq += 1
    req := &rpc.Request{
        ServiceMethod: method,
        Seq: s.clientSeq,
    }
    err := s.client.WriteRequest(req, params)
    if err != nil {
        log.Warnf("error: %v", err)
    }
    // Bump last send time.
    s.lastSend = time.Now()
}

type sessionMap map[uintptr]*session

type sessionManager struct {
    // sessionsMut protects sessions, headerSubs, hashXSubs state
    sessionsMut sync.RWMutex
    sessions sessionMap
    // sessionsWait sync.WaitGroup
    grp *stop.Group
    sessionsMax int
    sessionTimeout time.Duration
    manageTicker *time.Ticker
    db *db.ReadOnlyDBColumnFamily
    args *Args
    server *Server
    chain *chaincfg.Params
    lbcd *lbcd.Client
    // peerSubs are sessions subscribed via 'blockchain.peers.subscribe'
    peerSubs sessionMap
    // headerSubs are sessions subscribed via 'blockchain.headers.subscribe'
    headerSubs sessionMap
    // hashXSubs are sessions subscribed via 'blockchain.{address,scripthash}.subscribe'
    hashXSubs map[[HASHX_LEN]byte]sessionMap
}

func newSessionManager(server *Server, db *db.ReadOnlyDBColumnFamily, args *Args, grp *stop.Group, chain *chaincfg.Params, lbcd *lbcd.Client) *sessionManager {
    return &sessionManager{
        sessions: make(sessionMap),
        grp: grp,
        sessionsMax: args.MaxSessions,
        sessionTimeout: time.Duration(args.SessionTimeout) * time.Second,
        manageTicker: time.NewTicker(time.Duration(max(5, args.SessionTimeout/20)) * time.Second),
        db: db,
        args: args,
        server: server,
        chain: chain,
        lbcd: lbcd,
        peerSubs: make(sessionMap),
        headerSubs: make(sessionMap),
        hashXSubs: make(map[[HASHX_LEN]byte]sessionMap),
    }
}

func (sm *sessionManager) start() {
    sm.grp.Add(1)
    go sm.manage()
}

func (sm *sessionManager) stop() {
    sm.sessionsMut.Lock()
    defer sm.sessionsMut.Unlock()
    sm.headerSubs = make(sessionMap)
    sm.hashXSubs = make(map[[HASHX_LEN]byte]sessionMap)
    for _, sess := range sm.sessions {
        sess.client.Close()
        sess.conn.Close()
    }
    sm.sessions = make(sessionMap)
}

func (sm *sessionManager) manage() {
    for {
        sm.sessionsMut.Lock()
        for _, sess := range sm.sessions {
            if time.Since(sess.lastRecv) > sm.sessionTimeout {
                sm.removeSessionLocked(sess)
                log.Infof("session %v timed out", sess.addr.String())
            }
        }
        sm.sessionsMut.Unlock()
        // Wait for next management clock tick.
        select {
        case <-sm.grp.Ch():
            sm.grp.Done()
            return
        case <-sm.manageTicker.C:
            continue
        }
    }
}

func (sm *sessionManager) addSession(conn net.Conn) *session {
    sm.sessionsMut.Lock()
    sess := &session{
        addr: conn.RemoteAddr(),
        conn: conn,
        hashXSubs: make(map[[11]byte]string),
        client: jsonrpc.NewClientCodec(conn),
        lastRecv: time.Now(),
    }
    sess.id = uintptr(unsafe.Pointer(sess))
    sm.sessions[sess.id] = sess
    sm.sessionsMut.Unlock()

    // Create a new RPC server. These services are linked to the
    // session, which allows RPC handlers to know the session for
    // each request and update subscriptions.
    s1 := rpc.NewServer()

    // Register "server.{features,banner,version}" handlers.
    serverSvc := &ServerService{sm.args}
    err := s1.RegisterName("server", serverSvc)
    if err != nil {
        log.Errorf("RegisterName: %v\n", err)
    }

    // Register "server.peers" handlers.
    peersSvc := &PeersService{Server: sm.server}
    err = s1.RegisterName("server.peers", peersSvc)
    if err != nil {
        log.Errorf("RegisterName: %v\n", err)
    }

    // Register "blockchain.claimtrie.*" handlers.
    claimtrieSvc := &ClaimtrieService{sm.db, sm.server}
    err = s1.RegisterName("blockchain.claimtrie", claimtrieSvc)
    if err != nil {
        log.Errorf("RegisterName: %v\n", err)
    }

    // Register "blockchain.{block,address,scripthash,transaction}.*" handlers.
    blockchainSvc := &BlockchainBlockService{sm.db, sm.chain}
    err = s1.RegisterName("blockchain.block", blockchainSvc)
    if err != nil {
        log.Errorf("RegisterName: %v\n", err)
        goto fail
    }
    err = s1.RegisterName("blockchain.headers", &BlockchainHeadersService{sm.db, sm.chain, sm, sess})
    if err != nil {
        log.Errorf("RegisterName: %v\n", err)
        goto fail
    }
    err = s1.RegisterName("blockchain.address", &BlockchainAddressService{sm.db, sm.chain, sm, sess})
    if err != nil {
        log.Errorf("RegisterName: %v\n", err)
        goto fail
    }
    err = s1.RegisterName("blockchain.scripthash", &BlockchainScripthashService{sm.db, sm.chain, sm, sess})
    if err != nil {
        log.Errorf("RegisterName: %v\n", err)
        goto fail
    }
    err = s1.RegisterName("blockchain.transaction", &BlockchainTransactionService{sm.db, sm.chain, sm})
    if err != nil {
        log.Errorf("RegisterName: %v\n", err)
        goto fail
    }

    sm.grp.Add(1)
    go func() {
        s1.ServeCodec(&sessionServerCodec{jsonrpc.NewServerCodec(newJsonPatchingCodec(conn)), sess})
        log.Infof("session %v goroutine exit", sess.addr.String())
        sm.grp.Done()
    }()
    return sess

fail:
    sm.removeSession(sess)
    return nil
}

func (sm *sessionManager) removeSession(sess *session) {
    sm.sessionsMut.Lock()
    defer sm.sessionsMut.Unlock()
    sm.removeSessionLocked(sess)
}

func (sm *sessionManager) removeSessionLocked(sess *session) {
    if sess.headersSub {
        delete(sm.headerSubs, sess.id)
    }
    for hashX := range sess.hashXSubs {
        subs, ok := sm.hashXSubs[hashX]
        if !ok {
            continue
        }
        delete(subs, sess.id)
    }
    delete(sm.sessions, sess.id)
    sess.client.Close()
    sess.conn.Close()
}

func (sm *sessionManager) broadcastTx(rawTx []byte) (*chainhash.Hash, error) {
    var msgTx wire.MsgTx
    err := msgTx.Deserialize(bytes.NewReader(rawTx))
    if err != nil {
        return nil, err
    }
    return sm.lbcd.SendRawTransaction(&msgTx, false)
}

func (sm *sessionManager) peersSubscribe(sess *session, subscribe bool) {
    sm.sessionsMut.Lock()
    defer sm.sessionsMut.Unlock()
    if subscribe {
        sm.peerSubs[sess.id] = sess
        sess.peersSub = true
        return
    }
    delete(sm.peerSubs, sess.id)
    sess.peersSub = false
}

func (sm *sessionManager) headersSubscribe(sess *session, raw bool, subscribe bool) {
    sm.sessionsMut.Lock()
    defer sm.sessionsMut.Unlock()
    if subscribe {
        sm.headerSubs[sess.id] = sess
        sess.headersSub = true
        sess.headersSubRaw = raw
        return
    }
    delete(sm.headerSubs, sess.id)
    sess.headersSub = false
    sess.headersSubRaw = false
}

func (sm *sessionManager) hashXSubscribe(sess *session, hashX []byte, original string, subscribe bool) {
    sm.sessionsMut.Lock()
    defer sm.sessionsMut.Unlock()
    var key [HASHX_LEN]byte
    copy(key[:], hashX)
    subs, ok := sm.hashXSubs[key]
    if subscribe {
        if !ok {
            subs = make(sessionMap)
            sm.hashXSubs[key] = subs
        }
        subs[sess.id] = sess
        sess.hashXSubs[key] = original
        return
    }
    if ok {
        delete(subs, sess.id)
        if len(subs) == 0 {
            delete(sm.hashXSubs, key)
        }
    }
    delete(sess.hashXSubs, key)
}

func (sm *sessionManager) doNotify(notification interface{}) {
    switch note := notification.(type) {
    case internal.HeightHash:
        // The HeightHash notification translates to headerNotification.
        notification = &headerNotification{HeightHash: note}
    }

    sm.sessionsMut.RLock()
    var subsCopy sessionMap
    switch note := notification.(type) {
    case headerNotification:
        log.Infof("header notification @ %#v", note)
        subsCopy = sm.headerSubs
        if len(subsCopy) > 0 {
            hdr := [HEADER_SIZE]byte{}
            copy(hdr[:], note.BlockHeader)
            note.blockHeaderElectrum = newBlockHeaderElectrum(&hdr, uint32(note.Height))
            note.blockHeaderStr = hex.EncodeToString(note.BlockHeader[:])
        }
    case hashXNotification:
        log.Infof("hashX notification @ %#v", note)
        hashXSubs, ok := sm.hashXSubs[note.hashX]
        if ok {
            subsCopy = hashXSubs
        }
        if len(subsCopy) > 0 {
            note.statusStr = hex.EncodeToString(note.status)
        }
    case peerNotification:
        subsCopy = sm.peerSubs
    default:
        log.Warnf("unknown notification type: %v", notification)
    }
    sm.sessionsMut.RUnlock()

    // Deliver notification to relevant sessions.
    for _, sess := range subsCopy {
        sess.doNotify(notification)
    }

    // Produce secondary hashXNotification(s) corresponding to the headerNotification.
    switch note := notification.(type) {
    case headerNotification:
        touched, err := sm.db.GetTouchedHashXs(uint32(note.Height))
        if err != nil {
            log.Errorf("failed to get touched hashXs at height %v, error: %v", note.Height, err)
            break
        }
        for _, hashX := range touched {
            hashXstatus, err := sm.db.GetStatus(hashX)
            if err != nil {
                log.Errorf("failed to get status of hashX %v, error: %v", hashX, err)
                continue
            }
            note2 := hashXNotification{}
            copy(note2.hashX[:], hashX)
            note2.status = hashXstatus
            sm.doNotify(note2)
        }
    }
}

type sessionServerCodec struct {
    rpc.ServerCodec
    sess *session
}

// ReadRequestHeader provides ability to rewrite the incoming
// request "method" field. For example:
//
//	blockchain.block.get_header -> blockchain.block.Get_header
//	blockchain.address.listunspent -> blockchain.address.Listunspent
//
// This makes the "method" string compatible with rpc.Server
// requirements.
func (c *sessionServerCodec) ReadRequestHeader(req *rpc.Request) error {
    log.Infof("from %v receive header", c.sess.addr.String())
    err := c.ServerCodec.ReadRequestHeader(req)
    if err != nil {
        log.Warnf("error: %v", err)
        return err
    }
    log.Infof("from %v receive header: %#v", c.sess.addr.String(), *req)
    rawMethod := req.ServiceMethod
    parts := strings.Split(rawMethod, ".")
    if len(parts) < 2 {
        return fmt.Errorf("blockchain rpc: service/method ill-formed: %q", rawMethod)
    }
    service := strings.Join(parts[0:len(parts)-1], ".")
    method := parts[len(parts)-1]
    if len(method) < 1 {
        return fmt.Errorf("blockchain rpc: method ill-formed: %q", method)
    }
    method = strings.ToUpper(string(method[0])) + string(method[1:])
    req.ServiceMethod = service + "." + method
    return err
}

// ReadRequestBody wraps the regular implementation, but updates session stats too.
func (c *sessionServerCodec) ReadRequestBody(params any) error {
    log.Infof("from %v receive body", c.sess.addr.String())
    err := c.ServerCodec.ReadRequestBody(params)
    if err != nil {
        log.Warnf("error: %v", err)
        return err
    }
    log.Infof("from %v receive body: %#v", c.sess.addr.String(), params)
    // Bump last receive time.
    c.sess.lastRecv = time.Now()
    return err
}

// WriteResponse wraps the regular implementation, but updates session stats too.
func (c *sessionServerCodec) WriteResponse(resp *rpc.Response, reply any) error {
    log.Infof("respond to %v", c.sess.addr.String())
    err := c.ServerCodec.WriteResponse(resp, reply)
    if err != nil {
        return err
    }
    // Bump last send time.
    c.sess.lastSend = time.Now()
    return err
}

// serverRequest is a duplicate of serverRequest from
// net/rpc/jsonrpc/server.go with an added Version which
// we can check.
type serverRequest struct {
    Version string `json:"jsonrpc"`
    Method string `json:"method"`
    Params *json.RawMessage `json:"params"`
    Id *json.RawMessage `json:"id"`
}

// serverResponse is a duplicate of serverResponse from
// net/rpc/jsonrpc/server.go with an added Version which
// we can set at will.
type serverResponse struct {
    Version string `json:"jsonrpc"`
    Id *json.RawMessage `json:"id"`
    Result any `json:"result,omitempty"`
    Error any `json:"error,omitempty"`
}

// jsonPatchingCodec is able to intercept the JSON requests/responses
// and tweak them. Currently, it appears we need to make several changes:
//  1) add "jsonrpc": "2.0" (or "jsonrpc": "1.0") in response
//  2) add newline to frame response
//  3) add "params": [] when "params" is missing
//  4) replace params ["arg1", "arg2", ...] with [["arg1", "arg2", ...]]
type jsonPatchingCodec struct {
    conn net.Conn
    inBuffer *bytes.Buffer
    dec *json.Decoder
    enc *json.Encoder
    outBuffer *bytes.Buffer
}

func newJsonPatchingCodec(conn net.Conn) *jsonPatchingCodec {
    buf1, buf2 := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
    return &jsonPatchingCodec{
        conn: conn,
        inBuffer: buf1,
        dec: json.NewDecoder(buf1),
        enc: json.NewEncoder(buf2),
        outBuffer: buf2,
    }
}

func (c *jsonPatchingCodec) Read(p []byte) (n int, err error) {
    if c.outBuffer.Len() > 0 {
        // Return remaining decoded bytes.
        return c.outBuffer.Read(p)
    }
    // Buffer contents consumed. Try to decode more JSON.

    // Read until framing newline. This allows us to print the raw request.
    for !bytes.ContainsAny(c.inBuffer.Bytes(), "\n") {
        var buf [1024]byte
        n, err = c.conn.Read(buf[:])
        if err != nil {
            return 0, err
        }
        c.inBuffer.Write(buf[:n])
    }
    log.Infof("raw request: %v", c.inBuffer.String())

    var req serverRequest
    err = c.dec.Decode(&req)
    if err != nil {
        return 0, err
    }

    if req.Params != nil {
        n := len(*req.Params)
        if n < 2 || (*req.Params)[0] != '[' && (*req.Params)[n-1] != ']' {
            // This is an error, but we're not going to try to correct it.
            goto encode
        }
        // FIXME: The heuristics here don't cover all possibilities.
        // For example: [{obj1}, {obj2}] or ["foo,bar"] would not
        // be handled correctly.
        bracketed := (*req.Params)[1 : n-1]
        n = len(bracketed)
        if n > 1 && (bracketed[0] == '{' || bracketed[0] == '[') {
            // Probable single object or list argument.
            goto encode
        }
        // The params look like ["arg1", "arg2", "arg3", ...].
        // We're in trouble because our jsonrpc library does not
        // handle this. So pack these args in an inner list.
        // The handler method will receive ONE list argument.
        params := json.RawMessage(fmt.Sprintf("[[%s]]", bracketed))
        req.Params = &params
    } else {
        // Add empty argument list if params omitted.
        params := json.RawMessage("[]")
        req.Params = &params
    }

encode:
    // Encode the request. This allows us to print the patched request.
    buf, err := json.Marshal(req)
    if err != nil {
        return 0, err
    }
    log.Infof("patched request: %v", string(buf))

    err = c.enc.Encode(req)
    if err != nil {
        return 0, err
    }
    return c.outBuffer.Read(p)
}

func (c *jsonPatchingCodec) Write(p []byte) (n int, err error) {
    log.Infof("raw response: %v", string(p))
    var resp serverResponse
    err = json.Unmarshal(p, &resp)
    if err != nil {
        return 0, err
    }

    // Add "jsonrpc": "2.0" if missing.
    if len(resp.Version) == 0 {
        resp.Version = "2.0"
    }

    buf, err := json.Marshal(resp)
    if err != nil {
        return 0, err
    }
    log.Infof("patched response: %v", string(buf))

    // Add newline for framing.
    return c.conn.Write(append(buf, '\n'))
}

func (c *jsonPatchingCodec) Close() error {
    return c.conn.Close()
}
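Patching rule (4) in the jsonPatchingCodec comment is the least obvious one; the following standalone sketch (wrapParams is a hypothetical helper, and its checks are simplified relative to the codec's heuristics) shows how a positional params array ends up wrapped in an inner list:

package main

import (
    "encoding/json"
    "fmt"
)

// wrapParams demonstrates patching rule (4): a positional argument list such
// as ["arg1","arg2"] is wrapped into [["arg1","arg2"]] so the net/rpc/jsonrpc
// machinery delivers a single list argument to the handler.
func wrapParams(params json.RawMessage) json.RawMessage {
    n := len(params)
    if n < 2 || params[0] != '[' || params[n-1] != ']' {
        return params // malformed; leave untouched, as the codec does
    }
    inner := params[1 : n-1]
    if len(inner) > 1 && (inner[0] == '{' || inner[0] == '[') {
        return params // probable single object or list argument; leave as-is
    }
    return json.RawMessage(fmt.Sprintf("[[%s]]", inner))
}

func main() {
    fmt.Println(string(wrapParams(json.RawMessage(`["deadbeef", true]`))))
    // prints: [["deadbeef", true]]
}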
255
server/udp.go
Normal file
@@ -0,0 +1,255 @@
package server

import (
    "encoding/binary"
    "fmt"
    "net"
    "strconv"
    "strings"
    "time"

    pb "github.com/lbryio/herald.go/protobuf/go"
)

const maxBufferSize = 1024

// magic is the magic constant for the UDP Ping protocol. Its value is the
// genesis blocktime (which is actually wrong); this quirk is carried over from
// the python implementation this code was ported from:
// https://github.com/lbryio/lbry-sdk/blob/7d49b046d44a4b7067d5dc1d6cd65ff0475c71c8/lbry/wallet/server/udp.py#L12
const magic = 1446058291
const protocolVersion = 1
const defaultFlags = 0b00000000
const availableFlag = 0b00000001

// SPVPing is the format used to ping another hub over UDP.
// Python struct format: b'!lB64s'
type SPVPing struct {
    magic   uint32
    version byte
    padding []byte // 64 bytes
}

// SPVPong is the pong returned by another hub server.
// Python struct format: b'!BBL32s4sH'
type SPVPong struct {
    protocolVersion byte
    flags           byte
    height          uint32
    tip             []byte // 32 bytes
    srcAddrRaw      []byte // 4 bytes
    country         uint16
}

// encodeSPVPing creates the slice of bytes used to ping another hub over UDP.
func encodeSPVPing() []byte {
    data := make([]byte, 69)

    binary.BigEndian.PutUint32(data, magic)
    data[4] = protocolVersion

    return data
}

// decodeSPVPing decodes an SPVPing struct from a slice of bytes.
func decodeSPVPing(data []byte) *SPVPing {
    if len(data) < 69 {
        return nil
    }

    parsedMagic := binary.BigEndian.Uint32(data)
    parsedProtocolVersion := data[4]
    return &SPVPing{
        magic:   parsedMagic,
        version: parsedProtocolVersion,
    }
}

// Encode serializes an SPVPong into bytes for sending over UDP.
func (pong *SPVPong) Encode() []byte {
    data := make([]byte, 44)

    data[0] = pong.protocolVersion
    data[1] = pong.flags
    binary.BigEndian.PutUint32(data[2:], pong.height)
    copy(data[6:], pong.tip)
    copy(data[38:], pong.srcAddrRaw)
    binary.BigEndian.PutUint16(data[42:], pong.country)

    return data
}

// makeSPVPong creates an SPVPong struct from the given parameters.
func makeSPVPong(flags int, height int, tip []byte, sourceAddr string, country string) *SPVPong {
    byteAddr := EncodeAddress(sourceAddr)
    countryInt, ok := pb.Location_Country_value[country]
    if !ok {
        countryInt = int32(pb.Location_UNKNOWN_COUNTRY)
    }
    return &SPVPong{
        protocolVersion: protocolVersion,
        flags:           byte(flags),
        height:          uint32(height),
        tip:             tip,
        srcAddrRaw:      byteAddr,
        country:         uint16(countryInt),
    }
}

// decodeSPVPong decodes an SPVPong struct from a slice of bytes.
func decodeSPVPong(data []byte) *SPVPong {
    if len(data) < 44 {
        return nil
    }

    parsedProtocolVersion := data[0]
    flags := data[1]
    height := binary.BigEndian.Uint32(data[2:])
    tip := make([]byte, 32)
    copy(tip, data[6:38])
    srcAddrRaw := make([]byte, 4)
    copy(srcAddrRaw, data[38:42])
    country := binary.BigEndian.Uint16(data[42:])
    return &SPVPong{
        protocolVersion: parsedProtocolVersion,
        flags:           flags,
        height:          height,
        tip:             tip,
        srcAddrRaw:      srcAddrRaw,
        country:         country,
    }
}

// EncodeAddress encodes an IPv4 address into the 4 bytes used by the hub
// Ping/Pong protocol. It returns an empty slice if the address is malformed.
func EncodeAddress(addr string) []byte {
    parts := strings.Split(addr, ".")

    if len(parts) != 4 {
        return []byte{}
    }

    data := make([]byte, 4)
    for i, part := range parts {
        x, err := strconv.Atoi(part)
        if err != nil || x < 0 || x > 255 {
            return []byte{}
        }
        data[i] = byte(x)
    }

    return data
}

// DecodeAddress returns the IPv4 address carried by an SPVPong.
func (pong *SPVPong) DecodeAddress() net.IP {
    return net.IPv4(
        pong.srcAddrRaw[0],
        pong.srcAddrRaw[1],
        pong.srcAddrRaw[2],
        pong.srcAddrRaw[3],
    )
}

func (pong *SPVPong) DecodeCountry() string {
    return pb.Location_Country_name[int32(pong.country)]
}

func (pong *SPVPong) DecodeProtocolVersion() int {
    return int(pong.protocolVersion)
}

func (pong *SPVPong) DecodeHeight() int {
    return int(pong.height)
}

func (pong *SPVPong) DecodeTip() []byte {
    return pong.tip
}

func (pong *SPVPong) DecodeFlags() byte {
    return pong.flags
}

// UDPPing sends a ping over UDP to another hub and returns the decoded pong,
// which includes the address of this hub as seen by the remote server.
func UDPPing(ip, port string) (*SPVPong, error) {
    address := ip + ":" + port
    addr, err := net.ResolveUDPAddr("udp", address)
    if err != nil {
        return nil, err
    }

    conn, err := net.DialUDP("udp", nil, addr)
    if err != nil {
        return nil, err
    }

    defer conn.Close()

    _, err = conn.Write(encodeSPVPing())
    if err != nil {
        return nil, err
    }

    buffer := make([]byte, maxBufferSize)
    deadline := time.Now().Add(time.Second)
    err = conn.SetReadDeadline(deadline)
    if err != nil {
        return nil, err
    }
    n, _, err := conn.ReadFromUDP(buffer)
    if err != nil {
        return nil, err
    }

    pong := decodeSPVPong(buffer[:n])

    if pong == nil {
        return nil, fmt.Errorf("pong decoding failed")
    }

    return pong, nil
}

// UDPServer runs a UDP server that implements the hub Ping/Pong protocol so
// hubs can learn about each other without making full TCP connections. It is
// intended to be run as a goroutine.
func (s *Server) UDPServer(port int) error {
    address := ":" + strconv.Itoa(port)

    tip := make([]byte, 32)
    addr, err := net.ResolveUDPAddr("udp", address)
    if err != nil {
        return err
    }

    conn, err := net.ListenUDP("udp", addr)
    if err != nil {
        return err
    }

    defer conn.Close()

    buffer := make([]byte, maxBufferSize)
    for {
        // TODO: verify the ping before answering.
        _, addr, err := conn.ReadFromUDP(buffer)
        if err != nil {
            return err
        }

        sAddr := addr.IP.String()
        pong := makeSPVPong(defaultFlags|availableFlag, 0, tip, sAddr, s.Args.Country)
        data := pong.Encode()

        _, err = conn.WriteToUDP(data, addr)
        if err != nil {
            return err
        }
    }
}
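For context, a minimal usage sketch of the new ping API (not part of the diff; it assumes the import path used above and borrows the hub address from server/udp_test.go):

package main

import (
    "fmt"
    "log"

    server "github.com/lbryio/herald.go/server"
)

func main() {
    // Ping a known hub and print what it reports back about us.
    pong, err := server.UDPPing("spv17.lbry.com", "50001")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("external IP:", pong.DecodeAddress()) // our address as seen by the hub
    fmt.Println("height:", pong.DecodeHeight())
    fmt.Println("country:", pong.DecodeCountry())
}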
85
server/udp_test.go
Normal file
@@ -0,0 +1,85 @@
package server_test

import (
    "log"
    "os/exec"
    "strings"
    "testing"

    server "github.com/lbryio/herald.go/server"
)

// TestUDPPing tests UDPPing correctness against prod server.
func TestUDPPing(t *testing.T) {
    args := server.MakeDefaultTestArgs()
    args.DisableStartUDP = true

    tests := []struct {
        name                string
        wantIP              string
        wantCountry         string
        wantProtocolVersion int
        wantHeightMin       int
        wantFlags           byte
    }{
        {
            name:                "Correctly parse information from production server.",
            wantIP:              "SETME",
            wantCountry:         "US",
            wantProtocolVersion: 1,
            wantHeightMin:       1060000,
            wantFlags:           1,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            toAddr := "spv17.lbry.com"
            toPort := "50001"

            pong, err := server.UDPPing(toAddr, toPort)
            if err != nil {
                t.Skipf("ping failed: %v", err)
            }
            gotCountry := pong.DecodeCountry()

            res, err := exec.Command("dig", "@resolver4.opendns.com", "myip.opendns.com", "+short").Output()
            if err != nil {
                log.Println(err)
            }

            digIP := strings.TrimSpace(string(res))
            udpIP := pong.DecodeAddress().String()
            tt.wantIP = digIP

            log.Println("Height:", pong.DecodeHeight())
            log.Printf("Flags: %x\n", pong.DecodeFlags())
            log.Println("ProtocolVersion:", pong.DecodeProtocolVersion())
            log.Printf("Tip: %x\n", pong.DecodeTip())

            gotHeight := pong.DecodeHeight()
            gotProtocolVersion := pong.DecodeProtocolVersion()
            gotFlags := pong.DecodeFlags()
            gotIP := udpIP

            if gotIP != tt.wantIP {
                t.Errorf("ip: got: '%s', want: '%s'\n", gotIP, tt.wantIP)
            }
            if gotCountry != tt.wantCountry {
                t.Errorf("country: got: '%s', want: '%s'\n", gotCountry, tt.wantCountry)
            }
            if gotHeight < tt.wantHeightMin {
                t.Errorf("height: got: %d, want >=: %d\n", gotHeight, tt.wantHeightMin)
            }
            if gotProtocolVersion != tt.wantProtocolVersion {
                t.Errorf("protocolVersion: got: %d, want: %d\n", gotProtocolVersion, tt.wantProtocolVersion)
            }
            if gotFlags != tt.wantFlags {
                t.Errorf("flags: got: %d, want: %d\n", gotFlags, tt.wantFlags)
            }
        })
    }
}
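Assuming the module layout in this diff, the test above can be run on its own with something like:

    go test -run TestUDPPing ./server

Note that it reaches out to a production hub and to OpenDNS via dig, so it behaves as an integration test and skips itself when the ping fails.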
73
signal.go
Normal file
@@ -0,0 +1,73 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
    "os"
    "os/signal"

    "github.com/lbryio/lbry.go/v3/extras/stop"
    log "github.com/sirupsen/logrus"
)

// shutdownRequestChannel is used to initiate shutdown from one of the
// subsystems using the same code paths as when an interrupt signal is received.
var shutdownRequestChannel = make(stop.Chan)

// interruptSignals defines the default signals to catch in order to do a proper
// shutdown. This may be modified during init depending on the platform.
var interruptSignals = []os.Signal{os.Interrupt}

// interruptListener listens for OS signals such as SIGINT (Ctrl+C) and shutdown
// requests from shutdownRequestChannel. It returns a channel that is closed
// when either signal is received.
func interruptListener() <-chan struct{} {
    c := make(chan struct{})
    go func() {
        interruptChannel := make(chan os.Signal, 1)
        signal.Notify(interruptChannel, interruptSignals...)

        // Listen for the initial shutdown signal and close the returned
        // channel to notify the caller.
        select {
        case sig := <-interruptChannel:
            log.Infof("Received signal (%s). Shutting down...", sig)

        case <-shutdownRequestChannel:
            log.Info("Shutdown requested. Shutting down...")
        }
        close(c)

        // Listen for repeated signals and display a message so the user
        // knows the shutdown is in progress and the process is not
        // hung.
        for {
            select {
            case sig := <-interruptChannel:
                log.Infof("Received signal (%s). Already "+
                    "shutting down...", sig)
            case <-shutdownRequestChannel:
                log.Info("Shutdown requested. Already " +
                    "shutting down...")
            }
        }
    }()

    return c
}

// interruptRequested returns true when the channel returned by
// interruptListener was closed. This simplifies early shutdown slightly since
// the caller can just use an if statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
    select {
    case <-interrupted:
        return true
    default:
    }

    return false
}
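A minimal sketch (not part of the diff) of how these helpers are typically wired into a long-running main loop; doWork here is a hypothetical unit of work:

    interrupt := interruptListener()
    for {
        if interruptRequested(interrupt) {
            return
        }
        doWork() // hypothetical; any unit of work between shutdown checks
    }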
18
signalsigterm.go
Normal file
@@ -0,0 +1,18 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package main

import (
    "os"
    "syscall"
)

// initsignals sets the signals to be caught by the signal handler.
func initsignals() {
    interruptSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
}
11
testdata/B.csv
vendored
Normal file
@@ -0,0 +1,11 @@
B,
|
||||
4200000031a2e262d60074f07330d7187907e5b02be8f9b3c60cdc03d776314912,0200000001d922db1c8020a8a101ccab3a9dff62eccd8660c9351bf91af8dc481544395821010000006a473044022047bbe0eec4931aa332ac089dfb56bac095e4e2f11258f95f8dc11f2d915399b802207cbe15f5c1ad882ef7cf7c987c5380a56613c7bbfd56e4a8e4161a46fd522a84012103e1a29d4cb998f7a6a165f1a2aaa524f319a59beb4f9336f542175ddecc6cac03feffffff0200ca9a3b000000001976a914028b4111c923a411ba165760cacea097b9b0b77588ac567d1329400000001976a914bc3826102bebb5ab7d88cb080b5234b67aac787888ac4ad50c00
|
||||
420000004e91edda0f9cd3bcef9565a31e6bbbd34c731483e03ec7d8819158ac30,02000000018fb565983ddfc193da15f68414712779a7de703babef37c1b30fb8956abbd034010000006a4730440220011f6c22ecafdfc03fde256c0676b7d840e8aacd9e75a02fde40862451d97b7f022072a8093343d900d5543a00e648352a7c7cf2389d89a5863d33c34e44f3acaa590121036be4ec00fc805765e824a604fecc376cdc33cd3a8d175fad63980e16e79d8809feffffff026e39190d000000001976a9146911aebb1ec343bb432aa3edf9f08e4f110a541e88ac40787d01000000001976a914ea2faaf77fab748cddca231f9d528c43c2f3a78e88ac02340f00
|
||||
420000008070865693cd82ed0f59896e34973adbff0583fb8a1293919591446075,02000000017c0ac8ee47ebdcbf4c36a0f1ff0a77347658099b734e65bd662ca510b91385b2000000006a473044022023da5a18003e6db9144b691ea77cb267b911678e15cecab4d22c52b4ced6ad3302203ba4e2cb7d1ad2c88aa89827767cea76c1805a366598fc53513807f3e5db78cd0121022c949b389ff428dc58ca8f0803dab7cde26cbc0a12b26ef758853c1266f9e543feffffff023dabb942020000001976a91440ee9a09269bdd2f3532b8bc18d792cd435de79488ac002d3101000000001976a914b4158a6a1b7505068bc5df3b0f76a02cc38014d788ac9a880f00
|
||||
420000009c24d4d9187749a1f8f6d6c0f92a5e98817f5efcd427a5593344a45a2e,0200000001bda00a971cebc8727690779e84d00ba4d7393032ff0d75e11909c3ede2c7f48c010000006a473044022077d431faca3342ec9cb7701b96490cb67e5b1b3c70efb8bf6f92aa183321c71202207a9de1cb8508264f97bf2cae258571acef93d520d4743fa71b332684f99c082d0121030c29115cd040efdb5b3617e180074e78c2c9a75613c32701241ad9f8985f3abcfeffffff0240787d01000000001976a914c154d27978ee4c3002ad934e858e63c58550b1ae88acc47d6ab0070000001976a914d1ea863ff9311b80837f43732d4f26b7e183862088aca36c0d00
|
||||
42000000cee66b136a85596df24ece60bbd1392f70204fd2f144f059e5195ee3c9,0100000001f5013c21bd5c858f7eed0df58e9471e148d8924962d7cadd1b775358029076f9010000006a47304402205348d7b22894f7ed8f8d44fa73a65ea9d8124e4b0b88d816b0f8b6714b897da702204093e051c09c86c33b6dcd4285670a1d442622f5b687ac028514e5b44718fd4e012103c633b45b4d49c485c67b7d96920fa6a26ae30a07c0fb9f0ae8f85c00a19b8825feffffff0273e25769000000001976a914999d5b0e3d5efcf601c711127b91841afbf5c37a88acf6268a0d090000001976a914816f8bd268c614b3ae8b564fa531ece6e1d0297188ac522f0200
|
||||
42000000e0bf96accd4eda4d871c5bc8c0ebb14509b896a867095f9b419f9b04f2,0200000001d17c8a33cddad3b6a1f34248e9457b359c59e80a5541c017fd5ab1c8b21f0d2b010000006a473044022036102bd4d4e28909a17e189d23d640fd0f1111f45eb5639f68da70317e04831602201a317ae0af0930d27c280a91ced77c4c6dccda7eb297f7bf73023106762e88c8012102913606374e2b055d06fb4fb8250ca65ca66ecf4154916722e11cf3487555b2f0feffffff0240787d01000000001976a914c13e9599dafeb20e2e7cf994941d44e1e1e88ce588ace9cba36d020000001976a9149db9398216b2ad3726aa8d5a3391a20d460f494388ac7ac40e00
|
||||
420000011daab2a9f45cca3d0bac6fee69324af4757b1c4e8c3a362efbb6e8a09d,02000000019161a7c7a985bd44afafebe044df38df82ecdba417ea3244235572257b201068000000006a473044022077ed0bb436d9a7bd1a83f680cef81d9b8cbd2b74de310f32859c49077e009f90022020faea4599352a9d42c50b7a4eeeda1ba31121e5a0817ae0b650f35ae79a06d6012102ad6c243af5779b873b171b79cbe65cda7cf8c5d35865ecaf796912e888150c3cfeffffff02f8c2bc74040000001976a91420195c8e1c81eb3bb03452d855591d4099db08d588ac00c2eb0b000000001976a9148bc70b6256cff9f4e645bb1431548aad757ba56388ac1ec30b00
|
||||
4200000150116856ce8ce06604b9584e61afb3b613a8a5a512ab260e7f0cbe5496,010000000183843af3333d4b751745b90321b1e7c9f32b23299ac6ed160ef1d7f079a7b5f8000000006a4730440220761f40ffdc87d217f84adb43132a78f9db3a16bf9303876c6a871abb253823be02202357dadaed6701134e68f2fe41bb883da82d550c431fb1f36ef6b97ad2c3e19e012103d77e3266f1ed4533e3bd5464aebedbf0a3d2b7ec677cca6996a8548d73e50af6feffffff1147164d1f000000001976a91466f157283bbc18c616f825c2ad2d9c6108a0e96f88acd680a626000000001976a914ae0ee727bf91e4f9f70664a8a0936d7ff81d3d5888ac9b425a1e000000001976a9146fd65fe059c85fe1cb704efc80c81f0275ab7cb188ac71222212000000001976a914c2daa42a9414bc6b70b395b79d8b54556c918a2288ac0549ce27000000001976a91438b1ce845765a1a749117bbd1c6fea9adcf2784088acb420011e000000001976a914213a5c1975e979b60a24bf72809f3d7205ee925888ac58c50621000000001976a914fc6a1f55c1bb2dd382df728d75c893406bfadb5d88ac9183621e000000001976a914ad70928b870c76b899cf9d66f1ee5b258976721088ac19eb3a0c000000001976a91421ba33ab978890434c8cfd31db9789fe5af0e73c88acb755ea1e000000001976a914c2003ddf455851be985e349830dc0d7dc619205c88ac125a3f27000000001976a91411b58fd67f6b389a5bfd592db92fdcfe613214aa88acd674713f000000001976a914514f9d058cad9251b816ae609b847582e759519188ac205e3007000000001976a91417066054003dd148297c6f1922db8bcbe185ba3b88acb562931f000000001976a9149481776b356d441567601fc713e9516ccf85ebb688ace2c8184e000000001976a914dd705e9e15c0c0e0a714f6b79b8618aa840f9b2c88acc3fb0ba6050000001976a91419bef2dcb05c86662c08ef2848819a685dd0dcdc88ac5d258c06000000001976a9142479394dc4ef5e61bcb9c60ef8f3f27dffa57fdb88ac20fb0700
|
||||
420000015ba8df5c3ed80d0fc364136de02e3ba9d9550cbb1ecef03b97fcdf0621,0100000001e50a9a7f3bcd2d4c0c3dc60d96b3e008a247e80e0b114d34b38399ac6ed6ea620c0000006b4830450221009b8beec62aac1e7096071b3fbbaa4cc1846ef8781ea8a9a2a8d6c1d918f359b6022074f04dd3da2c67e2e293c4a1eaf69931512a060af6665740e6084f03a6fbff22012103b5f96b51c7b567b747b9cc14e46daab5200acc37052e149e213939e103f69aa6ffffffff0134b7f505000000001976a914e75d6c70d420ffaef75def1a94469f2dc6546a3588ac00000000
|
||||
42000001d4a53fc92321415631862a791f8680241ed172e579534713f68a6869ba,01000000025b09f3192aa1f9e436b4b282143f0668acd6fb7c90cdbfef0a69d84d036e702a000000006b483045022100df7687542decdaaabf1bb67b1f6809a697f54264a09c8e9dc33c5527fd6ef16302200fab534e44fad5d86a65a645a0b8ebfb9644dd3ecac65b8745706076a430c2dc0121029d7cd706d4da4a71441a1dc4a6109fdb481112d749f19174cb025e95bfafbc94ffffffff5b09f3192aa1f9e436b4b282143f0668acd6fb7c90cdbfef0a69d84d036e702a010000006a47304402204849b7a22292f1dd6af210da076b523330640e5bb31673d3dfd394067d17b23902201813fd1f1a91399b64b0cb0c309d41aa2c066f69403ca90ca046af6c609182810121029d7cd706d4da4a71441a1dc4a6109fdb481112d749f19174cb025e95bfafbc94ffffffff0240420f0000000000c6b70b406372617a7964696e676f14f9407ef9b87c18020796dd434fad2bca5c727acc4c8700125a0a583056301006072a8648ce3d020106052b8104000a03420004b906153f96ff0567979424eca1a19691df7d23be28901c248b6fe7986cf740f7631f5a36f6d03d26bd97e37e83e1320808400202dd4dbad81467f1570ff763d152282a2668747470733a2f2f737065652e63682f312f623530383737333436313932626433382e6a70676d6d76a91439ed14bafdb75ee4f06f134de8aee0b845c3bd3588ac40a01000000000001976a91439ed14bafdb75ee4f06f134de8aee0b845c3bd3588ac00000000
|
|
11
testdata/C.csv
vendored
Normal file
@@ -0,0 +1,11 @@
C,
|
||||
4300000000,63f4346a4db34fdfce29a70f5e8d11f065f6b91602b7036c7f22f3a03b28899c
|
||||
4300000001,246cb85843ac936d55388f2ff288b011add5b1b20cca9cfd19a403ca2c9ecbde
|
||||
4300000002,0044e1258b865d262587c28ff98853bc52bb31266230c1c648cc9004047a5428
|
||||
4300000003,bbf8980e3f7604896821203bf62f97f311124da1fbb95bf523fcfdb356ad19c9
|
||||
4300000004,1a650b9b7b9d132e257ff6b336ba7cd96b1796357c4fc8dd7d0bd1ff1de057d5
|
||||
4300000005,6d694b93a2bb5ac23a13ed6749a789ca751cf73d5982c459e0cd9d5d303da74c
|
||||
4300000006,b57808c188b7315583cf120fe89de923583bc7a8ebff03189145b86bf859b21b
|
||||
4300000007,a6a5b330e816242d54c8586ba9b6d63c19d921171ef3d4525b8ffc635742e83a
|
||||
4300000008,b8447f415279dffe8a09afe6f6d5e335a2f6911fce8e1d1866723d5e5e8a5306
|
||||
4300000009,558daee5a4a55fe03d912e35c7b6b0bc19ece82fd5bcb685bc36f2bc381babfd
|
|
11
testdata/D.csv
vendored
Normal file
@@ -0,0 +1,11 @@
D,
|
||||
44000100ffffffffffffd8ef002741130000,a51d5c567412654e6d741114fea6fb851dec7380
|
||||
44000101ffffffffffffd8ef002741140001,11158037afca9c2efabc3dff55e352bf1f5634c5
|
||||
44000102ffffffffffffd8ef002741150000,a4a575934de77d8ec8589595d8cd91857e3cf5ba
|
||||
44000103ffffffffffffd8ef002741160001,f595a21fb597bd030defefda3df9f8f4a3e0cb86
|
||||
44000104ffffffffffffd8ef002741170000,682ccb0518a6bd00c955949d9ef330d3ac18cb80
|
||||
44000105ffffffffffffd8ef002741180000,078a435851bf97c5cc36e8b03e3208a30d27679f
|
||||
44000106ffffffffffffd8ef002741190000,1a4a75246a766cf21a629f619bc5bcb531de7a5a
|
||||
44000107ffffffffffffd8ef0027411a0000,b144ad496b29b9c12c316f319773adcdd4c9bce2
|
||||
44000108ffffffffffffd8ef0027411b0001,af2a09232fc6bf664088d65da42fe0345b458960
|
||||
44000109ffffffffffffd8ef0027411c0001,7b2ab15758c519116fb9ba9331a3b9ee7530831f
|
|
11
testdata/E.csv
vendored
Normal file
@@ -0,0 +1,11 @@
E,
|
||||
45000000a420c44374f4f399ab4807fa1901eefc87,0297ec2100000297ec21000000000000000f42400100246167656e63652d64c3a974727569742c2d6e6f7576656175782d736b696e732d6c65616b
|
||||
45000000c27eef5ea69e0d73f118826c7e326bb469,00371d66000000371d660000000000001dcd650001000e4d696e696174757265486f757365
|
||||
4500000110e40894573f528c393fbcec7a472ec853,01516b32000001516b3200000000000000989680010021696f2d3137372d4e6f616d2d43686f6d736b792d6f6e2d434f494e54454c50524f
|
||||
4500000324e40fcb63a0b517a3660645e9bd99244a,030bb6ba0000030bb6ba000000000000000f424001001b436f6e616e2d4578696c65732d526169642d4561726c792d457261
|
||||
45000003d1538a0f19f5cd4bc1a62cc294f5c89934,011c7c990000011c7c99000000000000000f424001001130322d636172726167652d68616e646c65
|
||||
45000008d47beeff8325e795a8604226145b01702b,02dbb2a2000002dbb2a2000000000000000186a001001039643336363661336161313231376138
|
||||
4500000906499e073e94370ceff37cb21c28212444,0369842d00000369842d000000000000000186a001001033333465356465363139306534323466
|
||||
45000009c3172e034a255f3c03566dca84bb9f046a,0225c69c000002251b0f0000000000000007a120010028617574686f722d73746f726965732d706f64636173742d657069736f64652d3734332d6b6172656e
|
||||
45000009ca6e0caaaef16872b4bd4f6f1b8c2363e2,02b16956000002b16956000000000000000f4240010027554e2d504f55522d43454e542d28312d292d28536f72616c2c2d4162c3a963c3a9646169726529
|
||||
4500000ad9ded2e15d18987900d09e9b29ef33d03e,02c972b3000002c972b3000000000000000186a0010006313331333333
|
|
2
testdata/E_resolve.csv
vendored
Normal file
@@ -0,0 +1,2 @@
E,,
|
||||
E,452556ed1cab9d17f2a9392030a9ad7f5d138f11bd,006284e300000061ec7c0000000000000007a1200000134053747978686578656e68616d6d6572363636
|
|
11
testdata/F.csv
vendored
Normal file
@@ -0,0 +1,11 @@
F,
|
||||
460001000161002741130000,002741130000
|
||||
46000100026135002741130000,002741130000
|
||||
4600010003613531002741130000,002741130000
|
||||
460001000461353164002741130000,002741130000
|
||||
46000100056135316435002741130000,002741130000
|
||||
4600010006613531643563002741130000,002741130000
|
||||
460001000761353164356335002741130000,002741130000
|
||||
46000100086135316435633536002741130000,002741130000
|
||||
4600010009613531643563353637002741130000,002741130000
|
||||
460001000a61353164356335363734002741130000,002741130000
|
|
42
testdata/FULL_resolve.csv
vendored
Normal file
@@ -0,0 +1,42 @@
EFGIJPRSVWXZas,,
|
||||
E,452556ed1cab9d17f2a9392030a9ad7f5d138f11bd,006284e300000061ec7c0000000000000007a1200000134053747978686578656e68616d6d6572363636
|
||||
F,4600134073747978686578656e68616d6d657236363601320061ec7c0000,006284e30000
|
||||
F,4600134073747978686578656e68616d6d6572363636013503e4d2e60000,03e4d2e60000
|
||||
F,4600134073747978686578656e68616d6d657236363601630382eee90000,0382eee90000
|
||||
F,4600134073747978686578656e68616d6d65723636360232350061ec7c0000,006284e30000
|
||||
F,4600134073747978686578656e68616d6d657236363602356603e4d2e60000,03e4d2e60000
|
||||
F,4600134073747978686578656e68616d6d65723636360263330382eee90000,0382eee90000
|
||||
F,4600134073747978686578656e68616d6d6572363636033235350061ec7c0000,006284e30000
|
||||
F,4600134073747978686578656e68616d6d65723636360335666103e4d2e60000,03e4d2e60000
|
||||
F,4600134073747978686578656e68616d6d6572363636036333610382eee90000,0382eee90000
|
||||
F,4600134073747978686578656e68616d6d657236363604323535360061ec7c0000,006284e30000
|
||||
G,47006284e30000,2556ed1cab9d17f2a9392030a9ad7f5d138f11bd00134073747978686578656e68616d6d6572363636
|
||||
I,499a0ed686ecdad9b6cb965c4d6681c02f0bbc66a60369e2b20000,2556ed1cab9d17f2a9392030a9ad7f5d138f11bd
|
||||
J,4a2556ed1cab9d17f2a9392030a9ad7f5d138f11bd0013612d73747265616d2d696e2d7665726d6f6e740069a2570000,ee29ca5c44313a2827fd35252b72557334635749
|
||||
P,5000134073747978686578656e68616d6d6572363636,2556ed1cab9d17f2a9392030a9ad7f5d138f11bd000a6471
|
||||
R,5201006284e30000,000a6b652556ed1cab9d17f2a9392030a9ad7f5d138f11bd00134073747978686578656e68616d6d6572363636
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a6b67006286030000,0000007615cbad28
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a706a0063105c0000,000000000bebc200
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a73ea006367550000,0000000005f5e100
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a7d63006469750000,0000000db0b7c894
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a7ebf00648c480000,00000000b2d05e00
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a810e0064ccc00000,000000003b9aca00
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a825b006503cf0000,00000002bf52c92c
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a88930066814a0000,00000000dc887a34
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a88f900669d240000,0000000005f5e100
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a88f900669d260000,000000001dcd6500
|
||||
V,56000009ca6e0caaaef16872b4bd4f6f1b8c2363e2,dbdfb6cd5e83baf342eaab8b19662ed0c71aae9a
|
||||
W,572556ed1cab9d17f2a9392030a9ad7f5d138f11bd00812cb90000,fa3a1c918fafd094083240fd54a3c8577b7f1094
|
||||
W,572556ed1cab9d17f2a9392030a9ad7f5d138f11bd00812ce70000,1b845565203eca16cb6135e6fb70d4d2cec4ee9b
|
||||
W,572556ed1cab9d17f2a9392030a9ad7f5d138f11bd00812ce90000,c2bfc30ebdf2511a2a9a22b463f80d1f751ee38c
|
||||
W,572556ed1cab9d17f2a9392030a9ad7f5d138f11bd00812d3f0000,f9c6adebfb970aa9ab1cac21f82eb007b2421a20
|
||||
W,572556ed1cab9d17f2a9392030a9ad7f5d138f11bd00e7b0800000,a3cfb4a2a4b7efda98d5f680d6dbc30b4ebb328b
|
||||
W,57255761310145baa958b5587d9b5571423e5a0d3c0208ba650000,2ae0dadba7d5931105ca2e5cb1c12ec61100b9b5
|
||||
W,57255761310145baa958b5587d9b5571423e5a0d3c0208dc150000,a9389febb41d9a1c63deef395273b903caf4a18d
|
||||
W,57255761310145baa958b5587d9b5571423e5a0d3c0208e3eb0000,68ab6c0cdd615540062b6f6d637f8b47ab0e615b
|
||||
W,57255761310145baa958b5587d9b5571423e5a0d3c0208f7210000,3d8ee0471ae8751e016b62dca9cee5cfebc9b30d
|
||||
W,57255761310145baa958b5587d9b5571423e5a0d3c02090a7b0000,0a059f3e94ed2c5a9d43986f0f14cf29f02d01ce
|
||||
X,58006284e3,54e14ff0c404c29b3d39ae4d249435f167d5cd4ce5a428ecb745b3df1c8e3dde
|
||||
Z,5a2556ed1cab9d17f2a9392030a9ad7f5d138f11bd,00000e56
|
||||
a,612556ed1cab9d17f2a9392030a9ad7f5d138f11bd,000007df178c203c
|
||||
s,73,9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f46300105bec03f782718ccd27260ce980e7d3d0b5c5f7be1517027b68104109128a34d1cc562f32008e00105bef0014f734000700105befffffffffffffffff00105bec
|
|
11
testdata/F_cat.csv
vendored
Normal file
@@ -0,0 +1,11 @@
F,,
|
||||
F,460003636174013000201c740000,00201c740000
|
||||
F,4600036361740130002d4eb10000,002d4eb10000
|
||||
F,46000363617401300035f0460000,0035f0460000
|
||||
F,460003636174013000817df60000,00817df60000
|
||||
F,46000363617401300090d0f30000,009102470000
|
||||
F,460003636174013000a009af0000,00a009af0000
|
||||
F,460003636174013000a082a60000,00a082a60000
|
||||
F,460003636174013000a6a0d60000,00f2c75d0000
|
||||
F,460003636174013000a9111e0000,00b72e480000
|
||||
F,460003636174013000ab038e0000,00ab0ba60000
|
|
11
testdata/F_resolve.csv
vendored
Normal file
@@ -0,0 +1,11 @@
F,,
|
||||
F,4600134073747978686578656e68616d6d657236363601320061ec7c0000,006284e30000
|
||||
F,4600134073747978686578656e68616d6d6572363636013503e4d2e60000,03e4d2e60000
|
||||
F,4600134073747978686578656e68616d6d657236363601630382eee90000,0382eee90000
|
||||
F,4600134073747978686578656e68616d6d65723636360232350061ec7c0000,006284e30000
|
||||
F,4600134073747978686578656e68616d6d657236363602356603e4d2e60000,03e4d2e60000
|
||||
F,4600134073747978686578656e68616d6d65723636360263330382eee90000,0382eee90000
|
||||
F,4600134073747978686578656e68616d6d6572363636033235350061ec7c0000,006284e30000
|
||||
F,4600134073747978686578656e68616d6d65723636360335666103e4d2e60000,03e4d2e60000
|
||||
F,4600134073747978686578656e68616d6d6572363636036333610382eee90000,0382eee90000
|
||||
F,4600134073747978686578656e68616d6d657236363604323535360061ec7c0000,006284e30000
|
|
11
testdata/G.csv
vendored
Normal file
@@ -0,0 +1,11 @@
G,
|
||||
4700162aa70000,c78ac4c326cd43cdc0c844b7cea13659449ab3e40015746573742d70686f746f2d7374726173626f757267
|
||||
4700162f600000,ebf95f7fdb89db5467bb1b88ea3b0f0f7ee5ce360003636e63
|
||||
47001630960000,a6f91a86837ab84a4cf0d2dcbe94704a528cf820000f776f6e646572776f6d616e31393933
|
||||
47001635e60000,9673cc2a1aac64d7b2742705abfb09fca30d7e0500056d6d61736b
|
||||
47001638a80000,c39342066646dc50f1a9954b41684d157b035dac00036f6e65
|
||||
47001645ef0001,4689c1ccb4420309f93ab98799b28c49fa4d3809000a65617379737472656574
|
||||
470016529a0000,f1628d66ae52295590b72b9a0b3a3527642a532600137465737470756230332d32312d323031372d32
|
||||
470016529d0000,a4c61ced261ab571bdb3410ae140bec6c31f14ce00117465737470756230332d32312d32303137
|
||||
47001655960000,2327bcb6d7578a2669e416b5aa185fe14ee8e03e00056569676874
|
||||
47001664200000,f69099600bdca9b062ba60432dba3c0ca2241167002c686973746f72792d6f662d6672696564726963682d69692d6f662d707275737369612d766f6c756d652d3137
|
|
11
testdata/G_2.csv
vendored
Normal file
@@ -0,0 +1,11 @@
G,,
|
||||
G,4700162aa70000,c78ac4c326cd43cdc0c844b7cea13659449ab3e40015746573742d70686f746f2d7374726173626f757267
|
||||
G,4700162f600000,ebf95f7fdb89db5467bb1b88ea3b0f0f7ee5ce360003636e63
|
||||
G,47001630960000,a6f91a86837ab84a4cf0d2dcbe94704a528cf820000f776f6e646572776f6d616e31393933
|
||||
G,47001635e60000,9673cc2a1aac64d7b2742705abfb09fca30d7e0500056d6d61736b
|
||||
G,47001638a80000,c39342066646dc50f1a9954b41684d157b035dac00036f6e65
|
||||
G,47001645ef0001,4689c1ccb4420309f93ab98799b28c49fa4d3809000a65617379737472656574
|
||||
G,470016529a0000,f1628d66ae52295590b72b9a0b3a3527642a532600137465737470756230332d32312d323031372d32
|
||||
G,470016529d0000,a4c61ced261ab571bdb3410ae140bec6c31f14ce00117465737470756230332d32312d32303137
|
||||
G,47001655960000,2327bcb6d7578a2669e416b5aa185fe14ee8e03e00056569676874
|
||||
G,47001664200000,f69099600bdca9b062ba60432dba3c0ca2241167002c686973746f72792d6f662d6672696564726963682d69692d6f662d707275737369612d766f6c756d652d3137
|
|
2
testdata/G_resolve.csv
vendored
Normal file
@@ -0,0 +1,2 @@
G,,
|
||||
G,47006284e30000,2556ed1cab9d17f2a9392030a9ad7f5d138f11bd00134073747978686578656e68616d6d6572363636
|
|
11
testdata/H.csv
vendored
Normal file
@@ -0,0 +1,11 @@
H,
|
||||
4800000000,010000000000000000000000000000000000000000000000000000000000000000000000cc59e59ff97ac092b55e423aa5495151ed6fb80570a5bb78cd5bd1c3821c21b8010000000000000000000000000000000000000000000000000000000000000033193156ffff001f07050000
|
||||
4800000001,0000002063f4346a4db34fdfce29a70f5e8d11f065f6b91602b7036c7f22f3a03b28899cba888e2f9c037f831046f8ad09f6d378f79c728d003b177a64d29621f481da5d01000000000000000000000000000000000000000000000000000000000000003c406b5746e1001f5b4f0000
|
||||
4800000002,00000020246cb85843ac936d55388f2ff288b011add5b1b20cca9cfd19a403ca2c9ecbde09d8734d81b5f2eb1b653caf17491544ddfbc72f2f4c0c3f22a3362db5ba9d4701000000000000000000000000000000000000000000000000000000000000003d406b57ffff001f4ff20000
|
||||
4800000003,000000200044e1258b865d262587c28ff98853bc52bb31266230c1c648cc9004047a5428e285dbf24334585b9a924536a717160ee185a86d1eeb7b19684538685eca761a01000000000000000000000000000000000000000000000000000000000000003d406b5746e1001fce9c0100
|
||||
4800000004,00000020bbf8980e3f7604896821203bf62f97f311124da1fbb95bf523fcfdb356ad19c9d83cf1408debbd631950b7a95b0c940772119cd8a615a3d44601568713fec80c01000000000000000000000000000000000000000000000000000000000000003e406b573dc6001fec7b0000
|
||||
4800000005,000000201a650b9b7b9d132e257ff6b336ba7cd96b1796357c4fc8dd7d0bd1ff1de057d547638e54178dbdddf2e81a3b7566860e5264df6066755f9760a893f5caecc57901000000000000000000000000000000000000000000000000000000000000003e406b5773ae001fcf770000
|
||||
4800000006,000000206d694b93a2bb5ac23a13ed6749a789ca751cf73d5982c459e0cd9d5d303da74cec91627e0dba856b933983425d7f72958e8f974682632a0fa2acee9cfd81940101000000000000000000000000000000000000000000000000000000000000003e406b578399001f225c0100
|
||||
4800000007,00000020b57808c188b7315583cf120fe89de923583bc7a8ebff03189145b86bf859b21ba3c4a19948a1263722c45c5601fd10a7aea7cf73bfa45e060508f109155e80ab01000000000000000000000000000000000000000000000000000000000000003f406b571787001f08160700
|
||||
4800000008,00000020a6a5b330e816242d54c8586ba9b6d63c19d921171ef3d4525b8ffc635742e83a0fc2da46cf0de0057c1b9fc93d997105ff6cf2c8c43269b446c1dbf5ac18be8c010000000000000000000000000000000000000000000000000000000000000040406b570ae1761edd8f0300
|
||||
4800000009,00000020b8447f415279dffe8a09afe6f6d5e335a2f6911fce8e1d1866723d5e5e8a53067356a733f87e592ea133328792dd9d676ed83771c8ff0f519928ce752f159ba6010000000000000000000000000000000000000000000000000000000000000040406b57139d681ed40d0000
|
|
11
testdata/I.csv
vendored
Normal file
@@ -0,0 +1,11 @@
I,
|
||||
49000000a420c44374f4f399ab4807fa1901eefc870297ec210000,a02a093c607b6772907f923cf15014397146874b
|
||||
49000000c27eef5ea69e0d73f118826c7e326bb46900371d660000,b237333ca0e44b0a91429a20420ed9bc6ea56a53
|
||||
4900000110e40894573f528c393fbcec7a472ec85301516b320000,f2a31cf5c4c3cd75a5c839d6b05947dfeb1970bb
|
||||
4900000324e40fcb63a0b517a3660645e9bd99244a030bb6ba0000,6b450166f56b44370d026ea2f70eb1be3454cd3f
|
||||
49000003d1538a0f19f5cd4bc1a62cc294f5c89934011c7c990000,ed6c15e48a78f5113fef78847525a5f664f76362
|
||||
49000008d47beeff8325e795a8604226145b01702b02dbb2a20000,f2cf43b86b9d70175dc22dbb9ff7806241d90780
|
||||
4900000906499e073e94370ceff37cb21c282124440369842d0000,f2cf43b86b9d70175dc22dbb9ff7806241d90780
|
||||
49000009c3172e034a255f3c03566dca84bb9f046a0225c69c0000,dd3944bc7ae2717606e8e3ae3561f83befe484ea
|
||||
49000009ca6e0caaaef16872b4bd4f6f1b8c2363e202b169560000,2fdbbf06aab844bb80ee6748244cadc5d698411e
|
||||
4900000ad9ded2e15d18987900d09e9b29ef33d03e02c972b30000,f6ac161e5f4100b6bb22544460b7dfc2af9b1ec3
|
|
2
testdata/I_resolve.csv
vendored
Normal file
@@ -0,0 +1,2 @@
I,,
|
||||
I,499a0ed686ecdad9b6cb965c4d6681c02f0bbc66a60369e2b20000,2556ed1cab9d17f2a9392030a9ad7f5d138f11bd
|
|
11
testdata/J.csv
vendored
Normal file
@@ -0,0 +1,11 @@
J,
|
||||
4a00009f35397ada0476b04c67978ad081b50833ed0005676866676800201f7f0000,2febc9f39e70fac69ce7504dc7fb9523c9617c68
|
||||
4a0000cbef248847373c999de142bc2d7da4d014100013646f6d656e7a61696e2d657374616661646f720358fe5f0000,2b15c03bb512d84e7450b0fbdbc4db1f9b454137
|
||||
4a0000eff19123a0b3087a0f059a5e0c10d74377560010636176652d73746f72792d7974702d3102fb1edf0000,dc07a33c5e8db91e5f27ea6cfc6415677c834d34
|
||||
4a0000eff19123a0b3087a0f059a5e0c10d7437756001b636c6f646f72212d67616d652d6f662d7468726f6e65732d79747002fb1ed60000,c869ff2f82cc7d28d63cfe672de46042898d6a90
|
||||
4a0000eff19123a0b3087a0f059a5e0c10d74377560020636f6e636c6176652d6f662d7468652d63686f73656e2d28776f772d6266612902fb1ecf0000,2d3225c7800a8e8098aa63c9a2c5ddd6ac30f968
|
||||
4a0000eff19123a0b3087a0f059a5e0c10d7437756002077686f27732d746861742d706f6b656d6f6e2d697427732d636174657270696502fb1ed00000,627769e3f4f577261c3aec4addd05890747a5c8b
|
||||
4a0000eff19123a0b3087a0f059a5e0c10d7437756002077686f27732d746861742d706f6b656d6f6e2d697427732d70696b616368752102fb1ed40000,45d8a2b4f512f4eb8bf6409454975235343d4b40
|
||||
4a0000eff19123a0b3087a0f059a5e0c10d7437756002162726f636b2d74726965732d746f2d736176652d736861796d696e2d7573696e670341a2c40000,2c9d219c6ce4596cffb250950c3d94938f8df91b
|
||||
4a0000eff19123a0b3087a0f059a5e0c10d74377560021706d64322d6578706c6f726572732d6f662d736b792d706c61797468726f7567680315ca790000,be33e079423884c11969b3fd8f7586dee8e3a73f
|
||||
4a0000eff19123a0b3087a0f059a5e0c10d74377560021706f6b656d6f6e2d756e6974652d736561736f6e2d322d286c6f6c6e6f7065212903d81d740000,236c0428226580eaffee6beed1b89fdff7c4e582
|
|
2
testdata/J_resolve.csv
vendored
Normal file
@@ -0,0 +1,2 @@
J,,
|
||||
J,4a2556ed1cab9d17f2a9392030a9ad7f5d138f11bd0013612d73747265616d2d696e2d7665726d6f6e740069a2570000,ee29ca5c44313a2827fd35252b72557334635749
|
|
11
testdata/K.csv
vendored
Normal file
@@ -0,0 +1,11 @@
K,
|
||||
4b00000324e40fcb63a0b517a3660645e9bd99244a030bc8a50000,0000000001312d00
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6e2aa0126,0000000001ea252a
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6e7d20151,0000000005be6f7e
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6eeb6014d,0000000001eac106
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6eed00037,0000000001eac106
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6f4f00133,0000000001eac106
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6f4f001e5,0000000001eac106
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6f9b6013e,0000000001eac106
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6f9b7012f,0000000003d5820c
|
||||
4b000023415fc7ba8a470f0cdf4a66bffacd5ba97902c6f9b9001c,0000000003d5820c
|
|
11
testdata/L.csv
vendored
Normal file
@@ -0,0 +1,11 @@
L,
|
||||
4c000059610000,04c7d5e2360f10ab8e28d5d831abb29b72cea3a8
|
||||
4c0000596d0000,04c7d5e2360f10ab8e28d5d831abb29b72cea3a8
|
||||
4c00029e100000,a467b70b0a9ddba924d0a115206fe7c1cb25a346
|
||||
4c000449380000,c6ddef5e005606bd816177c7f0cba2404c719131
|
||||
4c000864bb0000,32d4fc78396c239f5c1a0a041242eebb26367509
|
||||
4c000c380b0000,c2ba0ad053f45d77ae569a1b5c407bc213365fda
|
||||
4c001030b40000,467513d3a6eed0114964d751cd85ed49c8e3af4e
|
||||
4c0011116c0000,d565af1863ccf28e9d90f8730e40b5ee1d72258d
|
||||
4c001111e90000,1e81ee06e8293438fbcbe9196494a69be0426c8b
|
||||
4c0016c4180001,49cb931d20a96e17348aabbc28b5838e1a650d8c
|
|
2
testdata/M.csv
vendored
Normal file
File diff suppressed because one or more lines are too long
11
testdata/N.csv
vendored
Normal file
@@ -0,0 +1,11 @@
N,
|
||||
4e00000031a2e262d60074f07330d7187907e5b02be8f9b3c60cdc03d776314912,01376ce8
|
||||
4e0000004e91edda0f9cd3bcef9565a31e6bbbd34c731483e03ec7d8819158ac30,030ee002
|
||||
4e0000008070865693cd82ed0f59896e34973adbff0583fb8a1293919591446075,03518017
|
||||
4e0000009c24d4d9187749a1f8f6d6c0f92a5e98817f5efcd427a5593344a45a2e,019436d7
|
||||
4e000000cee66b136a85596df24ece60bbd1392f70204fd2f144f059e5195ee3c9,00169e07
|
||||
4e000000e0bf96accd4eda4d871c5bc8c0ebb14509b896a867095f9b419f9b04f2,02bcc37a
|
||||
4e0000011daab2a9f45cca3d0bac6fee69324af4757b1c4e8c3a362efbb6e8a09d,00c4c1e3
|
||||
4e00000150116856ce8ce06604b9584e61afb3b613a8a5a512ab260e7f0cbe5496,003a51fa
|
||||
4e0000015ba8df5c3ed80d0fc364136de02e3ba9d9550cbb1ecef03b97fcdf0621,00d725d0
|
||||
4e000001d4a53fc92321415631862a791f8680241ed172e579534713f68a6869ba,025e8166
|
|
11
testdata/O.csv
vendored
Normal file
@@ -0,0 +1,11 @@
O,
|
||||
4f00222fbd00162aa70000,c78ac4c326cd43cdc0c844b7cea13659449ab3e40015746573742d70686f746f2d7374726173626f757267
|
||||
4f002230a700162f600000,ebf95f7fdb89db5467bb1b88ea3b0f0f7ee5ce360003636e63
|
||||
4f002230e3001630960000,a6f91a86837ab84a4cf0d2dcbe94704a528cf820000f776f6e646572776f6d616e31393933
|
||||
4f002231e8001635e60000,9673cc2a1aac64d7b2742705abfb09fca30d7e0500056d6d61736b
|
||||
4f00223246001638a80000,c39342066646dc50f1a9954b41684d157b035dac00036f6e65
|
||||
4f00223494001645ef0001,4689c1ccb4420309f93ab98799b28c49fa4d3809000a65617379737472656574
|
||||
4f002236df0016529a0000,f1628d66ae52295590b72b9a0b3a3527642a532600137465737470756230332d32312d323031372d32
|
||||
4f002236e10016529d0000,a4c61ced261ab571bdb3410ae140bec6c31f14ce00117465737470756230332d32312d32303137
|
||||
4f00223774001655960000,2327bcb6d7578a2669e416b5aa185fe14ee8e03e00056569676874
|
||||
4f00223a04001664200000,f69099600bdca9b062ba60432dba3c0ca2241167002c686973746f72792d6f662d6672696564726963682d69692d6f662d707275737369612d766f6c756d652d3137
|
|
11
testdata/P.csv
vendored
Normal file
@@ -0,0 +1,11 @@
P,
|
||||
50000100,a51d5c567412654e6d741114fea6fb851dec73800004831f
|
||||
50000101,11158037afca9c2efabc3dff55e352bf1f5634c50004831f
|
||||
50000102,a4a575934de77d8ec8589595d8cd91857e3cf5ba0004831f
|
||||
50000103,f595a21fb597bd030defefda3df9f8f4a3e0cb860004831f
|
||||
50000104,682ccb0518a6bd00c955949d9ef330d3ac18cb800004831f
|
||||
50000105,078a435851bf97c5cc36e8b03e3208a30d27679f0004831f
|
||||
50000106,1a4a75246a766cf21a629f619bc5bcb531de7a5a0004831f
|
||||
50000107,b144ad496b29b9c12c316f319773adcdd4c9bce20004831f
|
||||
50000108,af2a09232fc6bf664088d65da42fe0345b4589600004831f
|
||||
50000109,7b2ab15758c519116fb9ba9331a3b9ee7530831f0004831f
|
|
2
testdata/P_resolve.csv
vendored
Normal file
@@ -0,0 +1,2 @@
P,,
|
||||
P,5000134073747978686578656e68616d6d6572363636,2556ed1cab9d17f2a9392030a9ad7f5d138f11bd000a6471
|
|
11
testdata/Q.csv
vendored
Normal file
@@ -0,0 +1,11 @@
Q,
|
||||
5100002e5002000059610000,04c7d5e2360f10ab8e28d5d831abb29b72cea3a8000c697473616469736173746572
|
||||
5100002e52020000596d0000,04c7d5e2360f10ab8e28d5d831abb29b72cea3a8000c697473616469736173746572
|
||||
5100005d570200025e360000,32d4fc78396c239f5c1a0a041242eebb2636750900036f6e65
|
||||
5100005d590200025e570001,c923ae766c269535048c06674261e546004375ab000374776f
|
||||
5100005d5a0200025e6a0001,c923ae766c269535048c06674261e546004375ab000374776f
|
||||
51000061e50200029e100000,a467b70b0a9ddba924d0a115206fe7c1cb25a3460003707567
|
||||
51000080a002000449380000,c6ddef5e005606bd816177c7f0cba2404c7191310006757465737432
|
||||
510000c3c102000864bb0000,32d4fc78396c239f5c1a0a041242eebb2636750900036f6e65
|
||||
510000fd8002000c380b0000,c2ba0ad053f45d77ae569a1b5c407bc213365fda00037a6564
|
||||
510001556c02001030b40000,467513d3a6eed0114964d751cd85ed49c8e3af4e000b776172616e647065616365
|
|
11
testdata/R.csv
vendored
Normal file
@@ -0,0 +1,11 @@
R,
|
||||
520100162aa70000,00021b3dc78ac4c326cd43cdc0c844b7cea13659449ab3e40015746573742d70686f746f2d7374726173626f757267
|
||||
520100162f600000,00021c27ebf95f7fdb89db5467bb1b88ea3b0f0f7ee5ce360003636e63
|
||||
5201001630960000,00021c63a6f91a86837ab84a4cf0d2dcbe94704a528cf820000f776f6e646572776f6d616e31393933
|
||||
5201001635e60000,00021d689673cc2a1aac64d7b2742705abfb09fca30d7e0500056d6d61736b
|
||||
5201001638a80000,00021f4bc39342066646dc50f1a9954b41684d157b035dac00036f6e65
|
||||
5201001645ef0001,000220144689c1ccb4420309f93ab98799b28c49fa4d3809000a65617379737472656574
|
||||
52010016529a0000,0002225ff1628d66ae52295590b72b9a0b3a3527642a532600137465737470756230332d32312d323031372d32
|
||||
52010016529d0000,00022261a4c61ced261ab571bdb3410ae140bec6c31f14ce00117465737470756230332d32312d32303137
|
||||
5201001655960000,000222f42327bcb6d7578a2669e416b5aa185fe14ee8e03e00056569676874
|
||||
5201001664200000,00022584f69099600bdca9b062ba60432dba3c0ca2241167002c686973746f72792d6f662d6672696564726963682d69692d6f662d707275737369612d766f6c756d652d3137
|
|
2
testdata/R_resolve.csv
vendored
Normal file
@@ -0,0 +1,2 @@
R,,
|
||||
R,5201006284e30000,000a6b652556ed1cab9d17f2a9392030a9ad7f5d138f11bd00134073747978686578656e68616d6d6572363636
|
|
11
testdata/S.csv
vendored
Normal file
@@ -0,0 +1,11 @@
S,
|
||||
53000000a420c44374f4f399ab4807fa1901eefc8701000e94ad0297ec210000,00000000000f4240
|
||||
53000000c27eef5ea69e0d73f118826c7e326bb46901000773de00371d660000,000000001dcd6500
|
||||
5300000110e40894573f528c393fbcec7a472ec85301000d069c01516b320000,0000000000989680
|
||||
5300000324e40fcb63a0b517a3660645e9bd99244a01000f2fd8030bb6ba0000,00000000000f4240
|
||||
5300000324e40fcb63a0b517a3660645e9bd99244a02000f2ff4030bc8a50000,0000000001312d00
|
||||
53000003d1538a0f19f5cd4bc1a62cc294f5c8993401000c816a011c7c990000,00000000000f4240
|
||||
53000008d47beeff8325e795a8604226145b01702b01000ef1ed02dbb2a20000,00000000000186a0
|
||||
5300000906499e073e94370ceff37cb21c2821244401000fa7c40369842d0000,00000000000186a0
|
||||
53000009c3172e034a255f3c03566dca84bb9f046a01000e07020225c69c0000,000000000007a120
|
||||
53000009ca6e0caaaef16872b4bd4f6f1b8c2363e201000eb5af02b169560000,00000000000f4240
|
|
11
testdata/S_resolve.csv
vendored
Normal file
@@ -0,0 +1,11 @@
S,,
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd01000a6b67006286030000,0000007615cbad28
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd01000a706a0063105c0000,000000000bebc200
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a73ea006367550000,0000000005f5e100
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a7d63006469750000,0000000db0b7c894
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a7ebf00648c480000,00000000b2d05e00
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a810e0064ccc00000,000000003b9aca00
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a825b006503cf0000,00000002bf52c92c
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a88930066814a0000,00000000dc887a34
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a88f900669d240000,0000000005f5e100
|
||||
S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a88f900669d260000,000000001dcd6500
|
|
18
testdata/Si_resolve.csv
vendored
Normal file
@@ -0,0 +1,18 @@
Si,,
|
||||
S,53000000a420c44374f4f399ab4807fa1901eefc8701000e94ad0297ec210000,00000000000f4240
|
||||
S,53000000c27eef5ea69e0d73f118826c7e326bb46901000773de00371d660000,000000001dcd6500
|
||||
S,5300000110e40894573f528c393fbcec7a472ec85301000d069c01516b320000,0000000000989680
|
||||
S,5300000324e40fcb63a0b517a3660645e9bd99244a01000f2fd8030bb6ba0000,00000000000f4240
|
||||
S,5300000324e40fcb63a0b517a3660645e9bd99244a02000f2ff4030bc8a50000,0000000001312d00
|
||||
S,5300000324e40fcb63a0b517a3660645e9bd99244a02000f2ff6030bc8b00000,0000000000000003
|
||||
S,5300000324e40fcb63a0b517a3660645e9bd99244a02000f2ff7030bc8b10000,0000000000000002
|
||||
S,5300000324e40fcb63a0b517a3660645e9bd99244a02000f2ff9030bc8cf0000,0000000000000001
|
||||
S,53000003d1538a0f19f5cd4bc1a62cc294f5c8993401000c816a011c7c990000,00000000000f4240
|
||||
S,53000008d47beeff8325e795a8604226145b01702b01000ef1ed02dbb2a20000,00000000000186a0
|
||||
S,5300000906499e073e94370ceff37cb21c2821244401000fa7c40369842d0000,00000000000186a0
|
||||
S,5300000906499e073e94370ceff37cb21c2821244402000fa7c403698fff0000,00000000000000a1
|
||||
S,5300000906499e073e94370ceff37cb21c2821244402000fa7c80369f0010000,000000000000000f
|
||||
S,53000009c3172e034a255f3c03566dca84bb9f046a01000e07020225c69c0000,000000000007a120
|
||||
S,53000009ca6e0caaaef16872b4bd4f6f1b8c2363e201000eb5af02b169560000,00000000000f4240
|
||||
i,6900000324e40fcb63a0b517a3660645e9bd99244a,0000000001406f460000000001312d06
|
||||
i,6900000906499e073e94370ceff37cb21c28212444,000000000001875000000000000000b0
|
|
Some files were not shown because too many files have changed in this diff.