Refactor db functions into three files

Jeffrey Picard 2022-02-28 12:56:22 -05:00
parent 0ee35bbeee
commit 2658bf50ff
3 changed files with 942 additions and 923 deletions

db/db.go (923 deletions)

@@ -8,12 +8,10 @@ import (
"math"
"os"
"sort"
- "strings"
"time"
"github.com/lbryio/hub/db/prefixes"
"github.com/lbryio/lbry.go/v2/extras/util"
- lbryurl "github.com/lbryio/lbry.go/v2/url"
"github.com/linxGnu/grocksdb"
)
@@ -251,927 +249,6 @@ func BisectRight(arr []uint32, val uint32) uint32 {
return uint32(i)
}
func GetExpirationHeight(lastUpdatedHeight uint32) uint32 {
return GetExpirationHeightFull(lastUpdatedHeight, false)
}
func GetExpirationHeightFull(lastUpdatedHeight uint32, extended bool) uint32 {
if extended {
return lastUpdatedHeight + NExtendedClaimExpirationTime
}
if lastUpdatedHeight < NExtendedClaimExpirationForkHeight {
return lastUpdatedHeight + NOriginalClaimExpirationTime
}
return lastUpdatedHeight + NExtendedClaimExpirationTime
}
// EnsureHandle is a helper function to ensure that the db has a handle to the given column family.
func EnsureHandle(db *ReadOnlyDBColumnFamily, prefix byte) (*grocksdb.ColumnFamilyHandle, error) {
cfName := string(prefix)
handle := db.Handles[cfName]
if handle == nil {
return nil, fmt.Errorf("%s handle not found", cfName)
}
return handle, nil
}
//
// DB Get functions
//
func GetClaimsInChannelCount(db *ReadOnlyDBColumnFamily, channelHash []byte) (uint32, error) {
handle, err := EnsureHandle(db, prefixes.ChannelCount)
if err != nil {
return 0, err
}
key := prefixes.NewChannelCountKey(channelHash)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return 0, err
} else if slice.Size() == 0 {
return 0, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ChannelCountValueUnpack(rawValue)
return value.Count, nil
}
func GetShortClaimIdUrl(db *ReadOnlyDBColumnFamily, name string, normalizedName string, claimHash []byte, rootTxNum uint32, rootPosition uint16) (string, error) {
prefix := []byte{prefixes.ClaimShortIdPrefix}
handle, err := EnsureHandle(db, prefixes.ClaimShortIdPrefix)
if err != nil {
return "", err
}
claimId := hex.EncodeToString(claimHash)
claimIdLen := len(claimId)
for prefixLen := 0; prefixLen < 10; prefixLen++ {
var j int = prefixLen + 1
if j > claimIdLen {
j = claimIdLen
}
partialClaimId := claimId[:j]
partialKey := prefixes.NewClaimShortIDKey(normalizedName, partialClaimId)
log.Printf("partialKey: %#v\n", partialKey)
keyPrefix := prefixes.ClaimShortIDKeyPackPartial(partialKey, 2)
// Prefix and handle
options := NewIterateOptions().WithPrefix(prefix).WithCfHandle(handle)
// Start and stop bounds
options = options.WithStart(keyPrefix)
// Don't include the key
options = options.WithIncludeValue(false)
ch := IterCF(db.DB, options)
row := <-ch
if row == nil {
continue
}
key := row.Key.(*prefixes.ClaimShortIDKey)
if key.RootTxNum == rootTxNum && key.RootPosition == rootPosition {
return fmt.Sprintf("%s#%s", name, key.PartialClaimId), nil
}
}
return "", nil
}
func GetRepost(db *ReadOnlyDBColumnFamily, claimHash []byte) ([]byte, error) {
handle, err := EnsureHandle(db, prefixes.Repost)
if err != nil {
return nil, err
}
key := prefixes.NewRepostKey(claimHash)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
} else if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.RepostValueUnpack(rawValue)
return value.RepostedClaimHash, nil
}
func GetRepostedCount(db *ReadOnlyDBColumnFamily, claimHash []byte) (int, error) {
handle, err := EnsureHandle(db, prefixes.RepostedClaim)
if err != nil {
return 0, err
}
key := prefixes.NewRepostedKey(claimHash)
keyPrefix := prefixes.RepostedKeyPackPartial(key, 1)
// Prefix and handle
options := NewIterateOptions().WithPrefix(keyPrefix).WithCfHandle(handle)
// Start and stop bounds
// options = options.WithStart(keyPrefix)
// Don't include the key
options = options.WithIncludeValue(false)
var i int = 0
ch := IterCF(db.DB, options)
for range ch {
i++
}
return i, nil
}
func GetChannelForClaim(db *ReadOnlyDBColumnFamily, claimHash []byte, txNum uint32, position uint16) ([]byte, error) {
handle, err := EnsureHandle(db, prefixes.ClaimToChannel)
if err != nil {
return nil, err
}
key := prefixes.NewClaimToChannelKey(claimHash, txNum, position)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
} else if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ClaimToChannelValueUnpack(rawValue)
return value.SigningHash, nil
}
func GetActiveAmount(db *ReadOnlyDBColumnFamily, claimHash []byte, txoType uint8, height uint32) (uint64, error) {
handle, err := EnsureHandle(db, prefixes.ActiveAmount)
if err != nil {
return 0, err
}
startKey := prefixes.NewActiveAmountKey(claimHash, txoType, 0)
endKey := prefixes.NewActiveAmountKey(claimHash, txoType, height)
startKeyRaw := prefixes.ActiveAmountKeyPackPartial(startKey, 3)
endKeyRaw := prefixes.ActiveAmountKeyPackPartial(endKey, 3)
// Prefix and handle
options := NewIterateOptions().WithPrefix([]byte{prefixes.ActiveAmount}).WithCfHandle(handle)
// Start and stop bounds
options = options.WithStart(startKeyRaw).WithStop(endKeyRaw)
// Don't include the key
options = options.WithIncludeKey(false).WithIncludeValue(true)
ch := IterCF(db.DB, options)
var sum uint64 = 0
for kv := range ch {
sum += kv.Value.(*prefixes.ActiveAmountValue).Amount
}
return sum, nil
}
func GetEffectiveAmount(db *ReadOnlyDBColumnFamily, claimHash []byte, supportOnly bool) (uint64, error) {
supportAmount, err := GetActiveAmount(db, claimHash, prefixes.ACTIVATED_SUPPORT_TXO_TYPE, db.Height+1)
if err != nil {
return 0, err
}
if supportOnly {
return supportAmount, nil
}
activationAmount, err := GetActiveAmount(db, claimHash, prefixes.ACTIVATED_CLAIM_TXO_TYPE, db.Height+1)
if err != nil {
return 0, err
}
return activationAmount + supportAmount, nil
}
func GetSupportAmount(db *ReadOnlyDBColumnFamily, claimHash []byte) (uint64, error) {
handle, err := EnsureHandle(db, prefixes.SupportAmount)
if err != nil {
return 0, err
}
key := prefixes.NewSupportAmountKey(claimHash)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return 0, err
} else if slice.Size() == 0 {
return 0, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.SupportAmountValueUnpack(rawValue)
return value.Amount, nil
}
func GetTxHash(db *ReadOnlyDBColumnFamily, txNum uint32) ([]byte, error) {
/*
if self._cache_all_tx_hashes:
return self.total_transactions[tx_num]
return self.prefix_db.tx_hash.get(tx_num, deserialize_value=False)
*/
// TODO: caching
handle, err := EnsureHandle(db, prefixes.TxHash)
if err != nil {
return nil, err
}
key := prefixes.NewTxHashKey(txNum)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
}
if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
return rawValue, nil
}
func GetActivation(db *ReadOnlyDBColumnFamily, txNum uint32, postition uint16) (uint32, error) {
return GetActivationFull(db, txNum, postition, false)
}
func GetActivationFull(db *ReadOnlyDBColumnFamily, txNum uint32, postition uint16, isSupport bool) (uint32, error) {
var typ uint8
handle, err := EnsureHandle(db, prefixes.ActivatedClaimAndSupport)
if err != nil {
return 0, err
}
if isSupport {
typ = prefixes.ACTIVATED_SUPPORT_TXO_TYPE
} else {
typ = prefixes.ACTIVATED_CLAIM_TXO_TYPE
}
key := prefixes.NewActivationKey(typ, txNum, postition)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return 0, err
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ActivationValueUnpack(rawValue)
// Does this need to explicitly return an int64, in case the uint32 overflows the max of an int?
return value.Height, nil
}
func GetCachedClaimTxo(db *ReadOnlyDBColumnFamily, claim []byte) (*prefixes.ClaimToTXOValue, error) {
// TODO: implement cache
handle, err := EnsureHandle(db, prefixes.ClaimToTXO)
if err != nil {
return nil, err
}
key := prefixes.NewClaimToTXOKey(claim)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
}
if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ClaimToTXOValueUnpack(rawValue)
return value, nil
}
func GetControllingClaim(db *ReadOnlyDBColumnFamily, name string) (*prefixes.ClaimTakeoverValue, error) {
handle, err := EnsureHandle(db, prefixes.ClaimTakeover)
if err != nil {
return nil, err
}
key := prefixes.NewClaimTakeoverKey(name)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
}
if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ClaimTakeoverValueUnpack(rawValue)
return value, nil
}
func FsGetClaimByHash(db *ReadOnlyDBColumnFamily, claimHash []byte) (*ResolveResult, error) {
claim, err := GetCachedClaimTxo(db, claimHash)
if err != nil {
return nil, err
}
activation, err := GetActivation(db, claim.TxNum, claim.Position)
if err != nil {
return nil, err
}
log.Printf("%#v\n%#v\n%#v\n", claim, hex.EncodeToString(claimHash), activation)
return PrepareResolveResult(
db,
claim.TxNum,
claim.Position,
claimHash,
claim.Name,
claim.RootTxNum,
claim.RootPosition,
activation,
claim.ChannelSignatureIsValid,
)
}
func GetDBState(db *ReadOnlyDBColumnFamily) (*prefixes.DBStateValue, error) {
handle, err := EnsureHandle(db, prefixes.DBState)
if err != nil {
return nil, err
}
key := prefixes.NewDBStateKey()
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
} else if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.DBStateValueUnpack(rawValue)
return value, nil
}
func ClaimShortIdIter(db *ReadOnlyDBColumnFamily, normalizedName string, claimId string) <-chan *prefixes.PrefixRowKV {
handle, err := EnsureHandle(db, prefixes.ClaimShortIdPrefix)
if err != nil {
return nil
}
key := prefixes.NewClaimShortIDKey(normalizedName, claimId)
var rawKeyPrefix []byte = nil
if claimId != "" {
rawKeyPrefix = prefixes.ClaimShortIDKeyPackPartial(key, 2)
} else {
rawKeyPrefix = prefixes.ClaimShortIDKeyPackPartial(key, 1)
}
options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
options = options.WithIncludeValue(true) //.WithIncludeStop(true)
ch := IterCF(db.DB, options)
return ch
}
func GetCachedClaimHash(db *ReadOnlyDBColumnFamily, txNum uint32, position uint16) (*prefixes.TXOToClaimValue, error) {
// TODO: implement cache
handle, err := EnsureHandle(db, prefixes.TXOToClaim)
if err != nil {
return nil, err
}
key := prefixes.NewTXOToClaimKey(txNum, position)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
} else if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.TXOToClaimValueUnpack(rawValue)
return value, nil
}
// GetBlockerHash gets the hash of the blocker or filterer of the claim.
// TODO: this currently converts the byte arrays to strings, which is not
// very efficient. Might want to figure out a better way to do this.
func GetBlockerHash(db *ReadOnlyDBColumnFamily, claimHash, repostedClaimHash, channelHash []byte) ([]byte, []byte, error) {
claimHashStr := string(claimHash)
respostedClaimHashStr := string(repostedClaimHash)
channelHashStr := string(channelHash)
var blockedHash []byte = nil
var filteredHash []byte = nil
blockedHash = db.BlockedStreams[claimHashStr]
if blockedHash == nil {
blockedHash = db.BlockedStreams[respostedClaimHashStr]
}
if blockedHash == nil {
blockedHash = db.BlockedChannels[claimHashStr]
}
if blockedHash == nil {
blockedHash = db.BlockedChannels[respostedClaimHashStr]
}
if blockedHash == nil {
blockedHash = db.BlockedChannels[channelHashStr]
}
filteredHash = db.FilteredStreams[claimHashStr]
if filteredHash == nil {
filteredHash = db.FilteredStreams[respostedClaimHashStr]
}
if filteredHash == nil {
filteredHash = db.FilteredChannels[claimHashStr]
}
if filteredHash == nil {
filteredHash = db.FilteredChannels[respostedClaimHashStr]
}
if filteredHash == nil {
filteredHash = db.FilteredChannels[channelHashStr]
}
return blockedHash, filteredHash, nil
}
//
// Resolve functions
//
// PrepareResolveResult prepares a ResolveResult to return
func PrepareResolveResult(
db *ReadOnlyDBColumnFamily,
txNum uint32,
position uint16,
claimHash []byte,
name string,
rootTxNum uint32,
rootPosition uint16,
activationHeight uint32,
signatureValid bool) (*ResolveResult, error) {
normalizedName := util.NormalizeName(name)
controllingClaim, err := GetControllingClaim(db, normalizedName)
if err != nil {
return nil, err
}
txHash, err := GetTxHash(db, txNum)
if err != nil {
return nil, err
}
height := BisectRight(db.TxCounts, txNum)
createdHeight := BisectRight(db.TxCounts, rootTxNum)
lastTakeoverHeight := controllingClaim.Height
expirationHeight := GetExpirationHeight(height)
supportAmount, err := GetSupportAmount(db, claimHash)
if err != nil {
return nil, err
}
claimToTxo, err := GetCachedClaimTxo(db, claimHash)
if err != nil {
return nil, err
}
claimAmount := claimToTxo.Amount
effectiveAmount, err := GetEffectiveAmount(db, claimHash, false)
if err != nil {
return nil, err
}
channelHash, err := GetChannelForClaim(db, claimHash, txNum, position)
if err != nil {
return nil, err
}
repostedClaimHash, err := GetRepost(db, claimHash)
if err != nil {
return nil, err
}
shortUrl, err := GetShortClaimIdUrl(db, name, normalizedName, claimHash, txNum, rootPosition)
if err != nil {
return nil, err
}
var canonicalUrl string = shortUrl
claimsInChannel, err := GetClaimsInChannelCount(db, claimHash)
if err != nil {
return nil, err
}
if channelHash != nil {
// Ignore error because we already have this set if this doesn't work
channelVals, _ := GetCachedClaimTxo(db, channelHash)
log.Printf("channelVals: %#v\n", channelVals)
if channelVals != nil {
channelShortUrl, _ := GetShortClaimIdUrl(
db,
channelVals.Name,
channelVals.NormalizedName(),
channelHash, channelVals.RootTxNum,
channelVals.RootPosition,
)
canonicalUrl = fmt.Sprintf("%s/%s", channelShortUrl, shortUrl)
}
}
reposted, err := GetRepostedCount(db, claimHash)
if err != nil {
return nil, err
}
isControlling := bytes.Equal(controllingClaim.ClaimHash, claimHash)
return &ResolveResult{
Name: name,
NormalizedName: normalizedName,
ClaimHash: claimHash,
TxNum: txNum,
Position: position,
TxHash: txHash,
Height: height,
Amount: claimAmount,
ShortUrl: shortUrl,
IsControlling: isControlling,
CanonicalUrl: canonicalUrl,
CreationHeight: createdHeight,
ActivationHeight: activationHeight,
ExpirationHeight: expirationHeight,
EffectiveAmount: effectiveAmount,
SupportAmount: supportAmount,
Reposted: reposted,
LastTakeoverHeight: lastTakeoverHeight,
ClaimsInChannel: claimsInChannel,
ChannelHash: channelHash,
RepostedClaimHash: repostedClaimHash,
SignatureValid: signatureValid,
}, nil
}
func ResolveParsedUrl(db *ReadOnlyDBColumnFamily, parsed *PathSegment) (*ResolveResult, error) {
normalizedName := util.NormalizeName(parsed.name)
if (parsed.amountOrder == -1 && parsed.claimId == "") || parsed.amountOrder == 1 {
controlling, err := GetControllingClaim(db, normalizedName)
if err != nil {
return nil, err
}
if controlling == nil {
return nil, nil
}
return FsGetClaimByHash(db, controlling.ClaimHash)
}
var amountOrder int = int(math.Max(float64(parsed.amountOrder), 1))
log.Println("amountOrder:", amountOrder)
if parsed.claimId != "" {
if len(parsed.claimId) == 40 {
claimHash, err := hex.DecodeString(parsed.claimId)
if err != nil {
return nil, err
}
// Maybe don't use caching version, when I actually implement the cache
claimTxo, err := GetCachedClaimTxo(db, claimHash)
if err != nil {
return nil, err
}
if claimTxo == nil || claimTxo.NormalizedName() != normalizedName {
return nil, nil
}
activation, err := GetActivation(db, claimTxo.TxNum, claimTxo.Position)
if err != nil {
return nil, err
}
return PrepareResolveResult(
db,
claimTxo.TxNum,
claimTxo.Position,
claimHash,
claimTxo.Name,
claimTxo.RootTxNum,
claimTxo.RootPosition,
activation,
claimTxo.ChannelSignatureIsValid,
)
}
log.Println("nomalizedName:", normalizedName)
log.Println("claimId:", parsed.claimId)
var j int = 10
if len(parsed.claimId) < j {
j = len(parsed.claimId)
}
ch := ClaimShortIdIter(db, normalizedName, parsed.claimId[:j])
row := <-ch
key := row.Key.(*prefixes.ClaimShortIDKey)
claimTxo := row.Value.(*prefixes.ClaimShortIDValue)
fullClaimHash, err := GetCachedClaimHash(db, claimTxo.TxNum, claimTxo.Position)
if err != nil {
return nil, err
}
c, err := GetCachedClaimTxo(db, fullClaimHash.ClaimHash)
if err != nil {
return nil, err
}
nonNormalizedName := c.Name
signatureIsValid := c.ChannelSignatureIsValid
activation, err := GetActivation(db, claimTxo.TxNum, claimTxo.Position)
if err != nil {
return nil, err
}
return PrepareResolveResult(
db,
claimTxo.TxNum,
claimTxo.Position,
fullClaimHash.ClaimHash,
nonNormalizedName,
key.RootTxNum,
key.RootPosition,
activation,
signatureIsValid,
)
}
return nil, nil
}
func ResolveClaimInChannel(db *ReadOnlyDBColumnFamily, channelHash []byte, normalizedName string) (*ResolveResult, error) {
handle, err := EnsureHandle(db, prefixes.ChannelToClaim)
if err != nil {
return nil, err
}
key := prefixes.NewChannelToClaimKey(channelHash, normalizedName)
rawKeyPrefix := prefixes.ChannelToClaimKeyPackPartial(key, 2)
options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
options = options.WithIncludeValue(true) //.WithIncludeStop(true)
ch := IterCF(db.DB, options)
// TODO: what's a good default size for this?
var candidates []*ResolveResult = make([]*ResolveResult, 0, 100)
var i = 0
for row := range ch {
key := row.Key.(*prefixes.ChannelToClaimKey)
stream := row.Value.(*prefixes.ChannelToClaimValue)
effectiveAmount, err := GetEffectiveAmount(db, stream.ClaimHash, false)
if err != nil {
return nil, err
}
if i == 0 || candidates[i-1].Amount == effectiveAmount {
candidates = append(
candidates,
&ResolveResult{
TxNum: key.TxNum,
Position: key.Position,
ClaimHash: stream.ClaimHash,
Amount: effectiveAmount,
ChannelHash: channelHash,
NormalizedName: normalizedName,
},
)
i++
} else {
break
}
}
log.Printf("candidates: %#v\n", candidates)
if len(candidates) == 0 {
return nil, nil
} else {
// return list(sorted(candidates, key=lambda item: item[1]))[0]
sort.Slice(candidates, func(i, j int) bool {
return candidates[i].Amount < candidates[j].Amount
})
return candidates[0], nil
}
}
func Resolve(db *ReadOnlyDBColumnFamily, url string) *ExpandedResolveResult {
var res = &ExpandedResolveResult{
Stream: nil,
Channel: nil,
Repost: nil,
RepostedChannel: nil,
}
var channel *PathSegment = nil
var stream *PathSegment = nil
parsed, err := lbryurl.Parse(url, false)
if err != nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
log.Printf("parsed: %#v\n", parsed)
// has stream in channel
if strings.Compare(parsed.StreamName, "") != 0 && strings.Compare(parsed.ClaimName, "") != 0 {
channel = &PathSegment{
name: parsed.ClaimName,
claimId: parsed.ChannelClaimId,
amountOrder: parsed.PrimaryBidPosition,
}
stream = &PathSegment{
name: parsed.StreamName,
claimId: parsed.StreamClaimId,
amountOrder: parsed.SecondaryBidPosition,
}
} else if strings.Compare(parsed.ClaimName, "") != 0 {
channel = &PathSegment{
name: parsed.ClaimName,
claimId: parsed.ChannelClaimId,
amountOrder: parsed.PrimaryBidPosition,
}
} else if strings.Compare(parsed.StreamName, "") != 0 {
stream = &PathSegment{
name: parsed.StreamName,
claimId: parsed.StreamClaimId,
amountOrder: parsed.SecondaryBidPosition,
}
}
log.Printf("channel: %#v\n", channel)
log.Printf("stream: %#v\n", stream)
var resolvedChannel *ResolveResult = nil
var resolvedStream *ResolveResult = nil
if channel != nil {
resolvedChannel, err = ResolveParsedUrl(db, channel)
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
} else if resolvedChannel == nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{fmt.Errorf("could not find channel in \"%s\"", url)},
}
return res
}
}
log.Printf("resolvedChannel: %#v\n", resolvedChannel)
log.Printf("resolvedChannel.TxHash: %s\n", hex.EncodeToString(resolvedChannel.TxHash))
log.Printf("resolvedChannel.ClaimHash: %s\n", hex.EncodeToString(resolvedChannel.ClaimHash))
log.Printf("resolvedChannel.ChannelHash: %s\n", hex.EncodeToString(resolvedChannel.ChannelHash))
log.Printf("stream %#v\n", stream)
if stream != nil {
if resolvedChannel != nil {
streamClaim, err := ResolveClaimInChannel(db, resolvedChannel.ClaimHash, stream.Normalized())
log.Printf("streamClaim %#v\n", streamClaim)
if streamClaim != nil {
log.Printf("streamClaim.ClaimHash: %s\n", hex.EncodeToString(streamClaim.ClaimHash))
log.Printf("streamClaim.ChannelHash: %s\n", hex.EncodeToString(streamClaim.ChannelHash))
}
// TODO: Confirm error case
if err != nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
if streamClaim != nil {
resolvedStream, err = FsGetClaimByHash(db, streamClaim.ClaimHash)
// TODO: Confirm error case
if err != nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
}
} else {
resolvedStream, err = ResolveParsedUrl(db, stream)
// TODO: Confirm error case
if err != nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
if channel == nil && resolvedChannel == nil && resolvedStream != nil && len(resolvedStream.ChannelHash) > 0 {
resolvedChannel, err = FsGetClaimByHash(db, resolvedStream.ChannelHash)
// TODO: Confirm error case
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
}
}
if resolvedStream == nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{fmt.Errorf("could not find stream in \"%s\"", url)},
}
return res
}
}
// Getting blockers and filters
var repost *ResolveResult = nil
var repostedChannel *ResolveResult = nil
log.Printf("about to get blockers and filters: %#v, %#v\n", resolvedChannel, resolvedStream)
if resolvedStream != nil || resolvedChannel != nil {
var claim *ResolveResult = nil
var claimHash []byte = nil
var respostedClaimHash []byte = nil
var blockerHash []byte = nil
if resolvedStream != nil {
claim = resolvedStream
claimHash = resolvedStream.ClaimHash
respostedClaimHash = resolvedStream.RepostedClaimHash
} else {
claim = resolvedChannel
claimHash = resolvedChannel.ClaimHash
}
blockerHash, _, err = GetBlockerHash(db, claimHash, respostedClaimHash, claim.ChannelHash)
log.Printf("blockerHash: %s\n", hex.EncodeToString(blockerHash))
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
if blockerHash != nil {
reasonRow, err := FsGetClaimByHash(db, blockerHash)
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{fmt.Errorf("%s, %v, %v", url, blockerHash, reasonRow)},
}
return res
}
if claim.RepostedClaimHash != nil {
repost, err = FsGetClaimByHash(db, claim.RepostedClaimHash)
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
if repost != nil && repost.ChannelHash != nil && repost.SignatureValid {
repostedChannel, err = FsGetClaimByHash(db, repost.ChannelHash)
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
}
}
}
res.Channel = &optionalResolveResultOrError{
res: resolvedChannel,
}
res.Stream = &optionalResolveResultOrError{
res: resolvedStream,
}
res.Repost = &optionalResolveResultOrError{
res: repost,
}
res.RepostedChannel = &optionalResolveResultOrError{
res: repostedChannel,
}
log.Printf("parsed: %+v\n", parsed)
return res
}
//
// Iterators / db construction functions
//

db/db_get.go (new file, 465 additions)

@@ -0,0 +1,465 @@
package db
import (
"encoding/hex"
"fmt"
"log"
"github.com/lbryio/hub/db/prefixes"
"github.com/linxGnu/grocksdb"
)
func GetExpirationHeight(lastUpdatedHeight uint32) uint32 {
return GetExpirationHeightFull(lastUpdatedHeight, false)
}
func GetExpirationHeightFull(lastUpdatedHeight uint32, extended bool) uint32 {
if extended {
return lastUpdatedHeight + NExtendedClaimExpirationTime
}
if lastUpdatedHeight < NExtendedClaimExpirationForkHeight {
return lastUpdatedHeight + NOriginalClaimExpirationTime
}
return lastUpdatedHeight + NExtendedClaimExpirationTime
}
// EnsureHandle is a helper function to ensure that the db has a handle to the given column family.
func EnsureHandle(db *ReadOnlyDBColumnFamily, prefix byte) (*grocksdb.ColumnFamilyHandle, error) {
cfName := string(prefix)
handle := db.Handles[cfName]
if handle == nil {
return nil, fmt.Errorf("%s handle not found", cfName)
}
return handle, nil
}
//
// DB Get functions
//
func GetClaimsInChannelCount(db *ReadOnlyDBColumnFamily, channelHash []byte) (uint32, error) {
handle, err := EnsureHandle(db, prefixes.ChannelCount)
if err != nil {
return 0, err
}
key := prefixes.NewChannelCountKey(channelHash)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return 0, err
} else if slice.Size() == 0 {
return 0, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ChannelCountValueUnpack(rawValue)
return value.Count, nil
}
func GetShortClaimIdUrl(db *ReadOnlyDBColumnFamily, name string, normalizedName string, claimHash []byte, rootTxNum uint32, rootPosition uint16) (string, error) {
prefix := []byte{prefixes.ClaimShortIdPrefix}
handle, err := EnsureHandle(db, prefixes.ClaimShortIdPrefix)
if err != nil {
return "", err
}
claimId := hex.EncodeToString(claimHash)
claimIdLen := len(claimId)
for prefixLen := 0; prefixLen < 10; prefixLen++ {
var j int = prefixLen + 1
if j > claimIdLen {
j = claimIdLen
}
partialClaimId := claimId[:j]
partialKey := prefixes.NewClaimShortIDKey(normalizedName, partialClaimId)
log.Printf("partialKey: %#v\n", partialKey)
keyPrefix := prefixes.ClaimShortIDKeyPackPartial(partialKey, 2)
// Prefix and handle
options := NewIterateOptions().WithPrefix(prefix).WithCfHandle(handle)
// Start and stop bounds
options = options.WithStart(keyPrefix)
// Don't include the key
options = options.WithIncludeValue(false)
ch := IterCF(db.DB, options)
row := <-ch
if row == nil {
continue
}
key := row.Key.(*prefixes.ClaimShortIDKey)
if key.RootTxNum == rootTxNum && key.RootPosition == rootPosition {
return fmt.Sprintf("%s#%s", name, key.PartialClaimId), nil
}
}
return "", nil
}
func GetRepost(db *ReadOnlyDBColumnFamily, claimHash []byte) ([]byte, error) {
handle, err := EnsureHandle(db, prefixes.Repost)
if err != nil {
return nil, err
}
key := prefixes.NewRepostKey(claimHash)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
} else if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.RepostValueUnpack(rawValue)
return value.RepostedClaimHash, nil
}
func GetRepostedCount(db *ReadOnlyDBColumnFamily, claimHash []byte) (int, error) {
handle, err := EnsureHandle(db, prefixes.RepostedClaim)
if err != nil {
return 0, err
}
key := prefixes.NewRepostedKey(claimHash)
keyPrefix := prefixes.RepostedKeyPackPartial(key, 1)
// Prefix and handle
options := NewIterateOptions().WithPrefix(keyPrefix).WithCfHandle(handle)
// Start and stop bounds
// options = options.WithStart(keyPrefix)
// Don't include the key
options = options.WithIncludeValue(false)
var i int = 0
ch := IterCF(db.DB, options)
for range ch {
i++
}
return i, nil
}
func GetChannelForClaim(db *ReadOnlyDBColumnFamily, claimHash []byte, txNum uint32, position uint16) ([]byte, error) {
handle, err := EnsureHandle(db, prefixes.ClaimToChannel)
if err != nil {
return nil, err
}
key := prefixes.NewClaimToChannelKey(claimHash, txNum, position)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
} else if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ClaimToChannelValueUnpack(rawValue)
return value.SigningHash, nil
}
func GetActiveAmount(db *ReadOnlyDBColumnFamily, claimHash []byte, txoType uint8, height uint32) (uint64, error) {
handle, err := EnsureHandle(db, prefixes.ActiveAmount)
if err != nil {
return 0, err
}
startKey := prefixes.NewActiveAmountKey(claimHash, txoType, 0)
endKey := prefixes.NewActiveAmountKey(claimHash, txoType, height)
startKeyRaw := prefixes.ActiveAmountKeyPackPartial(startKey, 3)
endKeyRaw := prefixes.ActiveAmountKeyPackPartial(endKey, 3)
// Prefix and handle
options := NewIterateOptions().WithPrefix([]byte{prefixes.ActiveAmount}).WithCfHandle(handle)
// Start and stop bounds
options = options.WithStart(startKeyRaw).WithStop(endKeyRaw)
// Don't include the key
options = options.WithIncludeKey(false).WithIncludeValue(true)
ch := IterCF(db.DB, options)
var sum uint64 = 0
for kv := range ch {
sum += kv.Value.(*prefixes.ActiveAmountValue).Amount
}
return sum, nil
}
func GetEffectiveAmount(db *ReadOnlyDBColumnFamily, claimHash []byte, supportOnly bool) (uint64, error) {
supportAmount, err := GetActiveAmount(db, claimHash, prefixes.ACTIVATED_SUPPORT_TXO_TYPE, db.Height+1)
if err != nil {
return 0, err
}
if supportOnly {
return supportAmount, nil
}
activationAmount, err := GetActiveAmount(db, claimHash, prefixes.ACTIVATED_CLAIM_TXO_TYPE, db.Height+1)
if err != nil {
return 0, err
}
return activationAmount + supportAmount, nil
}
func GetSupportAmount(db *ReadOnlyDBColumnFamily, claimHash []byte) (uint64, error) {
handle, err := EnsureHandle(db, prefixes.SupportAmount)
if err != nil {
return 0, err
}
key := prefixes.NewSupportAmountKey(claimHash)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return 0, err
} else if slice.Size() == 0 {
return 0, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.SupportAmountValueUnpack(rawValue)
return value.Amount, nil
}
func GetTxHash(db *ReadOnlyDBColumnFamily, txNum uint32) ([]byte, error) {
/*
if self._cache_all_tx_hashes:
return self.total_transactions[tx_num]
return self.prefix_db.tx_hash.get(tx_num, deserialize_value=False)
*/
// TODO: caching
handle, err := EnsureHandle(db, prefixes.TxHash)
if err != nil {
return nil, err
}
key := prefixes.NewTxHashKey(txNum)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
}
if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
return rawValue, nil
}
func GetActivation(db *ReadOnlyDBColumnFamily, txNum uint32, postition uint16) (uint32, error) {
return GetActivationFull(db, txNum, postition, false)
}
func GetActivationFull(db *ReadOnlyDBColumnFamily, txNum uint32, postition uint16, isSupport bool) (uint32, error) {
var typ uint8
handle, err := EnsureHandle(db, prefixes.ActivatedClaimAndSupport)
if err != nil {
return 0, err
}
if isSupport {
typ = prefixes.ACTIVATED_SUPPORT_TXO_TYPE
} else {
typ = prefixes.ACTIVATED_CLAIM_TXO_TYPE
}
key := prefixes.NewActivationKey(typ, txNum, postition)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return 0, err
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ActivationValueUnpack(rawValue)
// Does this need to explicitly return an int64, in case the uint32 overflows the max of an int?
return value.Height, nil
}
func GetCachedClaimTxo(db *ReadOnlyDBColumnFamily, claim []byte) (*prefixes.ClaimToTXOValue, error) {
// TODO: implement cache
handle, err := EnsureHandle(db, prefixes.ClaimToTXO)
if err != nil {
return nil, err
}
key := prefixes.NewClaimToTXOKey(claim)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
}
if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ClaimToTXOValueUnpack(rawValue)
return value, nil
}
func GetControllingClaim(db *ReadOnlyDBColumnFamily, name string) (*prefixes.ClaimTakeoverValue, error) {
handle, err := EnsureHandle(db, prefixes.ClaimTakeover)
if err != nil {
return nil, err
}
key := prefixes.NewClaimTakeoverKey(name)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
}
if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.ClaimTakeoverValueUnpack(rawValue)
return value, nil
}
func FsGetClaimByHash(db *ReadOnlyDBColumnFamily, claimHash []byte) (*ResolveResult, error) {
claim, err := GetCachedClaimTxo(db, claimHash)
if err != nil {
return nil, err
}
activation, err := GetActivation(db, claim.TxNum, claim.Position)
if err != nil {
return nil, err
}
log.Printf("%#v\n%#v\n%#v\n", claim, hex.EncodeToString(claimHash), activation)
return PrepareResolveResult(
db,
claim.TxNum,
claim.Position,
claimHash,
claim.Name,
claim.RootTxNum,
claim.RootPosition,
activation,
claim.ChannelSignatureIsValid,
)
}
func GetDBState(db *ReadOnlyDBColumnFamily) (*prefixes.DBStateValue, error) {
handle, err := EnsureHandle(db, prefixes.DBState)
if err != nil {
return nil, err
}
key := prefixes.NewDBStateKey()
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
} else if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.DBStateValueUnpack(rawValue)
return value, nil
}
func ClaimShortIdIter(db *ReadOnlyDBColumnFamily, normalizedName string, claimId string) <-chan *prefixes.PrefixRowKV {
handle, err := EnsureHandle(db, prefixes.ClaimShortIdPrefix)
if err != nil {
return nil
}
key := prefixes.NewClaimShortIDKey(normalizedName, claimId)
var rawKeyPrefix []byte = nil
if claimId != "" {
rawKeyPrefix = prefixes.ClaimShortIDKeyPackPartial(key, 2)
} else {
rawKeyPrefix = prefixes.ClaimShortIDKeyPackPartial(key, 1)
}
options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
options = options.WithIncludeValue(true) //.WithIncludeStop(true)
ch := IterCF(db.DB, options)
return ch
}
func GetCachedClaimHash(db *ReadOnlyDBColumnFamily, txNum uint32, position uint16) (*prefixes.TXOToClaimValue, error) {
// TODO: implement cache
handle, err := EnsureHandle(db, prefixes.TXOToClaim)
if err != nil {
return nil, err
}
key := prefixes.NewTXOToClaimKey(txNum, position)
rawKey := key.PackKey()
slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
if err != nil {
return nil, err
} else if slice.Size() == 0 {
return nil, nil
}
rawValue := make([]byte, len(slice.Data()))
copy(rawValue, slice.Data())
value := prefixes.TXOToClaimValueUnpack(rawValue)
return value, nil
}
// GetBlockerHash gets the hash of the blocker or filterer of the claim.
// TODO: this currently converts the byte arrays to strings, which is not
// very efficient. Might want to figure out a better way to do this.
func GetBlockerHash(db *ReadOnlyDBColumnFamily, claimHash, repostedClaimHash, channelHash []byte) ([]byte, []byte, error) {
claimHashStr := string(claimHash)
respostedClaimHashStr := string(repostedClaimHash)
channelHashStr := string(channelHash)
var blockedHash []byte = nil
var filteredHash []byte = nil
blockedHash = db.BlockedStreams[claimHashStr]
if blockedHash == nil {
blockedHash = db.BlockedStreams[respostedClaimHashStr]
}
if blockedHash == nil {
blockedHash = db.BlockedChannels[claimHashStr]
}
if blockedHash == nil {
blockedHash = db.BlockedChannels[respostedClaimHashStr]
}
if blockedHash == nil {
blockedHash = db.BlockedChannels[channelHashStr]
}
filteredHash = db.FilteredStreams[claimHashStr]
if filteredHash == nil {
filteredHash = db.FilteredStreams[respostedClaimHashStr]
}
if filteredHash == nil {
filteredHash = db.FilteredChannels[claimHashStr]
}
if filteredHash == nil {
filteredHash = db.FilteredChannels[respostedClaimHashStr]
}
if filteredHash == nil {
filteredHash = db.FilteredChannels[channelHashStr]
}
return blockedHash, filteredHash, nil
}
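
For context, a minimal sketch (not part of this commit) of how the getters now living in db/db_get.go might be called from another package. It assumes a *db.ReadOnlyDBColumnFamily has already been opened elsewhere, and inspectClaim is a hypothetical helper.

package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/lbryio/hub/db"
)

// inspectClaim calls a few of the relocated getters for one claim. The
// *db.ReadOnlyDBColumnFamily is assumed to be opened by the caller; how
// that is done is outside the scope of this commit.
func inspectClaim(rodb *db.ReadOnlyDBColumnFamily, claimIdHex string) error {
	claimHash, err := hex.DecodeString(claimIdHex)
	if err != nil {
		return err
	}
	// Activated claim amount plus activated support amounts.
	effective, err := db.GetEffectiveAmount(rodb, claimHash, false)
	if err != nil {
		return err
	}
	// Total support amount recorded under the SupportAmount column family.
	support, err := db.GetSupportAmount(rodb, claimHash)
	if err != nil {
		return err
	}
	// Hash of the claim being reposted, if this claim is a repost (nil otherwise).
	reposted, err := db.GetRepost(rodb, claimHash)
	if err != nil {
		return err
	}
	fmt.Printf("effective=%d support=%d reposted=%x\n", effective, support, reposted)
	return nil
}

func main() {
	// Open a ReadOnlyDBColumnFamily here and pass it to inspectClaim.
	log.Println("see inspectClaim for example usage of the db/db_get.go getters")
}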

db/db_resolve.go (new file, 477 additions)

@@ -0,0 +1,477 @@
package db
import (
"bytes"
"encoding/hex"
"fmt"
"log"
"math"
"sort"
"strings"
"github.com/lbryio/hub/db/prefixes"
"github.com/lbryio/lbry.go/v2/extras/util"
lbryurl "github.com/lbryio/lbry.go/v2/url"
)
// PrepareResolveResult prepares a ResolveResult to return
func PrepareResolveResult(
db *ReadOnlyDBColumnFamily,
txNum uint32,
position uint16,
claimHash []byte,
name string,
rootTxNum uint32,
rootPosition uint16,
activationHeight uint32,
signatureValid bool) (*ResolveResult, error) {
normalizedName := util.NormalizeName(name)
controllingClaim, err := GetControllingClaim(db, normalizedName)
if err != nil {
return nil, err
}
txHash, err := GetTxHash(db, txNum)
if err != nil {
return nil, err
}
height := BisectRight(db.TxCounts, txNum)
createdHeight := BisectRight(db.TxCounts, rootTxNum)
lastTakeoverHeight := controllingClaim.Height
expirationHeight := GetExpirationHeight(height)
supportAmount, err := GetSupportAmount(db, claimHash)
if err != nil {
return nil, err
}
claimToTxo, err := GetCachedClaimTxo(db, claimHash)
if err != nil {
return nil, err
}
claimAmount := claimToTxo.Amount
effectiveAmount, err := GetEffectiveAmount(db, claimHash, false)
if err != nil {
return nil, err
}
channelHash, err := GetChannelForClaim(db, claimHash, txNum, position)
if err != nil {
return nil, err
}
repostedClaimHash, err := GetRepost(db, claimHash)
if err != nil {
return nil, err
}
shortUrl, err := GetShortClaimIdUrl(db, name, normalizedName, claimHash, txNum, rootPosition)
if err != nil {
return nil, err
}
var canonicalUrl string = shortUrl
claimsInChannel, err := GetClaimsInChannelCount(db, claimHash)
if err != nil {
return nil, err
}
if channelHash != nil {
// Ignore error because we already have this set if this doesn't work
channelVals, _ := GetCachedClaimTxo(db, channelHash)
log.Printf("channelVals: %#v\n", channelVals)
if channelVals != nil {
channelShortUrl, _ := GetShortClaimIdUrl(
db,
channelVals.Name,
channelVals.NormalizedName(),
channelHash, channelVals.RootTxNum,
channelVals.RootPosition,
)
canonicalUrl = fmt.Sprintf("%s/%s", channelShortUrl, shortUrl)
}
}
reposted, err := GetRepostedCount(db, claimHash)
if err != nil {
return nil, err
}
isControlling := bytes.Equal(controllingClaim.ClaimHash, claimHash)
return &ResolveResult{
Name: name,
NormalizedName: normalizedName,
ClaimHash: claimHash,
TxNum: txNum,
Position: position,
TxHash: txHash,
Height: height,
Amount: claimAmount,
ShortUrl: shortUrl,
IsControlling: isControlling,
CanonicalUrl: canonicalUrl,
CreationHeight: createdHeight,
ActivationHeight: activationHeight,
ExpirationHeight: expirationHeight,
EffectiveAmount: effectiveAmount,
SupportAmount: supportAmount,
Reposted: reposted,
LastTakeoverHeight: lastTakeoverHeight,
ClaimsInChannel: claimsInChannel,
ChannelHash: channelHash,
RepostedClaimHash: repostedClaimHash,
SignatureValid: signatureValid,
}, nil
}
func ResolveParsedUrl(db *ReadOnlyDBColumnFamily, parsed *PathSegment) (*ResolveResult, error) {
normalizedName := util.NormalizeName(parsed.name)
if (parsed.amountOrder == -1 && parsed.claimId == "") || parsed.amountOrder == 1 {
controlling, err := GetControllingClaim(db, normalizedName)
if err != nil {
return nil, err
}
if controlling == nil {
return nil, nil
}
return FsGetClaimByHash(db, controlling.ClaimHash)
}
var amountOrder int = int(math.Max(float64(parsed.amountOrder), 1))
log.Println("amountOrder:", amountOrder)
if parsed.claimId != "" {
if len(parsed.claimId) == 40 {
claimHash, err := hex.DecodeString(parsed.claimId)
if err != nil {
return nil, err
}
// Maybe don't use caching version, when I actually implement the cache
claimTxo, err := GetCachedClaimTxo(db, claimHash)
if err != nil {
return nil, err
}
if claimTxo == nil || claimTxo.NormalizedName() != normalizedName {
return nil, nil
}
activation, err := GetActivation(db, claimTxo.TxNum, claimTxo.Position)
if err != nil {
return nil, err
}
return PrepareResolveResult(
db,
claimTxo.TxNum,
claimTxo.Position,
claimHash,
claimTxo.Name,
claimTxo.RootTxNum,
claimTxo.RootPosition,
activation,
claimTxo.ChannelSignatureIsValid,
)
}
log.Println("nomalizedName:", normalizedName)
log.Println("claimId:", parsed.claimId)
var j int = 10
if len(parsed.claimId) < j {
j = len(parsed.claimId)
}
ch := ClaimShortIdIter(db, normalizedName, parsed.claimId[:j])
row := <-ch
key := row.Key.(*prefixes.ClaimShortIDKey)
claimTxo := row.Value.(*prefixes.ClaimShortIDValue)
fullClaimHash, err := GetCachedClaimHash(db, claimTxo.TxNum, claimTxo.Position)
if err != nil {
return nil, err
}
c, err := GetCachedClaimTxo(db, fullClaimHash.ClaimHash)
if err != nil {
return nil, err
}
nonNormalizedName := c.Name
signatureIsValid := c.ChannelSignatureIsValid
activation, err := GetActivation(db, claimTxo.TxNum, claimTxo.Position)
if err != nil {
return nil, err
}
return PrepareResolveResult(
db,
claimTxo.TxNum,
claimTxo.Position,
fullClaimHash.ClaimHash,
nonNormalizedName,
key.RootTxNum,
key.RootPosition,
activation,
signatureIsValid,
)
}
return nil, nil
}
func ResolveClaimInChannel(db *ReadOnlyDBColumnFamily, channelHash []byte, normalizedName string) (*ResolveResult, error) {
handle, err := EnsureHandle(db, prefixes.ChannelToClaim)
if err != nil {
return nil, err
}
key := prefixes.NewChannelToClaimKey(channelHash, normalizedName)
rawKeyPrefix := prefixes.ChannelToClaimKeyPackPartial(key, 2)
options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
options = options.WithIncludeValue(true) //.WithIncludeStop(true)
ch := IterCF(db.DB, options)
// TODO: what's a good default size for this?
var candidates []*ResolveResult = make([]*ResolveResult, 0, 100)
var i = 0
for row := range ch {
key := row.Key.(*prefixes.ChannelToClaimKey)
stream := row.Value.(*prefixes.ChannelToClaimValue)
effectiveAmount, err := GetEffectiveAmount(db, stream.ClaimHash, false)
if err != nil {
return nil, err
}
if i == 0 || candidates[i-1].Amount == effectiveAmount {
candidates = append(
candidates,
&ResolveResult{
TxNum: key.TxNum,
Position: key.Position,
ClaimHash: stream.ClaimHash,
Amount: effectiveAmount,
ChannelHash: channelHash,
NormalizedName: normalizedName,
},
)
i++
} else {
break
}
}
log.Printf("candidates: %#v\n", candidates)
if len(candidates) == 0 {
return nil, nil
} else {
// return list(sorted(candidates, key=lambda item: item[1]))[0]
sort.Slice(candidates, func(i, j int) bool {
return candidates[i].Amount < candidates[j].Amount
})
return candidates[0], nil
}
}
func Resolve(db *ReadOnlyDBColumnFamily, url string) *ExpandedResolveResult {
var res = &ExpandedResolveResult{
Stream: nil,
Channel: nil,
Repost: nil,
RepostedChannel: nil,
}
var channel *PathSegment = nil
var stream *PathSegment = nil
parsed, err := lbryurl.Parse(url, false)
if err != nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
log.Printf("parsed: %#v\n", parsed)
// has stream in channel
if strings.Compare(parsed.StreamName, "") != 0 && strings.Compare(parsed.ClaimName, "") != 0 {
channel = &PathSegment{
name: parsed.ClaimName,
claimId: parsed.ChannelClaimId,
amountOrder: parsed.PrimaryBidPosition,
}
stream = &PathSegment{
name: parsed.StreamName,
claimId: parsed.StreamClaimId,
amountOrder: parsed.SecondaryBidPosition,
}
} else if strings.Compare(parsed.ClaimName, "") != 0 {
channel = &PathSegment{
name: parsed.ClaimName,
claimId: parsed.ChannelClaimId,
amountOrder: parsed.PrimaryBidPosition,
}
} else if strings.Compare(parsed.StreamName, "") != 0 {
stream = &PathSegment{
name: parsed.StreamName,
claimId: parsed.StreamClaimId,
amountOrder: parsed.SecondaryBidPosition,
}
}
log.Printf("channel: %#v\n", channel)
log.Printf("stream: %#v\n", stream)
var resolvedChannel *ResolveResult = nil
var resolvedStream *ResolveResult = nil
if channel != nil {
resolvedChannel, err = ResolveParsedUrl(db, channel)
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
} else if resolvedChannel == nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{fmt.Errorf("could not find channel in \"%s\"", url)},
}
return res
}
}
log.Printf("resolvedChannel: %#v\n", resolvedChannel)
log.Printf("resolvedChannel.TxHash: %s\n", hex.EncodeToString(resolvedChannel.TxHash))
log.Printf("resolvedChannel.ClaimHash: %s\n", hex.EncodeToString(resolvedChannel.ClaimHash))
log.Printf("resolvedChannel.ChannelHash: %s\n", hex.EncodeToString(resolvedChannel.ChannelHash))
log.Printf("stream %#v\n", stream)
if stream != nil {
if resolvedChannel != nil {
streamClaim, err := ResolveClaimInChannel(db, resolvedChannel.ClaimHash, stream.Normalized())
log.Printf("streamClaim %#v\n", streamClaim)
if streamClaim != nil {
log.Printf("streamClaim.ClaimHash: %s\n", hex.EncodeToString(streamClaim.ClaimHash))
log.Printf("streamClaim.ChannelHash: %s\n", hex.EncodeToString(streamClaim.ChannelHash))
}
// TODO: Confirm error case
if err != nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
if streamClaim != nil {
resolvedStream, err = FsGetClaimByHash(db, streamClaim.ClaimHash)
// TODO: Confirm error case
if err != nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
}
} else {
resolvedStream, err = ResolveParsedUrl(db, stream)
// TODO: Confirm error case
if err != nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
if channel == nil && resolvedChannel == nil && resolvedStream != nil && len(resolvedStream.ChannelHash) > 0 {
resolvedChannel, err = FsGetClaimByHash(db, resolvedStream.ChannelHash)
// TODO: Confirm error case
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
}
}
if resolvedStream == nil {
res.Stream = &optionalResolveResultOrError{
err: &ResolveError{fmt.Errorf("could not find stream in \"%s\"", url)},
}
return res
}
}
// Getting blockers and filters
var repost *ResolveResult = nil
var repostedChannel *ResolveResult = nil
log.Printf("about to get blockers and filters: %#v, %#v\n", resolvedChannel, resolvedStream)
if resolvedStream != nil || resolvedChannel != nil {
var claim *ResolveResult = nil
var claimHash []byte = nil
var respostedClaimHash []byte = nil
var blockerHash []byte = nil
if resolvedStream != nil {
claim = resolvedStream
claimHash = resolvedStream.ClaimHash
respostedClaimHash = resolvedStream.RepostedClaimHash
} else {
claim = resolvedChannel
claimHash = resolvedChannel.ClaimHash
}
blockerHash, _, err = GetBlockerHash(db, claimHash, respostedClaimHash, claim.ChannelHash)
log.Printf("blockerHash: %s\n", hex.EncodeToString(blockerHash))
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
if blockerHash != nil {
reasonRow, err := FsGetClaimByHash(db, blockerHash)
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{fmt.Errorf("%s, %v, %v", url, blockerHash, reasonRow)},
}
return res
}
if claim.RepostedClaimHash != nil {
repost, err = FsGetClaimByHash(db, claim.RepostedClaimHash)
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
if repost != nil && repost.ChannelHash != nil && repost.SignatureValid {
repostedChannel, err = FsGetClaimByHash(db, repost.ChannelHash)
if err != nil {
res.Channel = &optionalResolveResultOrError{
err: &ResolveError{err},
}
return res
}
}
}
}
res.Channel = &optionalResolveResultOrError{
res: resolvedChannel,
}
res.Stream = &optionalResolveResultOrError{
res: resolvedStream,
}
res.Repost = &optionalResolveResultOrError{
res: repost,
}
res.RepostedChannel = &optionalResolveResultOrError{
res: repostedChannel,
}
log.Printf("parsed: %+v\n", parsed)
return res
}
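
As a closing illustration (again not part of the commit), a test-style sketch inside package db of how the Resolve entry point moved to db/db_resolve.go is driven. The ReadOnlyDBColumnFamily and the resolveExample helper are assumed; the unexported res/err fields are readable only because the sketch sits in the same package.

package db

import "log"

// resolveExample drives Resolve for a single LBRY URL and logs what came back.
// The ReadOnlyDBColumnFamily is assumed to be opened by the surrounding code.
func resolveExample(rodb *ReadOnlyDBColumnFamily, url string) {
	res := Resolve(rodb, url)

	// Each field of ExpandedResolveResult carries either a result or an error.
	if res.Channel != nil && res.Channel.err != nil {
		log.Printf("channel error for %s: %v", url, res.Channel.err)
	}
	if res.Stream != nil && res.Stream.res != nil {
		log.Printf("resolved stream for %s at height %d", url, res.Stream.res.Height)
	}
	if res.Repost != nil && res.Repost.res != nil {
		log.Printf("repost resolves to claim %x", res.Repost.res.ClaimHash)
	}
}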