fix anti-throttling
fix duplicates with multi-channels
parent bb76500f04
commit fe55304184
5 changed files with 29 additions and 10 deletions
@@ -104,7 +104,7 @@ services:
   ## Chainquery ##
   ################
   chainquery:
-    image: lbry/chainquery:master
+    image: lbry/chainquery:v2.0.8
     restart: "no"
     ports:
       - 6300:6300
@@ -47,13 +47,13 @@ func GetIPPool() (*IPPool, error) {
 		if ipnet.IP.To16() != nil && govalidator.IsIPv6(ipnet.IP.String()) && !ipv6Added {
 			pool = append(pool, throttledIP{
 				IP:      ipnet.IP.String(),
-				LastUse: time.Time{},
+				LastUse: time.Now().Add(-5 * time.Minute),
 			})
 			ipv6Added = true
 		} else if ipnet.IP.To4() != nil && govalidator.IsIPv4(ipnet.IP.String()) {
 			pool = append(pool, throttledIP{
 				IP:      ipnet.IP.String(),
-				LastUse: time.Time{},
+				LastUse: time.Now().Add(-5 * time.Minute),
 			})
 		}
 	}
@@ -144,7 +144,7 @@ func (i *IPPool) nextIP() (*throttledIP, error) {
 	defer i.lock.Unlock()

 	sort.Slice(i.ips, func(j, k int) bool {
-		return i.ips[j].LastUse.Before(i.ips[j].LastUse)
+		return i.ips[j].LastUse.Before(i.ips[k].LastUse)
 	})

 	if !AllThrottled(i.ips) {
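
The two hunks above make up the anti-throttling fix: GetIPPool now seeds LastUse five minutes in the past instead of the zero time, and nextIP's comparator previously compared i.ips[j] against itself, so the sort by last use never reordered the pool. Below is a minimal, self-contained sketch of the rotation idea with the corrected comparator; the throttledIP field names come from the patch, while the helper name and the example addresses are illustrative and not part of the package.

package main

import (
	"fmt"
	"sort"
	"time"
)

// throttledIP mirrors the struct used in the patch; only the fields the
// rotation needs are modeled here.
type throttledIP struct {
	IP      string
	LastUse time.Time
}

// leastRecentlyUsed applies the corrected comparator: order by LastUse and
// take the front entry. The pre-fix comparator compared ips[j] against
// itself, so the sort was a no-op and the rotation could keep handing out
// the same address.
func leastRecentlyUsed(ips []throttledIP) *throttledIP {
	if len(ips) == 0 {
		return nil
	}
	sort.Slice(ips, func(j, k int) bool {
		return ips[j].LastUse.Before(ips[k].LastUse)
	})
	return &ips[0]
}

func main() {
	pool := []throttledIP{
		// Seeding LastUse five minutes in the past (as GetIPPool now does)
		// makes a fresh IP look idle without relying on the zero time value.
		{IP: "192.0.2.1", LastUse: time.Now().Add(-5 * time.Minute)},
		{IP: "192.0.2.2", LastUse: time.Now()},
	}
	next := leastRecentlyUsed(pool)
	fmt.Println("next IP to use:", next.IP) // 192.0.2.1, the idle one
}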
@@ -9,14 +9,28 @@ func TestAll(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	ip, err := pool.GetIP()
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(ip)
+	pool.ReleaseIP(ip)
+	ip2, err := pool.GetIP()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ip == ip2 && len(pool.ips) > 1 {
+		t.Fatalf("the same IP was returned twice! %s, %s", ip, ip2)
+	}
+	t.Log(ip2)
+	pool.ReleaseIP(ip2)
 	for range pool.ips {
-		ip, err := pool.GetIP()
+		_, err = pool.GetIP()
 		if err != nil {
 			t.Fatal(err)
 		}
-		t.Log(ip)
 	}

 	next, err := pool.nextIP()
 	if err != nil {
 		t.Logf("%s", err.Error())
@@ -192,7 +192,7 @@ func transferVideos(s *Sync) error {

 		var stream *jsonrpc.Claim = nil
 		for _, c := range streams.Items {
-			if c.ClaimID != video.ClaimID {
+			if c.ClaimID != video.ClaimID || (c.SigningChannel != nil && c.SigningChannel.ClaimID != s.lbryChannelID) {
 				continue
 			}
 			stream = &c
@@ -675,8 +675,13 @@ func (s *Sync) getClaims(defaultOnly bool) ([]jsonrpc.Claim, error) {
 	if err != nil {
 		return nil, errors.Prefix("cannot list claims", err)
 	}
-	return claims.Items, nil
+	items := make([]jsonrpc.Claim, 0, len(claims.Items))
+	for _, c := range claims.Items {
+		if c.SigningChannel != nil && c.SigningChannel.ClaimID == s.lbryChannelID {
+			items = append(items, c)
+		}
+	}
+	return items, nil
 }

 func (s *Sync) checkIntegrity() error {
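
The last two hunks share the guard behind the duplicates-with-multi-channels fix: a claim is only treated as belonging to the synced channel when its SigningChannel.ClaimID matches s.lbryChannelID. The sketch below pulls that ownership check out as a helper; the helper name and the trimmed Claim/Channel types are assumptions for illustration, not the jsonrpc package's real definitions.

package main

import "fmt"

// Channel and Claim are trimmed stand-ins for the jsonrpc types referenced
// in the diff; only the fields touched by the guard are modeled.
type Channel struct {
	ClaimID string
}

type Claim struct {
	ClaimID        string
	SigningChannel *Channel
}

// signedByChannel is a hypothetical helper expressing the added guard: keep
// a claim only when it is signed by the channel currently being synced.
// Note the two hunks treat unsigned claims differently: transferVideos lets
// a claim with a nil SigningChannel through, while getClaims drops it; this
// sketch follows the stricter getClaims form.
func signedByChannel(c Claim, channelID string) bool {
	return c.SigningChannel != nil && c.SigningChannel.ClaimID == channelID
}

func main() {
	lbryChannelID := "abc123" // stand-in for s.lbryChannelID
	claims := []Claim{
		{ClaimID: "v1", SigningChannel: &Channel{ClaimID: "abc123"}},
		{ClaimID: "v2", SigningChannel: &Channel{ClaimID: "otherchan"}},
		{ClaimID: "v3"}, // anonymous claim, no signing channel
	}

	// Same shape as the new getClaims body: pre-allocate, then keep only the
	// claims signed by this channel so other channels in the same wallet no
	// longer leak into the sync.
	items := make([]Claim, 0, len(claims))
	for _, c := range claims {
		if signedByChannel(c, lbryChannelID) {
			items = append(items, c)
		}
	}
	for _, c := range items {
		fmt.Println("kept:", c.ClaimID) // only v1
	}
}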