package dht

import (
	"encoding/json"
	"fmt"
	"math/big"
	"net"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/lbryio/lbry.go/errors"
	"github.com/lbryio/lbry.go/stop"

	"github.com/lbryio/reflector.go/dht/bits"

	log "github.com/sirupsen/logrus"
)

// TODO: if the routing table is ever empty (aka the node is isolated), it should re-bootstrap

// TODO: use a tree with bucket splitting instead of a fixed bucket list. include jack's optimization (see link in commit message)
// https://github.com/lbryio/lbry/pull/1211/commits/341b27b6d21ac027671d42458826d02735aaae41

// peer is a contact with extra freshness information
type peer struct {
	Contact      Contact
	LastActivity time.Time
	NumFailures  int
	//<lastPublished>,
	//<originallyPublished>
	// <originalPublisherID>
}

func (p *peer) Touch() {
	p.LastActivity = time.Now()
	p.NumFailures = 0
}

// ActiveInLast returns whether the peer has responded in the last `d` duration.
// This is used to check if the peer is "good", meaning that we believe the peer will respond to our requests.
func (p *peer) ActiveInLast(d time.Duration) bool {
	return time.Since(p.LastActivity) < d
}

// IsBad returns whether a peer is "bad", meaning that it has failed to respond to multiple pings in a row
func (p *peer) IsBad(maxFailures int) bool {
	return p.NumFailures >= maxFailures
}

// Fail marks a peer as having failed to respond
func (p *peer) Fail() {
	p.NumFailures++
}
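
// Peer lifecycle, for reference: a peer that responds gets Touch()ed, which
// refreshes LastActivity and clears NumFailures, while each failed request
// calls Fail(). Once NumFailures reaches maxPeerFails, IsBad reports true and
// the peer becomes eligible for eviction when its bucket needs room for a new
// contact (see bucket.UpdateContact below).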

type bucket struct {
	lock        *sync.RWMutex
	peers       []peer
	lastUpdate  time.Time
	bucketRange *bits.Range
}

// Len returns the number of peers in the bucket
func (b bucket) Len() int {
	b.lock.RLock()
	defer b.lock.RUnlock()
	return len(b.peers)
}

// Contacts returns a slice of the bucket's contacts
func (b bucket) Contacts() []Contact {
	b.lock.RLock()
	defer b.lock.RUnlock()
	contacts := make([]Contact, len(b.peers))
	for i := range b.peers {
		contacts[i] = b.peers[i].Contact
	}
	return contacts
}

// UpdateContact marks a contact as having been successfully contacted. If insertIfNew
// is set and the contact does not exist yet, it is inserted.
func (b *bucket) UpdateContact(c Contact, insertIfNew bool) {
	b.lock.Lock()
	defer b.lock.Unlock()

	// TODO: verify the peer is in the bucket key range

	peerIndex := find(c.ID, b.peers)
	if peerIndex >= 0 {
		b.lastUpdate = time.Now()
		b.peers[peerIndex].Touch()
		moveToBack(b.peers, peerIndex)
	} else if insertIfNew {
		hasRoom := true

		if len(b.peers) >= bucketSize {
			hasRoom = false
			for i := range b.peers {
				if b.peers[i].IsBad(maxPeerFails) {
					// TODO: Ping contact first. Only remove if it does not respond
					b.peers = append(b.peers[:i], b.peers[i+1:]...)
					hasRoom = true
					break
				}
			}
		}

		if hasRoom {
			b.lastUpdate = time.Now()
			peer := peer{Contact: c}
			peer.Touch()
			b.peers = append(b.peers, peer)
		}
	}
}

// FailContact marks a contact as having failed to respond. It does not remove the
// contact; bad contacts are evicted later, when their bucket needs room for a new one.
func (b *bucket) FailContact(id bits.Bitmap) {
	b.lock.Lock()
	defer b.lock.Unlock()
	i := find(id, b.peers)
	if i >= 0 {
		// BEP5 says not to remove the contact until the bucket is full and you try to insert
		b.peers[i].Fail()
	}
}

// find returns the index of the peer with the given id, or -1 if the id is not in the list
func find(id bits.Bitmap, peers []peer) int {
	for i := range peers {
		if peers[i].Contact.ID.Equals(id) {
			return i
		}
	}
	return -1
}

// NeedsRefresh returns true if bucket has not been updated in the last `refreshInterval`, false otherwise
func (b *bucket) NeedsRefresh(refreshInterval time.Duration) bool {
	b.lock.RLock()
	defer b.lock.RUnlock()
	return time.Since(b.lastUpdate) > refreshInterval
}

type routingTable struct {
	id      bits.Bitmap
	buckets []bucket
}

func newRoutingTable(id bits.Bitmap) *routingTable {
	var rt routingTable
	rt.id = id
	rt.reset()
	return &rt
}

// reset re-initializes the routing table to a single bucket covering the entire key space
func (rt *routingTable) reset() {
	start := big.NewInt(0)
	end := big.NewInt(1)
	end.Lsh(end, bits.NumBits)
	end.Sub(end, big.NewInt(1))

	rt.buckets = []bucket{}
	rt.buckets = append(rt.buckets, bucket{
		peers: make([]peer, 0, bucketSize),
		lock:  &sync.RWMutex{},
		bucketRange: &bits.Range{
			Start: bits.FromBigP(start),
			End:   bits.FromBigP(end),
		},
	})
}
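
// A freshly reset table therefore has exactly one bucket spanning [0, 2^NumBits - 1].
// A rough usage sketch (myID is a hypothetical node id, not defined in this package):
//
//	rt := newRoutingTable(myID)
//	fmt.Println(len(rt.BucketRanges())) // 1, until inserts cause the bucket to split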

// BucketInfo returns a human-readable summary of the non-empty buckets
func (rt *routingTable) BucketInfo() string {
	var bucketInfo []string
	for i, b := range rt.buckets {
		if b.Len() > 0 {
			contacts := b.Contacts()
			s := make([]string, len(contacts))
			for j, c := range contacts {
				s[j] = c.ID.HexShort()
			}
			bucketInfo = append(bucketInfo, fmt.Sprintf("Bucket %d: (%d) %s", i, len(contacts), strings.Join(s, ", ")))
		}
	}
	if len(bucketInfo) == 0 {
		return "buckets are empty"
	}
	return strings.Join(bucketInfo, "\n")
}

// Update inserts or refreshes a contact
func (rt *routingTable) Update(c Contact) {
	rt.insertContact(c)
}

// Fresh refreshes a contact if it's already in the routing table
func (rt *routingTable) Fresh(c Contact) {
	rt.bucketFor(c.ID).UpdateContact(c, false)
}

// Fail marks a contact as having failed to respond
func (rt *routingTable) Fail(c Contact) {
	rt.bucketFor(c.ID).FailContact(c.ID)
}

// GetClosest returns the closest `limit` contacts from the routing table,
// sorted by XOR distance from the target
func (rt *routingTable) GetClosest(target bits.Bitmap, limit int) []Contact {
	toSort := []sortedContact{}
	for _, b := range rt.buckets {
		toSort = appendContacts(toSort, b, target)
	}
	sort.Sort(byXorDistance(toSort))

	contacts := []Contact{}
	for _, sorted := range toSort {
		contacts = append(contacts, sorted.contact)
		if len(contacts) >= limit {
			break
		}
	}
	return contacts
}
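
// For example, to look up the 8 contacts nearest a target hash (targetID is a
// hypothetical bits.Bitmap, and 8 stands in for whatever k/bucketSize is in use):
//
//	nearest := rt.GetClosest(targetID, 8)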

// appendContacts appends the bucket's contacts to the list, tagging each with its XOR distance from target
func appendContacts(contacts []sortedContact, b bucket, target bits.Bitmap) []sortedContact {
	for _, contact := range b.Contacts() {
		contacts = append(contacts, sortedContact{contact, contact.ID.Xor(target)})
	}
	return contacts
}

// Count returns the number of contacts in the routing table
func (rt *routingTable) Count() int {
	count := 0
	for _, bucket := range rt.buckets {
		count += bucket.Len()
	}
	return count
}

// BucketRanges returns a slice of ranges, where the `start` of each range is the smallest id that can
// go in that bucket, and the `end` is the largest id
func (rt *routingTable) BucketRanges() []bits.Range {
	ranges := make([]bits.Range, len(rt.buckets))
	for i, b := range rt.buckets {
		ranges[i] = *b.bucketRange
	}
	return ranges
}

// bucketNumFor returns the index of the bucket whose range covers the XOR distance between target and the node's own id
func (rt *routingTable) bucketNumFor(target bits.Bitmap) int {
	if rt.id.Equals(target) {
		panic("routing table does not have a bucket for its own id")
	}
	distance := target.Xor(rt.id)
	for i, b := range rt.buckets {
		if b.bucketRange.Start.Cmp(distance) <= 0 && b.bucketRange.End.Cmp(distance) >= 0 {
			return i
		}
	}
	panic("target value overflows the key space")
}
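
// To make the lookup concrete with toy 4-bit values (the real keyspace is
// bits.NumBits wide): if rt.id is 0001 and the target is 1001, the distance is
// 1000, so the target lands in whichever bucket's range contains 1000, not in
// a bucket chosen by the raw target bits.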

func (rt *routingTable) bucketFor(target bits.Bitmap) *bucket {
	return &rt.buckets[rt.bucketNumFor(target)]
}

// shouldSplit returns true if the bucket for the target is full and may be split in two
func (rt *routingTable) shouldSplit(target bits.Bitmap) bool {
	bucketIndex := rt.bucketNumFor(target)
	if len(rt.buckets[bucketIndex].peers) >= bucketSize {
		if bucketIndex == 0 { // this is the bucket covering our node id
			return true
		}
		kClosest := rt.GetClosest(rt.id, bucketSize)
		kthClosest := kClosest[len(kClosest)-1]
		if target.Xor(rt.id).Cmp(kthClosest.ID.Xor(rt.id)) < 0 {
			return true // the kth closest contact is further than this one
		}
	}
	return false
}
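
// This follows the usual Kademlia splitting rule: a full bucket is only split
// when it covers our own id (index 0 here) or when the candidate would rank
// among our bucketSize closest contacts; otherwise insertContact below simply
// drops the new contact.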

// insertContact adds a contact to its bucket, splitting the bucket first if the
// contact doesn't fit and the bucket is allowed to split
func (rt *routingTable) insertContact(c Contact) {
	if len(rt.buckets[rt.bucketNumFor(c.ID)].peers) < bucketSize {
		rt.buckets[rt.bucketNumFor(c.ID)].UpdateContact(c, true)
	} else if rt.shouldSplit(c.ID) {
		rt.recursiveInsertContact(c)
	}
}

// recursiveInsertContact splits the contact's bucket in half, reassigns the
// bucket's peers to the two halves, and then re-attempts the insert
func (rt *routingTable) recursiveInsertContact(c Contact) {
	bucketIndex := rt.bucketNumFor(c.ID)
	b := &rt.buckets[bucketIndex]
	min := b.bucketRange.Start.Big()
	max := b.bucketRange.End.Big()

	// midpoint of the bucket's range. the split bucket keeps [min, midpoint-1],
	// the new bucket covers [midpoint, max]
	midpoint := new(big.Int).Sub(max, min)
	midpoint.Div(midpoint, big.NewInt(2))
	midpoint.Add(midpoint, min)

	// re-size the bucket being split
	b.bucketRange.End = bits.FromBigP(new(big.Int).Sub(midpoint, big.NewInt(1)))

	// add the new bucket directly after the one being split, so ranges stay in ascending order
	insert := bucket{
		peers: make([]peer, 0, bucketSize),
		lock:  &sync.RWMutex{},
		bucketRange: &bits.Range{
			Start: bits.FromBigP(midpoint),
			End:   bits.FromBigP(max),
		},
	}
	rt.buckets = append(rt.buckets[:bucketIndex+1], append([]bucket{insert}, rt.buckets[bucketIndex+1:]...)...)
	b = &rt.buckets[bucketIndex] // re-take the pointer in case append reallocated the slice

	// keep only the peers that are still in the re-sized bucket's range,
	// and collect the ones that now belong in the new bucket
	movedPeers := []peer{}
	resizedPeers := []peer{}
	for _, p := range b.peers {
		if rt.bucketNumFor(p.Contact.ID) != bucketIndex {
			movedPeers = append(movedPeers, p)
		} else {
			resizedPeers = append(resizedPeers, p)
		}
	}
	b.peers = resizedPeers

	// re-insert the contacts that were out of range of the split bucket
	for _, p := range movedPeers {
		rt.insertContact(p.Contact)
	}

	// insert the new contact
	rt.insertContact(c)
}
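
// With toy 4-bit bounds: splitting a bucket covering [0, 15] leaves it covering
// [0, 6] and appends a new bucket covering [7, 15] right after it; peers whose
// distance falls in the upper half are re-inserted into the new bucket.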

// GetIDsForRefresh returns a refresh target for each bucket that has not been updated within `refreshInterval`
func (rt *routingTable) GetIDsForRefresh(refreshInterval time.Duration) []bits.Bitmap {
	var bitmaps []bits.Bitmap
	for i, bucket := range rt.buckets {
		if bucket.NeedsRefresh(refreshInterval) {
			bitmaps = append(bitmaps, bits.Rand().Prefix(i, false))
		}
	}
	return bitmaps
}

const rtContactSep = "-"

type rtSave struct {
	ID       string   `json:"id"`
	Contacts []string `json:"contacts"`
}

func (rt *routingTable) MarshalJSON() ([]byte, error) {
	var data rtSave
	data.ID = rt.id.Hex()
	for _, b := range rt.buckets {
		for _, c := range b.Contacts() {
			data.Contacts = append(data.Contacts, strings.Join([]string{c.ID.Hex(), c.IP.String(), strconv.Itoa(c.Port)}, rtContactSep))
		}
	}
	return json.Marshal(data)
}
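
// The serialized form looks roughly like this (ids are shown truncated here;
// real ones are full-length hex strings):
//
//	{"id":"04c2ab...","contacts":["8bf9e3...-1.2.3.4-4444","..."]}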

func (rt *routingTable) UnmarshalJSON(b []byte) error {
	var data rtSave
	err := json.Unmarshal(b, &data)
	if err != nil {
		return err
	}

	rt.id, err = bits.FromHex(data.ID)
	if err != nil {
		return errors.Prefix("decoding ID", err)
	}
	rt.reset()

	for _, s := range data.Contacts {
		parts := strings.Split(s, rtContactSep)
		if len(parts) != 3 {
			return errors.Err("decoding contact %s: wrong number of parts", s)
		}
		var c Contact
		c.ID, err = bits.FromHex(parts[0])
		if err != nil {
			return errors.Err("decoding contact %s: invalid ID: %s", s, err)
		}
		c.IP = net.ParseIP(parts[1])
		if c.IP == nil {
			return errors.Err("decoding contact %s: invalid IP", s)
		}
		c.Port, err = strconv.Atoi(parts[2])
		if err != nil {
			return errors.Err("decoding contact %s: invalid port: %s", s, err)
		}
		rt.Update(c)
	}

	return nil
}
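
// Round-trip sketch (blob and restored are hypothetical variables):
//
//	blob, err := json.Marshal(rt) // uses MarshalJSON above
//	// ... handle err, write blob to disk, read it back ...
//	var restored routingTable
//	err = json.Unmarshal(blob, &restored) // UnmarshalJSON re-adds each saved contact via rt.Update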

// RoutingTableRefresh refreshes any buckets that need to be refreshed
func RoutingTableRefresh(n *Node, refreshInterval time.Duration, parentGrp *stop.Group) {
	done := stop.New()

	for _, id := range n.rt.GetIDsForRefresh(refreshInterval) {
		done.Add(1)
		go func(id bits.Bitmap) {
			defer done.Done()
			_, _, err := FindContacts(n, id, false, parentGrp)
			if err != nil {
				log.Error("error finding contact during routing table refresh - ", err)
			}
		}(id)
	}

	done.Wait()
	done.Stop()
}
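
// A minimal usage sketch (n, refreshInterval, and grp are assumed to come from
// the surrounding node setup; this is not necessarily how the daemon wires it up):
//
//	go func() {
//		for range time.Tick(refreshInterval) {
//			RoutingTableRefresh(n, refreshInterval, grp)
//		}
//	}()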

// moveToBack moves the peer at the given index to the end of the slice, shifting
// the others left. UpdateContact uses this to keep recently seen peers at the back.
func moveToBack(peers []peer, index int) {
	if index < 0 || len(peers) <= index+1 {
		return
	}
	p := peers[index]
	for i := index; i < len(peers)-1; i++ {
		peers[i] = peers[i+1]
	}
	peers[len(peers)-1] = p
}
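
// For example, moveToBack([]peer{a, b, c, d}, 1) rearranges the slice in place
// to [a, c, d, b] (a through d are illustrative peers, not defined here).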