2018-03-07 02:15:44 +01:00
|
|
|
package dht
|
|
|
|
|
|
|
|
import (
|
2018-05-19 19:05:30 +02:00
|
|
|
"encoding/json"
|
2018-03-24 00:18:00 +01:00
|
|
|
"fmt"
|
2018-03-09 01:50:18 +01:00
|
|
|
"net"
|
2018-05-19 19:05:30 +02:00
|
|
|
"strconv"
|
2018-03-24 00:18:00 +01:00
|
|
|
"strings"
|
|
|
|
"sync"
|
2018-05-13 22:02:46 +02:00
|
|
|
"time"
|
2018-07-10 23:30:47 +02:00
|
|
|
|
2021-10-06 20:00:59 +02:00
|
|
|
"github.com/lbryio/lbry.go/v3/dht/bits"
|
|
|
|
"github.com/lbryio/lbry.go/v3/extras/stop"
|
|
|
|
|
|
|
|
"github.com/cockroachdb/errors"
|
2018-03-07 02:15:44 +01:00
|
|
|
)
|
|
|
|
|
2018-05-22 18:16:01 +02:00
|
|
|
// TODO: if routing table is ever empty (aka the node is isolated), it should re-bootstrap
|
|
|
|
|
|
|
|
// TODO: use a tree with bucket splitting instead of a fixed bucket list. include jack's optimization (see link in commit mesg)
|
|
|
|
// https://github.com/lbryio/lbry/pull/1211/commits/341b27b6d21ac027671d42458826d02735aaae41
|
|
|
|
|
2018-07-12 20:34:24 +02:00
|
|
|
// peer is a contact with extra information
type peer struct {
	// Contact is the node's identity and address.
	Contact Contact
	// Distance is the XOR distance between this peer's ID and the local node's ID.
	Distance bits.Bitmap
	// LastActivity is the last time this peer was successfully contacted (set by Touch).
	LastActivity time.Time
	// LastReplied time.Time
	// LastRequested time.Time
	// LastFailure time.Time
	// SecondLastFailure time.Time
	// NumFailures counts consecutive failures; reset to 0 by Touch, incremented by Fail.
	NumFailures int

	//<lastPublished>,
	//<originallyPublished>
	// <originalPublisherID>
}
|
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
func (p *peer) Touch() {
|
2018-05-19 19:05:30 +02:00
|
|
|
p.LastActivity = time.Now()
|
|
|
|
p.NumFailures = 0
|
2018-03-07 02:15:44 +01:00
|
|
|
}
|
|
|
|
|
2021-10-06 20:00:59 +02:00
|
|
|
// ActiveInLast returns whether a peer has responded in the last `d` duration
|
2018-05-13 22:02:46 +02:00
|
|
|
// this is used to check if the peer is "good", meaning that we believe the peer will respond to our requests
|
|
|
|
func (p *peer) ActiveInLast(d time.Duration) bool {
|
2018-06-19 19:47:13 +02:00
|
|
|
return time.Since(p.LastActivity) < d
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
2018-03-24 00:18:00 +01:00
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
// IsBad returns whether a peer is "bad", meaning that it has failed to respond to multiple pings in a row
|
|
|
|
func (p *peer) IsBad(maxFalures int) bool {
|
2018-05-19 19:05:30 +02:00
|
|
|
return p.NumFailures >= maxFalures
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Fail marks a peer as having failed to respond by incrementing its
// failure counter.
// NOTE(review): the previous comment claimed this returns whether the peer
// should be removed, but the function has no return value; removal is
// decided elsewhere via IsBad.
func (p *peer) Fail() {
	p.NumFailures++
}
|
|
|
|
|
|
|
|
// bucket holds a group of peers whose XOR distances from the local node
// all fall within one contiguous range.
type bucket struct {
	lock       *sync.RWMutex // guards peers and lastUpdate
	peers      []peer        // at most bucketSize entries; most recently seen at the back (see moveToBack)
	lastUpdate time.Time     // last time a peer in this bucket was updated or inserted
	Range      bits.Range    // capitalized because `range` is a keyword
}
|
|
|
|
|
|
|
|
func newBucket(r bits.Range) *bucket {
|
|
|
|
return &bucket{
|
|
|
|
peers: make([]peer, 0, bucketSize),
|
|
|
|
lock: &sync.RWMutex{},
|
|
|
|
Range: r,
|
|
|
|
}
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Len returns the number of peers in the bucket
|
|
|
|
func (b bucket) Len() int {
|
|
|
|
b.lock.RLock()
|
|
|
|
defer b.lock.RUnlock()
|
2018-05-19 19:05:30 +02:00
|
|
|
return len(b.peers)
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
|
|
|
|
2018-07-12 20:34:24 +02:00
|
|
|
func (b bucket) Has(c Contact) bool {
|
2018-07-10 23:30:47 +02:00
|
|
|
b.lock.RLock()
|
|
|
|
defer b.lock.RUnlock()
|
|
|
|
for _, p := range b.peers {
|
|
|
|
if p.Contact.Equals(c, true) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
// Contacts returns a slice of the bucket's contacts
|
|
|
|
func (b bucket) Contacts() []Contact {
|
|
|
|
b.lock.RLock()
|
|
|
|
defer b.lock.RUnlock()
|
2018-05-19 19:05:30 +02:00
|
|
|
contacts := make([]Contact, len(b.peers))
|
|
|
|
for i := range b.peers {
|
|
|
|
contacts[i] = b.peers[i].Contact
|
2018-03-24 00:18:00 +01:00
|
|
|
}
|
2018-05-13 22:02:46 +02:00
|
|
|
return contacts
|
2018-03-24 00:18:00 +01:00
|
|
|
}
|
|
|
|
|
2018-07-12 20:34:24 +02:00
|
|
|
// UpdatePeer marks a contact as having been successfully contacted. If
// insertIfNew and the contact does not exist yet, it is inserted.
// Returns an error if the peer's distance falls outside this bucket's range.
func (b *bucket) UpdatePeer(p peer, insertIfNew bool) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	// sanity check: the caller must route the peer to the correct bucket
	if !b.Range.Contains(p.Distance) {
		return errors.WithStack(errors.New("this bucket range does not cover this peer"))
	}

	peerIndex := find(p.Contact.ID, b.peers)
	if peerIndex >= 0 {
		// known peer: refresh it and move it to the back (most recently seen)
		b.lastUpdate = time.Now()
		b.peers[peerIndex].Touch()
		moveToBack(b.peers, peerIndex)
	} else if insertIfNew {
		hasRoom := true

		if len(b.peers) >= bucketSize {
			// bucket is full: evict the first bad peer, if any, to make room
			hasRoom = false
			for i := range b.peers {
				if b.peers[i].IsBad(maxPeerFails) {
					// TODO: Ping contact first. Only remove if it does not respond
					b.peers = append(b.peers[:i], b.peers[i+1:]...)
					hasRoom = true
					break
				}
			}
		}

		if hasRoom {
			// new peers are appended, i.e. treated as most recently seen
			b.lastUpdate = time.Now()
			p.Touch()
			b.peers = append(b.peers, p)
		}
	}

	return nil
}
|
2018-04-05 22:05:28 +02:00
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
// FailContact marks a contact as having failed, and removes it if it failed too many times
|
2018-06-14 17:48:02 +02:00
|
|
|
func (b *bucket) FailContact(id bits.Bitmap) {
|
2018-05-13 22:02:46 +02:00
|
|
|
b.lock.Lock()
|
|
|
|
defer b.lock.Unlock()
|
2018-05-19 19:05:30 +02:00
|
|
|
i := find(id, b.peers)
|
|
|
|
if i >= 0 {
|
2018-05-13 22:02:46 +02:00
|
|
|
// BEP5 says not to remove the contact until the bucket is full and you try to insert
|
2018-05-19 19:05:30 +02:00
|
|
|
b.peers[i].Fail()
|
2018-04-05 22:05:28 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
// find returns the contact in the bucket, or nil if the bucket does not contain the contact
|
2018-06-14 17:48:02 +02:00
|
|
|
func find(id bits.Bitmap, peers []peer) int {
|
2018-05-19 19:05:30 +02:00
|
|
|
for i := range peers {
|
|
|
|
if peers[i].Contact.ID.Equals(id) {
|
|
|
|
return i
|
2018-03-07 02:15:44 +01:00
|
|
|
}
|
2018-04-25 00:12:17 +02:00
|
|
|
}
|
2018-05-19 19:05:30 +02:00
|
|
|
return -1
|
2018-04-25 00:12:17 +02:00
|
|
|
}
|
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
// NeedsRefresh returns true if bucket has not been updated in the last `refreshInterval`, false otherwise
|
|
|
|
func (b *bucket) NeedsRefresh(refreshInterval time.Duration) bool {
|
|
|
|
b.lock.RLock()
|
|
|
|
defer b.lock.RUnlock()
|
2018-05-30 03:38:55 +02:00
|
|
|
return time.Since(b.lastUpdate) > refreshInterval
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
|
|
|
|
2018-07-10 23:30:47 +02:00
|
|
|
// Split divides the bucket's range into two halves and distributes the
// peers between them by distance. If all peers land in one half (and there
// is more than one peer), that half is recursively split again and the
// resulting outer range is stretched to cover the original bucket's range,
// so the pair returned always covers exactly b's range.
func (b *bucket) Split() (*bucket, *bucket) {
	b.lock.Lock()
	defer b.lock.Unlock()

	// IntervalP(i, 2) is the i-th of two equal sub-ranges of b.Range
	left := newBucket(b.Range.IntervalP(1, 2))
	right := newBucket(b.Range.IntervalP(2, 2))
	left.lastUpdate = b.lastUpdate
	right.lastUpdate = b.lastUpdate

	for _, p := range b.peers {
		if left.Range.Contains(p.Distance) {
			left.peers = append(left.peers, p)
		} else {
			right.peers = append(right.peers, p)
		}
	}

	if len(b.peers) > 1 {
		// if one half is empty, keep splitting the occupied half so the
		// split actually separates peers; then widen the empty side's
		// boundary back out to the original range edge
		if len(left.peers) == 0 {
			left, right = right.Split()
			left.Range.Start = b.Range.Start
		} else if len(right.peers) == 0 {
			left, right = left.Split()
			right.Range.End = b.Range.End
		}
	}

	return left, right
}
|
|
|
|
|
2018-05-19 19:05:30 +02:00
|
|
|
// routingTable is a Kademlia-style routing table: an ordered list of
// buckets that partition the space of XOR distances from the local node id.
type routingTable struct {
	id      bits.Bitmap // the local node's ID; bucket ranges are distances relative to it
	buckets []*bucket   // each bucket guards its own contents with its own lock
	mu      *sync.RWMutex // this mutex is write-locked only when CHANGING THE NUMBER OF BUCKETS in the table
}
|
|
|
|
|
2018-06-14 17:48:02 +02:00
|
|
|
func newRoutingTable(id bits.Bitmap) *routingTable {
|
2018-07-10 23:30:47 +02:00
|
|
|
rt := routingTable{
|
|
|
|
id: id,
|
|
|
|
mu: &sync.RWMutex{},
|
|
|
|
}
|
2018-06-19 20:06:35 +02:00
|
|
|
rt.reset()
|
|
|
|
return &rt
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rt *routingTable) reset() {
|
2018-07-10 23:30:47 +02:00
|
|
|
rt.mu.Lock()
|
|
|
|
defer rt.mu.Unlock()
|
|
|
|
rt.buckets = []*bucket{newBucket(bits.MaxRange())}
|
2018-03-07 02:15:44 +01:00
|
|
|
}
|
|
|
|
|
2018-05-19 19:05:30 +02:00
|
|
|
func (rt *routingTable) BucketInfo() string {
|
2018-07-10 23:30:47 +02:00
|
|
|
rt.mu.RLock()
|
|
|
|
defer rt.mu.RUnlock()
|
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
var bucketInfo []string
|
|
|
|
for i, b := range rt.buckets {
|
|
|
|
if b.Len() > 0 {
|
|
|
|
contacts := b.Contacts()
|
|
|
|
s := make([]string, len(contacts))
|
|
|
|
for j, c := range contacts {
|
2018-05-19 19:05:30 +02:00
|
|
|
s[j] = c.ID.HexShort()
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
2018-06-28 00:09:10 +02:00
|
|
|
bucketInfo = append(bucketInfo, fmt.Sprintf("bucket %d: (%d) %s", i, len(contacts), strings.Join(s, ", ")))
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
2018-03-24 00:18:00 +01:00
|
|
|
}
|
2018-05-13 22:02:46 +02:00
|
|
|
if len(bucketInfo) == 0 {
|
|
|
|
return "buckets are empty"
|
|
|
|
}
|
|
|
|
return strings.Join(bucketInfo, "\n")
|
2018-03-24 00:18:00 +01:00
|
|
|
}
|
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
// Update inserts or refreshes a contact
|
2018-05-19 19:05:30 +02:00
|
|
|
func (rt *routingTable) Update(c Contact) {
|
2018-07-10 23:30:47 +02:00
|
|
|
rt.mu.Lock() // write lock, because updates may cause bucket splits
|
|
|
|
defer rt.mu.Unlock()
|
|
|
|
|
2018-07-12 20:34:24 +02:00
|
|
|
b := rt.bucketFor(c.ID)
|
|
|
|
|
|
|
|
if rt.shouldSplit(b, c) {
|
|
|
|
left, right := b.Split()
|
|
|
|
|
|
|
|
for i := range rt.buckets {
|
|
|
|
if rt.buckets[i].Range.Start.Equals(left.Range.Start) {
|
|
|
|
rt.buckets = append(rt.buckets[:i], append([]*bucket{left, right}, rt.buckets[i+1:]...)...)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if left.Range.Contains(c.ID) {
|
|
|
|
b = left
|
|
|
|
} else {
|
|
|
|
b = right
|
|
|
|
}
|
2018-07-10 23:30:47 +02:00
|
|
|
}
|
2018-07-12 20:34:24 +02:00
|
|
|
|
2018-08-07 17:38:55 +02:00
|
|
|
err := b.UpdatePeer(peer{Contact: c, Distance: rt.id.Xor(c.ID)}, true)
|
|
|
|
if err != nil {
|
|
|
|
log.Error(err)
|
|
|
|
}
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Fresh refreshes a contact if its already in the routing table
|
2018-05-19 19:05:30 +02:00
|
|
|
func (rt *routingTable) Fresh(c Contact) {
|
2018-07-10 23:30:47 +02:00
|
|
|
rt.mu.RLock()
|
|
|
|
defer rt.mu.RUnlock()
|
2018-08-07 17:38:55 +02:00
|
|
|
err := rt.bucketFor(c.ID).UpdatePeer(peer{Contact: c, Distance: rt.id.Xor(c.ID)}, false)
|
|
|
|
if err != nil {
|
|
|
|
log.Error(err)
|
|
|
|
}
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// FailContact marks a contact as having failed, and removes it if it failed too many times
|
2018-05-19 19:05:30 +02:00
|
|
|
func (rt *routingTable) Fail(c Contact) {
|
2018-07-10 23:30:47 +02:00
|
|
|
rt.mu.RLock()
|
|
|
|
defer rt.mu.RUnlock()
|
2018-05-19 19:05:30 +02:00
|
|
|
rt.bucketFor(c.ID).FailContact(c.ID)
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
2018-03-24 00:18:00 +01:00
|
|
|
|
2018-07-10 23:30:47 +02:00
|
|
|
// GetClosest returns the closest `limit` contacts from the routing table.
|
|
|
|
// This is a locking wrapper around getClosest()
|
|
|
|
func (rt *routingTable) GetClosest(target bits.Bitmap, limit int) []Contact {
|
|
|
|
rt.mu.RLock()
|
|
|
|
defer rt.mu.RUnlock()
|
|
|
|
return rt.getClosest(target, limit)
|
2018-06-29 21:47:00 +02:00
|
|
|
}
|
|
|
|
|
2018-07-10 23:30:47 +02:00
|
|
|
// getClosest returns the closest `limit` contacts from the routing table
|
|
|
|
func (rt *routingTable) getClosest(target bits.Bitmap, limit int) []Contact {
|
2018-07-25 17:44:11 +02:00
|
|
|
var contacts []Contact
|
2018-06-26 19:42:36 +02:00
|
|
|
for _, b := range rt.buckets {
|
2018-07-25 17:44:11 +02:00
|
|
|
contacts = append(contacts, b.Contacts()...)
|
2018-03-07 02:15:44 +01:00
|
|
|
}
|
2018-07-10 23:30:47 +02:00
|
|
|
|
2018-07-25 17:44:11 +02:00
|
|
|
sortByDistance(contacts, target)
|
|
|
|
if len(contacts) > limit {
|
|
|
|
contacts = contacts[:limit]
|
2018-03-07 02:15:44 +01:00
|
|
|
}
|
2018-07-25 17:44:11 +02:00
|
|
|
|
2018-04-28 02:16:12 +02:00
|
|
|
return contacts
|
2018-03-07 02:15:44 +01:00
|
|
|
}
|
|
|
|
|
2018-04-28 02:16:12 +02:00
|
|
|
// Count returns the number of contacts in the routing table
|
2018-05-19 19:05:30 +02:00
|
|
|
func (rt *routingTable) Count() int {
|
2018-07-10 23:30:47 +02:00
|
|
|
rt.mu.RLock()
|
|
|
|
defer rt.mu.RUnlock()
|
2018-04-25 03:12:32 +02:00
|
|
|
count := 0
|
|
|
|
for _, bucket := range rt.buckets {
|
2018-06-21 21:05:45 +02:00
|
|
|
count += bucket.Len()
|
2018-04-25 03:12:32 +02:00
|
|
|
}
|
|
|
|
return count
|
|
|
|
}
|
|
|
|
|
2018-06-29 21:47:00 +02:00
|
|
|
// Len returns the number of buckets in the routing table
|
|
|
|
func (rt *routingTable) Len() int {
|
2018-07-10 23:30:47 +02:00
|
|
|
rt.mu.RLock()
|
|
|
|
defer rt.mu.RUnlock()
|
2018-06-29 21:47:00 +02:00
|
|
|
return len(rt.buckets)
|
|
|
|
}
|
|
|
|
|
2018-07-12 20:34:24 +02:00
|
|
|
func (rt *routingTable) bucketFor(target bits.Bitmap) *bucket {
|
2018-05-01 22:18:38 +02:00
|
|
|
if rt.id.Equals(target) {
|
|
|
|
panic("routing table does not have a bucket for its own id")
|
|
|
|
}
|
2018-06-26 19:42:36 +02:00
|
|
|
distance := target.Xor(rt.id)
|
2018-07-12 20:34:24 +02:00
|
|
|
for _, b := range rt.buckets {
|
2018-07-10 23:30:47 +02:00
|
|
|
if b.Range.Contains(distance) {
|
2018-07-12 20:34:24 +02:00
|
|
|
return b
|
2018-06-26 19:42:36 +02:00
|
|
|
}
|
|
|
|
}
|
2018-07-12 20:34:24 +02:00
|
|
|
panic("target is not contained in any buckets")
|
2018-05-13 22:02:46 +02:00
|
|
|
}
|
|
|
|
|
2018-07-12 20:34:24 +02:00
|
|
|
// shouldSplit decides whether bucket b must be split before inserting
// contact c, per the Kademlia rule: split a full bucket only if it covers
// our own node id, or if c would be among our bucketSize closest contacts.
func (rt *routingTable) shouldSplit(b *bucket, c Contact) bool {
	// already-known contacts are just refreshed; no split needed
	if b.Has(c) {
		return false
	}
	if b.Len() >= bucketSize {
		if b.Range.Start.Equals(bits.Bitmap{}) { // this is the bucket covering our node id
			return true
		}
		// split if c is closer to us than the k-th closest contact we know
		kClosest := rt.getClosest(rt.id, bucketSize)
		kthClosest := kClosest[len(kClosest)-1]
		if rt.id.Closer(c.ID, kthClosest.ID) {
			return true
		}
	}
	return false
}
|
|
|
|
|
2018-08-07 17:53:29 +02:00
|
|
|
//func (rt *routingTable) printBucketInfo() {
|
|
|
|
// fmt.Printf("there are %d contacts in %d buckets\n", rt.Count(), rt.Len())
|
|
|
|
// for i, b := range rt.buckets {
|
|
|
|
// fmt.Printf("bucket %d, %d contacts\n", i+1, len(b.peers))
|
|
|
|
// fmt.Printf(" start : %s\n", b.Range.Start.String())
|
|
|
|
// fmt.Printf(" stop : %s\n", b.Range.End.String())
|
|
|
|
// fmt.Println("")
|
|
|
|
// }
|
|
|
|
//}
|
2018-06-28 00:09:10 +02:00
|
|
|
|
2018-06-14 17:48:02 +02:00
|
|
|
func (rt *routingTable) GetIDsForRefresh(refreshInterval time.Duration) []bits.Bitmap {
|
|
|
|
var bitmaps []bits.Bitmap
|
2018-05-13 22:02:46 +02:00
|
|
|
for i, bucket := range rt.buckets {
|
|
|
|
if bucket.NeedsRefresh(refreshInterval) {
|
2018-06-14 17:48:02 +02:00
|
|
|
bitmaps = append(bitmaps, bits.Rand().Prefix(i, false))
|
2018-03-07 02:15:44 +01:00
|
|
|
}
|
|
|
|
}
|
2018-05-13 22:02:46 +02:00
|
|
|
return bitmaps
|
2018-03-07 02:15:44 +01:00
|
|
|
}
|
|
|
|
|
2018-05-19 19:05:30 +02:00
|
|
|
// rtContactSep separates the ID, IP, and port fields of a serialized contact.
const rtContactSep = "-"

// rtSave is the JSON-serializable form of a routing table.
type rtSave struct {
	ID       string   `json:"id"`       // hex-encoded node ID
	Contacts []string `json:"contacts"` // each entry is "ID-IP-port" (joined with rtContactSep)
}
|
|
|
|
|
|
|
|
func (rt *routingTable) MarshalJSON() ([]byte, error) {
|
|
|
|
var data rtSave
|
|
|
|
data.ID = rt.id.Hex()
|
|
|
|
for _, b := range rt.buckets {
|
|
|
|
for _, c := range b.Contacts() {
|
|
|
|
data.Contacts = append(data.Contacts, strings.Join([]string{c.ID.Hex(), c.IP.String(), strconv.Itoa(c.Port)}, rtContactSep))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return json.Marshal(data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// UnmarshalJSON restores a routing table from its rtSave form: it decodes
// the node ID, resets the table to a single full-range bucket, then
// re-inserts each "ID-IP-port" contact via Update (which re-splits buckets
// as needed). Returns on the first malformed contact.
func (rt *routingTable) UnmarshalJSON(b []byte) error {
	var data rtSave
	err := json.Unmarshal(b, &data)
	if err != nil {
		return err
	}

	rt.id, err = bits.FromHex(data.ID)
	if err != nil {
		return errors.WithMessage(err, "decoding ID")
	}
	// wipe any existing buckets before repopulating
	rt.reset()

	for _, s := range data.Contacts {
		parts := strings.Split(s, rtContactSep)
		if len(parts) != 3 {
			return errors.WithStack(errors.Newf("decoding contact %s: wrong number of parts", s))
		}
		var c Contact
		c.ID, err = bits.FromHex(parts[0])
		if err != nil {
			return errors.WithStack(errors.Newf("decoding contact %s: invalid ID: %s", s, err))
		}
		c.IP = net.ParseIP(parts[1])
		if c.IP == nil {
			return errors.WithStack(errors.Newf("decoding contact %s: invalid IP", s))
		}
		c.Port, err = strconv.Atoi(parts[2])
		if err != nil {
			return errors.WithStack(errors.Newf("decoding contact %s: invalid port: %s", s, err))
		}
		rt.Update(c)
	}

	return nil
}
|
|
|
|
|
2018-05-13 22:02:46 +02:00
|
|
|
// RoutingTableRefresh refreshes any buckets that need to be refreshed
|
2018-06-25 22:49:40 +02:00
|
|
|
func RoutingTableRefresh(n *Node, refreshInterval time.Duration, parentGrp *stop.Group) {
|
|
|
|
done := stop.New()
|
2018-05-13 22:02:46 +02:00
|
|
|
|
|
|
|
for _, id := range n.rt.GetIDsForRefresh(refreshInterval) {
|
2018-06-13 18:45:47 +02:00
|
|
|
done.Add(1)
|
2018-06-14 17:48:02 +02:00
|
|
|
go func(id bits.Bitmap) {
|
2018-06-13 18:45:47 +02:00
|
|
|
defer done.Done()
|
2018-06-25 22:49:40 +02:00
|
|
|
_, _, err := FindContacts(n, id, false, parentGrp)
|
2018-06-13 18:45:47 +02:00
|
|
|
if err != nil {
|
2018-05-30 03:38:55 +02:00
|
|
|
log.Error("error finding contact during routing table refresh - ", err)
|
|
|
|
}
|
2018-05-13 22:02:46 +02:00
|
|
|
}(id)
|
|
|
|
}
|
|
|
|
|
2018-06-13 18:45:47 +02:00
|
|
|
done.Wait()
|
|
|
|
done.Stop()
|
2018-03-24 00:18:00 +01:00
|
|
|
}
|
2018-05-19 19:05:30 +02:00
|
|
|
|
|
|
|
func moveToBack(peers []peer, index int) {
|
|
|
|
if index < 0 || len(peers) <= index+1 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
p := peers[index]
|
|
|
|
for i := index; i < len(peers)-1; i++ {
|
|
|
|
peers[i] = peers[i+1]
|
|
|
|
}
|
|
|
|
peers[len(peers)-1] = p
|
|
|
|
}
|