move most dht code into Node

parent 34ab2cd1ae
commit 079a6bf610

13 changed files with 758 additions and 727 deletions
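This commit moves the UDP connection, listen loop, packet handler, transaction manager, routing table, peer store, and token manager out of DHT and into a new Node type (dht/node.go below); DHT keeps its config, its local Contact, and the join/announce logic, and delegates the rest. A minimal sketch of the resulting lifecycle, assuming only the exported API visible in this diff (the address is an illustrative value, not from the commit):

    // Build a DHT whose connection internals now live in an embedded *Node.
    conf := dht.NewStandardConfig()
    conf.Address = "127.0.0.1:4444" // illustrative ip:port
    d, err := dht.New(conf)         // parses the Contact and calls NewNode
    if err != nil {
        log.Fatal(err)
    }
    if err := d.Start(); err != nil { // opens the UDP socket and hands it to node.Connect
        log.Fatal(err)
    }
    defer d.Shutdown() // stops DHT goroutines, then node.Shutdown closes tokens and the conn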
dht/bootstrap.go (new file, +7)

@@ -0,0 +1,7 @@
+package dht
+
+// BootstrapNode is a DHT node used for bootstrapping the network.
+type BootstrapNode struct {
+    // node
+    node *Node
+}
dht/dht.go (219 lines changed)

@@ -11,9 +11,9 @@ import (
 
     "github.com/lbryio/errors.go"
     "github.com/lbryio/lbry.go/stopOnce"
-    "github.com/spf13/cast"
 
     log "github.com/sirupsen/logrus"
+    "github.com/spf13/cast"
 )
 
 func init() {
@@ -42,12 +42,6 @@ const compactNodeInfoLength = nodeIDLength + 6
 
 const tokenSecretRotationInterval = 5 * time.Minute // how often the token-generating secret is rotated
 
-// packet represents the information receive from udp.
-type packet struct {
-    data  []byte
-    raddr *net.UDPAddr
-}
-
 // Config represents the configure of dht.
 type Config struct {
     // this node's address. format is `ip:port`
@@ -72,33 +66,14 @@ func NewStandardConfig() *Config {
     }
 }
 
-// UDPConn allows using a mocked connection to test sending/receiving data
-type UDPConn interface {
-    ReadFromUDP([]byte) (int, *net.UDPAddr, error)
-    WriteToUDP([]byte, *net.UDPAddr) (int, error)
-    SetReadDeadline(time.Time) error
-    SetWriteDeadline(time.Time) error
-    Close() error
-}
-
 // DHT represents a DHT node.
 type DHT struct {
     // config
     conf *Config
-    // UDP connection for sending and receiving data
-    conn UDPConn
-    // the local dht node
+    // local contact
+    contact Contact
+    // node
     node *Node
-    // routing table
-    rt *routingTable
-    // channel of incoming packets
-    packets chan packet
-    // data store
-    store *peerStore
-    // transaction manager
-    tm *transactionManager
-    // token manager
-    tokens *tokenManager
     // stopper to shut down DHT
     stop *stopOnce.Stopper
     // wait group for all the things that need to be stopped when DHT shuts down
@@ -113,107 +88,27 @@ func New(config *Config) (*DHT, error) {
         config = NewStandardConfig()
     }
 
-    var id Bitmap
-    if config.NodeID == "" {
-        id = RandomBitmapP()
-    } else {
-        id = BitmapFromHexP(config.NodeID)
-    }
-
-    ip, port, err := net.SplitHostPort(config.Address)
+    contact, err := getContact(config.NodeID, config.Address)
     if err != nil {
-        return nil, errors.Err(err)
-    } else if ip == "" {
-        return nil, errors.Err("address does not contain an IP")
-    } else if port == "" {
-        return nil, errors.Err("address does not contain a port")
+        return nil, err
     }
 
-    portInt, err := cast.ToIntE(port)
+    node, err := NewNode(contact.id)
     if err != nil {
-        return nil, errors.Err(err)
-    }
-
-    node := &Node{id: id, ip: net.ParseIP(ip), port: portInt}
-    if node.ip == nil {
-        return nil, errors.Err("invalid ip")
+        return nil, err
     }
 
     d := &DHT{
         conf:    config,
+        contact: contact,
         node:    node,
-        rt:      newRoutingTable(node),
-        packets: make(chan packet),
-        store:   newPeerStore(),
         stop:    stopOnce.New(),
         stopWG:  &sync.WaitGroup{},
         joined:  make(chan struct{}),
-        tokens:  &tokenManager{},
     }
-    d.tm = newTransactionManager(d)
-    d.tokens.Start(tokenSecretRotationInterval)
     return d, nil
 }
 
-// init initializes global variables.
-func (dht *DHT) init() error {
-    listener, err := net.ListenPacket(network, dht.conf.Address)
-    if err != nil {
-        return errors.Err(err)
-    }
-
-    dht.conn = listener.(*net.UDPConn)
-
-    if dht.conf.PrintState > 0 {
-        go func() {
-            t := time.NewTicker(dht.conf.PrintState)
-            for {
-                dht.PrintState()
-                select {
-                case <-t.C:
-                case <-dht.stop.Chan():
-                    return
-                }
-            }
-        }()
-    }
-
-    return nil
-}
-
-// listen receives message from udp.
-func (dht *DHT) listen() {
-    dht.stopWG.Add(1)
-    defer dht.stopWG.Done()
-
-    buf := make([]byte, udpMaxMessageLength)
-
-    for {
-        select {
-        case <-dht.stop.Chan():
-            return
-        default:
-        }
-
-        dht.conn.SetReadDeadline(time.Now().Add(200 * time.Millisecond)) // need this to periodically check shutdown chan
-        n, raddr, err := dht.conn.ReadFromUDP(buf)
-        if err != nil {
-            if e, ok := err.(net.Error); !ok || !e.Timeout() {
-                log.Errorf("udp read error: %v", err)
-            }
-            continue
-        } else if raddr == nil {
-            log.Errorf("udp read with no raddr")
-            continue
-        }
-
-        data := make([]byte, n)
-        copy(data, buf[:n]) // slices use the same underlying array, so we need a new one for each packet
-
-        dht.packets <- packet{data: data, raddr: raddr}
-    }
-}
-
 // join makes current node join the dht network.
 func (dht *DHT) join() {
     defer close(dht.joined) // if anyone's waiting for join to finish, they'll know its done
@@ -243,34 +138,21 @@ func (dht *DHT) join() {
     }
 }
 
-func (dht *DHT) runHandler() {
-    dht.stopWG.Add(1)
-    defer dht.stopWG.Done()
-
-    var pkt packet
-
-    for {
-        select {
-        case pkt = <-dht.packets:
-            handlePacket(dht, pkt)
-        case <-dht.stop.Chan():
-            return
-        }
-    }
-}
-
 // Start starts the dht
 func (dht *DHT) Start() error {
-    err := dht.init()
+    listener, err := net.ListenPacket(network, dht.conf.Address)
+    if err != nil {
+        return errors.Err(err)
+    }
+    conn := listener.(*net.UDPConn)
+
+    err = dht.node.Connect(conn)
     if err != nil {
         return err
     }
 
-    go dht.listen()
-    go dht.runHandler()
-
     dht.join()
-    log.Debugf("[%s] DHT ready on %s (%d nodes found during join)", dht.node.id.HexShort(), dht.node.Addr().String(), dht.rt.Count())
+    log.Debugf("[%s] DHT ready on %s (%d nodes found during join)", dht.node.id.HexShort(), dht.contact.Addr().String(), dht.node.rt.Count())
     return nil
 }
@@ -286,8 +168,7 @@ func (dht *DHT) Shutdown() {
     log.Debugf("[%s] DHT shutting down", dht.node.id.HexShort())
     dht.stop.Stop()
     dht.stopWG.Wait()
-    dht.tokens.Stop()
-    dht.conn.Close()
+    dht.node.Shutdown()
     log.Debugf("[%s] DHT stopped", dht.node.id.HexShort())
 }
@@ -298,8 +179,8 @@ func (dht *DHT) Ping(addr string) error {
         return err
     }
 
-    tmpNode := Node{id: RandomBitmapP(), ip: raddr.IP, port: raddr.Port}
-    res := dht.tm.Send(tmpNode, Request{Method: pingMethod})
+    tmpNode := Contact{id: RandomBitmapP(), ip: raddr.IP, port: raddr.Port}
+    res := dht.node.Send(tmpNode, Request{Method: pingMethod})
     if res == nil {
         return errors.Err("no response from node %s", addr)
     }
@@ -308,22 +189,22 @@ func (dht *DHT) Ping(addr string) error {
 }
 
 // Get returns the list of nodes that have the blob for the given hash
-func (dht *DHT) Get(hash Bitmap) ([]Node, error) {
-    nf := newNodeFinder(dht, hash, true)
+func (dht *DHT) Get(hash Bitmap) ([]Contact, error) {
+    nf := newContactFinder(dht.node, hash, true)
     res, err := nf.Find()
     if err != nil {
         return nil, err
     }
 
     if res.Found {
-        return res.Nodes, nil
+        return res.Contacts, nil
     }
     return nil, nil
 }
 
 // Announce announces to the DHT that this node has the blob for the given hash
 func (dht *DHT) Announce(hash Bitmap) error {
-    nf := newNodeFinder(dht, hash, false)
+    nf := newContactFinder(dht.node, hash, false)
     res, err := nf.Find()
     if err != nil {
         return err
@@ -331,18 +212,18 @@ func (dht *DHT) Announce(hash Bitmap) error {
 
     // TODO: if this node is closer than farthest peer, store locally and pop farthest peer
 
-    for _, node := range res.Nodes {
+    for _, node := range res.Contacts {
         go dht.storeOnNode(hash, node)
     }
 
     return nil
 }
 
-func (dht *DHT) storeOnNode(hash Bitmap, node Node) {
+func (dht *DHT) storeOnNode(hash Bitmap, node Contact) {
     dht.stopWG.Add(1)
     defer dht.stopWG.Done()
 
-    resCh := dht.tm.SendAsync(context.Background(), node, Request{
+    resCh := dht.node.SendAsync(context.Background(), node, Request{
         Method: findValueMethod,
         Arg:    &hash,
     })
@@ -358,30 +239,30 @@ func (dht *DHT) storeOnNode(hash Bitmap, node Node) {
         return // request timed out
     }
 
-    dht.tm.SendAsync(context.Background(), node, Request{
+    dht.node.SendAsync(context.Background(), node, Request{
         Method: storeMethod,
         StoreArgs: &storeArgs{
             BlobHash: hash,
             Value: storeArgsValue{
                 Token:  res.Token,
-                LbryID: dht.node.id,
-                Port:   dht.node.port,
+                LbryID: dht.contact.id,
+                Port:   dht.contact.port,
             },
         },
     })
 }
 
 func (dht *DHT) PrintState() {
-    log.Printf("DHT node %s at %s", dht.node.String(), time.Now().Format(time.RFC822Z))
-    log.Printf("Outstanding transactions: %d", dht.tm.Count())
-    log.Printf("Stored hashes: %d", dht.store.CountStoredHashes())
+    log.Printf("DHT node %s at %s", dht.contact.String(), time.Now().Format(time.RFC822Z))
+    log.Printf("Outstanding transactions: %d", dht.node.CountActiveTransactions())
+    log.Printf("Stored hashes: %d", dht.node.store.CountStoredHashes())
     log.Printf("Buckets:")
-    for _, line := range strings.Split(dht.rt.BucketInfo(), "\n") {
+    for _, line := range strings.Split(dht.node.rt.BucketInfo(), "\n") {
         log.Println(line)
     }
 }
 
-func printNodeList(list []Node) {
+func printNodeList(list []Contact) {
     for i, n := range list {
         log.Printf("%d) %s", i, n.String())
     }
 }
@@ -414,3 +295,33 @@ func MakeTestDHT(numNodes int) []*DHT {
 
     return dhts
 }
+
+func getContact(nodeID, addr string) (Contact, error) {
+    var c Contact
+    if nodeID == "" {
+        c.id = RandomBitmapP()
+    } else {
+        c.id = BitmapFromHexP(nodeID)
+    }
+
+    ip, port, err := net.SplitHostPort(addr)
+    if err != nil {
+        return c, errors.Err(err)
+    } else if ip == "" {
+        return c, errors.Err("address does not contain an IP")
+    } else if port == "" {
+        return c, errors.Err("address does not contain a port")
+    }
+
+    c.ip = net.ParseIP(ip)
+    if c.ip == nil {
+        return c, errors.Err("invalid ip")
+    }
+
+    c.port, err = cast.ToIntE(port)
+    if err != nil {
+        return c, errors.Err(err)
+    }
+
+    return c, nil
+}
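getContact pulls the node-ID and ip:port parsing that New previously inlined into one helper, so the random-ID and fixed-ID paths share the same validation. A package-internal sketch of what it accepts and rejects (address values are illustrative):

    c, err := getContact("", "127.0.0.1:4444") // empty nodeID: a random Bitmap is generated
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("[%s] contact %s:%d", c.id.HexShort(), c.ip.String(), c.port)

    _, err = getContact("", ":4444")      // fails: address does not contain an IP
    _, err = getContact("", "127.0.0.1:") // fails: address does not contain a port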
@@ -20,12 +20,12 @@ func TestNodeFinder_FindNodes(t *testing.T) {
         }
     }()
 
-    nf := newNodeFinder(dhts[2], RandomBitmapP(), false)
+    nf := newContactFinder(dhts[2].node, RandomBitmapP(), false)
     res, err := nf.Find()
     if err != nil {
         t.Fatal(err)
     }
-    foundNodes, found := res.Nodes, res.Found
+    foundNodes, found := res.Contacts, res.Found
 
     if found {
         t.Fatal("something was found, but it should not have been")

@@ -42,7 +42,7 @@ func TestNodeFinder_FindNodes(t *testing.T) {
         if n.id.Equals(dhts[0].node.id) {
             foundOne = true
         }
-        //if n.id.Equals(dhts[1].node.id) {
+        //if n.id.Equals(dhts[1].node.c.id) {
         //	foundTwo = true
         //}
     }

@@ -51,7 +51,7 @@ func TestNodeFinder_FindNodes(t *testing.T) {
         t.Errorf("did not find first node %s", dhts[0].node.id.Hex())
     }
     //if !foundTwo {
-    //	t.Errorf("did not find second node %s", dhts[1].node.id.Hex())
+    //	t.Errorf("did not find second node %s", dhts[1].node.c.id.Hex())
     //}
 }

@@ -64,15 +64,15 @@ func TestNodeFinder_FindValue(t *testing.T) {
     }()
 
     blobHashToFind := RandomBitmapP()
-    nodeToFind := Node{id: RandomBitmapP(), ip: net.IPv4(1, 2, 3, 4), port: 5678}
-    dhts[0].store.Upsert(blobHashToFind, nodeToFind)
+    nodeToFind := Contact{id: RandomBitmapP(), ip: net.IPv4(1, 2, 3, 4), port: 5678}
+    dhts[0].node.store.Upsert(blobHashToFind, nodeToFind)
 
-    nf := newNodeFinder(dhts[2], blobHashToFind, true)
+    nf := newContactFinder(dhts[2].node, blobHashToFind, true)
     res, err := nf.Find()
     if err != nil {
         t.Fatal(err)
     }
-    foundNodes, found := res.Nodes, res.Found
+    foundNodes, found := res.Contacts, res.Found
 
     if !found {
         t.Fatal("node was not found")
@@ -223,7 +223,7 @@ type Response struct {
     ID           messageID
     NodeID       Bitmap
     Data         string
-    FindNodeData []Node
+    Contacts     []Contact
     FindValueKey string
     Token        string
 }

@@ -239,7 +239,7 @@ func (r Response) ArgsDebug() string {
     }
 
     str += "|"
-    for _, c := range r.FindNodeData {
+    for _, c := range r.Contacts {
         str += c.Addr().String() + ":" + c.id.HexShort() + ","
     }
     str = strings.TrimRight(str, ",") + "|"

@@ -268,8 +268,8 @@ func (r Response) MarshalBencode() ([]byte, error) {
     }
 
     var contacts [][]byte
-    for _, n := range r.FindNodeData {
-        compact, err := n.MarshalCompact()
+    for _, c := range r.Contacts {
+        compact, err := c.MarshalCompact()
         if err != nil {
             return nil, err
         }

@@ -282,12 +282,12 @@ func (r Response) MarshalBencode() ([]byte, error) {
     } else if r.Token != "" {
         // findValue failure falling back to findNode
         data[headerPayloadField] = map[string]interface{}{
-            contactsField: r.FindNodeData,
+            contactsField: r.Contacts,
             tokenField:    r.Token,
         }
     } else {
         // straight up findNode
-        data[headerPayloadField] = r.FindNodeData
+        data[headerPayloadField] = r.Contacts
     }
 
     return bencode.EncodeBytes(data)

@@ -314,7 +314,7 @@ func (r *Response) UnmarshalBencode(b []byte) error {
     }
 
     // maybe data is a list of nodes (response to findNode)?
-    err = bencode.DecodeBytes(raw.Data, &r.FindNodeData)
+    err = bencode.DecodeBytes(raw.Data, &r.Contacts)
     if err == nil {
         return nil
     }

@@ -335,25 +335,25 @@ func (r *Response) UnmarshalBencode(b []byte) error {
     }
 
     if contacts, ok := rawData[contactsField]; ok {
-        err = bencode.DecodeBytes(contacts, &r.FindNodeData)
+        err = bencode.DecodeBytes(contacts, &r.Contacts)
         if err != nil {
             return err
         }
     } else {
         for k, v := range rawData {
             r.FindValueKey = k
-            var compactNodes [][]byte
-            err = bencode.DecodeBytes(v, &compactNodes)
+            var compactContacts [][]byte
+            err = bencode.DecodeBytes(v, &compactContacts)
             if err != nil {
                 return err
             }
-            for _, compact := range compactNodes {
-                var uncompactedNode Node
-                err = uncompactedNode.UnmarshalCompact(compact)
+            for _, compact := range compactContacts {
+                var c Contact
+                err = c.UnmarshalCompact(compact)
                 if err != nil {
                     return err
                 }
-                r.FindNodeData = append(r.FindNodeData, uncompactedNode)
+                r.Contacts = append(r.Contacts, c)
             }
             break
         }
@@ -77,7 +77,7 @@ func TestBencodeFindNodesResponse(t *testing.T) {
     res := Response{
         ID:     newMessageID(),
         NodeID: RandomBitmapP(),
-        FindNodeData: []Node{
+        Contacts: []Contact{
             {id: RandomBitmapP(), ip: net.IPv4(1, 2, 3, 4).To4(), port: 5678},
             {id: RandomBitmapP(), ip: net.IPv4(4, 3, 2, 1).To4(), port: 8765},
         },

@@ -103,7 +103,7 @@ func TestBencodeFindValueResponse(t *testing.T) {
         NodeID:       RandomBitmapP(),
         FindValueKey: RandomBitmapP().RawString(),
         Token:        "arst",
-        FindNodeData: []Node{
+        Contacts: []Contact{
             {id: RandomBitmapP(), ip: net.IPv4(1, 2, 3, 4).To4(), port: 5678},
         },
     }

@@ -182,7 +182,7 @@ func compareResponses(t *testing.T, res, res2 Response) {
     if res.Token != res2.Token {
         t.Errorf("expected Token %s, got %s", res.Token, res2.Token)
     }
-    if !reflect.DeepEqual(res.FindNodeData, res2.FindNodeData) {
-        t.Errorf("expected FindNodeData %s, got %s", spew.Sdump(res.FindNodeData), spew.Sdump(res2.FindNodeData))
+    if !reflect.DeepEqual(res.Contacts, res2.Contacts) {
+        t.Errorf("expected FindNodeData %s, got %s", spew.Sdump(res.Contacts), spew.Sdump(res2.Contacts))
     }
 }
dht/node.go (new file, +403)
@@ -0,0 +1,403 @@
+package dht
+
+import (
+    "context"
+    "encoding/hex"
+    "net"
+    "strings"
+    "sync"
+    "time"
+
+    "github.com/lbryio/errors.go"
+    "github.com/lbryio/lbry.go/stopOnce"
+    "github.com/lbryio/lbry.go/util"
+
+    "github.com/davecgh/go-spew/spew"
+    "github.com/lyoshenka/bencode"
+    log "github.com/sirupsen/logrus"
+)
+
+// packet represents the information receive from udp.
+type packet struct {
+    data  []byte
+    raddr *net.UDPAddr
+}
+
+// UDPConn allows using a mocked connection to test sending/receiving data
+type UDPConn interface {
+    ReadFromUDP([]byte) (int, *net.UDPAddr, error)
+    WriteToUDP([]byte, *net.UDPAddr) (int, error)
+    SetReadDeadline(time.Time) error
+    SetWriteDeadline(time.Time) error
+    Close() error
+}
+
+type Node struct {
+    // TODO: replace Contact with id. ip and port aren't used except when connecting
+    id Bitmap
+    // UDP connection for sending and receiving data
+    conn UDPConn
+    // token manager
+    tokens *tokenManager
+
+    txLock       *sync.RWMutex
+    transactions map[messageID]*transaction
+
+    // routing table
+    rt *routingTable
+    // data store
+    store *peerStore
+
+    stop   *stopOnce.Stopper
+    stopWG *sync.WaitGroup
+}
+
+// New returns a Node pointer.
+func NewNode(id Bitmap) (*Node, error) {
+    n := &Node{
+        id:    id,
+        rt:    newRoutingTable(id),
+        store: newPeerStore(),
+
+        txLock:       &sync.RWMutex{},
+        transactions: make(map[messageID]*transaction),
+
+        stop:   stopOnce.New(),
+        stopWG: &sync.WaitGroup{},
+        tokens: &tokenManager{},
+    }
+
+    n.tokens.Start(tokenSecretRotationInterval)
+    return n, nil
+}
+
+func (n *Node) Connect(conn UDPConn) error {
+    n.conn = conn
+
+    //if dht.conf.PrintState > 0 {
+    //	go func() {
+    //		t := time.NewTicker(dht.conf.PrintState)
+    //		for {
+    //			dht.PrintState()
+    //			select {
+    //			case <-t.C:
+    //			case <-dht.stop.Chan():
+    //				return
+    //			}
+    //		}
+    //	}()
+    //}
+
+    packets := make(chan packet)
+
+    go func() {
+        n.stopWG.Add(1)
+        defer n.stopWG.Done()
+
+        buf := make([]byte, udpMaxMessageLength)
+
+        for {
+            select {
+            case <-n.stop.Chan():
+                return
+            default:
+            }
+
+            n.conn.SetReadDeadline(time.Now().Add(200 * time.Millisecond)) // need this to periodically check shutdown chan
+            n, raddr, err := n.conn.ReadFromUDP(buf)
+            if err != nil {
+                if e, ok := err.(net.Error); !ok || !e.Timeout() {
+                    log.Errorf("udp read error: %v", err)
+                }
+                continue
+            } else if raddr == nil {
+                log.Errorf("udp read with no raddr")
+                continue
+            }
+
+            data := make([]byte, n)
+            copy(data, buf[:n]) // slices use the same underlying array, so we need a new one for each packet
+
+            packets <- packet{data: data, raddr: raddr}
+        }
+    }()
+
+    go func() {
+        n.stopWG.Add(1)
+        defer n.stopWG.Done()
+
+        var pkt packet
+
+        for {
+            select {
+            case pkt = <-packets:
+                n.handlePacket(pkt)
+            case <-n.stop.Chan():
+                return
+            }
+        }
+    }()
+
+    return nil
+}
+
+// Shutdown shuts down the node
+func (n *Node) Shutdown() {
+    log.Debugf("[%s] node shutting down", n.id.HexShort())
+    n.stop.Stop()
+    n.stopWG.Wait()
+    n.tokens.Stop()
+    n.conn.Close()
+    log.Debugf("[%s] node stopped", n.id.HexShort())
+}
+
+// handlePacket handles packets received from udp.
+func (n *Node) handlePacket(pkt packet) {
+    //log.Debugf("[%s] Received message from %s (%d bytes) %s", n.id.HexShort(), pkt.raddr.String(), len(pkt.data), hex.EncodeToString(pkt.data))
+
+    if !util.InSlice(string(pkt.data[0:5]), []string{"d1:0i", "di0ei"}) {
+        log.Errorf("[%s] data is not a well-formatted dict: (%d bytes) %s", n.id.HexShort(), len(pkt.data), hex.EncodeToString(pkt.data))
+        return
+    }
+
+    // TODO: test this stuff more thoroughly
+
+    // the following is a bit of a hack, but it lets us avoid decoding every message twice
+    // it depends on the data being a dict with 0 as the first key (so it starts with "d1:0i") and the message type as the first value
+
+    switch pkt.data[5] {
+    case '0' + requestType:
+        request := Request{}
+        err := bencode.DecodeBytes(pkt.data, &request)
+        if err != nil {
+            log.Errorf("[%s] error decoding request from %s: %s: (%d bytes) %s", n.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
+            return
+        }
+        log.Debugf("[%s] query %s: received request from %s: %s(%s)", n.id.HexShort(), request.ID.HexShort(), request.NodeID.HexShort(), request.Method, request.ArgsDebug())
+        n.handleRequest(pkt.raddr, request)
+
+    case '0' + responseType:
+        response := Response{}
+        err := bencode.DecodeBytes(pkt.data, &response)
+        if err != nil {
+            log.Errorf("[%s] error decoding response from %s: %s: (%d bytes) %s", n.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
+            return
+        }
+        log.Debugf("[%s] query %s: received response from %s: %s", n.id.HexShort(), response.ID.HexShort(), response.NodeID.HexShort(), response.ArgsDebug())
+        n.handleResponse(pkt.raddr, response)
+
+    case '0' + errorType:
+        e := Error{}
+        err := bencode.DecodeBytes(pkt.data, &e)
+        if err != nil {
+            log.Errorf("[%s] error decoding error from %s: %s: (%d bytes) %s", n.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
+            return
+        }
+        log.Debugf("[%s] query %s: received error from %s: %s", n.id.HexShort(), e.ID.HexShort(), e.NodeID.HexShort(), e.ExceptionType)
+        n.handleError(pkt.raddr, e)
+
+    default:
+        log.Errorf("[%s] invalid message type: %s", n.id.HexShort(), pkt.data[5])
+        return
+    }
+}
+
+// handleRequest handles the requests received from udp.
+func (n *Node) handleRequest(addr *net.UDPAddr, request Request) {
+    if request.NodeID.Equals(n.id) {
+        log.Warn("ignoring self-request")
+        return
+    }
+
+    switch request.Method {
+    default:
+        //	n.send(addr, makeError(t, protocolError, "invalid q"))
+        log.Errorln("invalid request method")
+        return
+    case pingMethod:
+        n.send(addr, Response{ID: request.ID, NodeID: n.id, Data: pingSuccessResponse})
+    case storeMethod:
+        // TODO: we should be sending the IP in the request, not just using the sender's IP
+        // TODO: should we be using StoreArgs.NodeID or StoreArgs.Value.LbryID ???
+        if n.tokens.Verify(request.StoreArgs.Value.Token, request.NodeID, addr) {
+            n.store.Upsert(request.StoreArgs.BlobHash, Contact{id: request.StoreArgs.NodeID, ip: addr.IP, port: request.StoreArgs.Value.Port})
+            n.send(addr, Response{ID: request.ID, NodeID: n.id, Data: storeSuccessResponse})
+        } else {
+            n.send(addr, Error{ID: request.ID, NodeID: n.id, ExceptionType: "invalid-token"})
+        }
+    case findNodeMethod:
+        if request.Arg == nil {
+            log.Errorln("request is missing arg")
+            return
+        }
+        n.send(addr, Response{
+            ID:       request.ID,
+            NodeID:   n.id,
+            Contacts: n.rt.GetClosest(*request.Arg, bucketSize),
+        })
+
+    case findValueMethod:
+        if request.Arg == nil {
+            log.Errorln("request is missing arg")
+            return
+        }
+
+        res := Response{
+            ID:     request.ID,
+            NodeID: n.id,
+            Token:  n.tokens.Get(request.NodeID, addr),
+        }
+
+        if contacts := n.store.Get(*request.Arg); len(contacts) > 0 {
+            res.FindValueKey = request.Arg.RawString()
+            res.Contacts = contacts
+        } else {
+            res.Contacts = n.rt.GetClosest(*request.Arg, bucketSize)
+        }
+
+        n.send(addr, res)
+    }
+
+    // nodes that send us requests should not be inserted, only refreshed.
+    // the routing table must only contain "good" nodes, which are nodes that reply to our requests
+    // if a node is already good (aka in the table), its fine to refresh it
+    // http://www.bittorrent.org/beps/bep_0005.html#routing-table
+    n.rt.UpdateIfExists(Contact{id: request.NodeID, ip: addr.IP, port: addr.Port})
+}
+
+// handleResponse handles responses received from udp.
+func (n *Node) handleResponse(addr *net.UDPAddr, response Response) {
+    tx := n.txFind(response.ID, addr)
+    if tx != nil {
+        tx.res <- response
+    }
+
+    n.rt.Update(Contact{id: response.NodeID, ip: addr.IP, port: addr.Port})
+}
+
+// handleError handles errors received from udp.
+func (n *Node) handleError(addr *net.UDPAddr, e Error) {
+    spew.Dump(e)
+    n.rt.UpdateIfExists(Contact{id: e.NodeID, ip: addr.IP, port: addr.Port})
+}
+
+// send sends data to a udp address
+func (n *Node) send(addr *net.UDPAddr, data Message) error {
+    encoded, err := bencode.EncodeBytes(data)
+    if err != nil {
+        return errors.Err(err)
+    }
+
+    if req, ok := data.(Request); ok {
+        log.Debugf("[%s] query %s: sending request to %s (%d bytes) %s(%s)",
+            n.id.HexShort(), req.ID.HexShort(), addr.String(), len(encoded), req.Method, req.ArgsDebug())
+    } else if res, ok := data.(Response); ok {
+        log.Debugf("[%s] query %s: sending response to %s (%d bytes) %s",
+            n.id.HexShort(), res.ID.HexShort(), addr.String(), len(encoded), res.ArgsDebug())
+    } else {
+        log.Debugf("[%s] (%d bytes) %s", n.id.HexShort(), len(encoded), spew.Sdump(data))
+    }
+
+    n.conn.SetWriteDeadline(time.Now().Add(time.Second * 15))
+
+    _, err = n.conn.WriteToUDP(encoded, addr)
+    return errors.Err(err)
+}
+
+// transaction represents a single query to the dht. it stores the queried contact, the request, and the response channel
+type transaction struct {
+    contact Contact
+    req     Request
+    res     chan Response
+}
+
+// insert adds a transaction to the manager.
+func (n *Node) txInsert(tx *transaction) {
+    n.txLock.Lock()
+    defer n.txLock.Unlock()
+    n.transactions[tx.req.ID] = tx
+}
+
+// delete removes a transaction from the manager.
+func (n *Node) txDelete(id messageID) {
+    n.txLock.Lock()
+    defer n.txLock.Unlock()
+    delete(n.transactions, id)
+}
+
+// Find finds a transaction for the given id. it optionally ensures that addr matches contact from transaction
+func (n *Node) txFind(id messageID, addr *net.UDPAddr) *transaction {
+    n.txLock.RLock()
+    defer n.txLock.RUnlock()
+
+    // TODO: also check that the response's nodeid matches the id you thought you sent to?
+
+    t, ok := n.transactions[id]
+    if !ok || (addr != nil && t.contact.Addr().String() != addr.String()) {
+        return nil
+    }
+
+    return t
+}
+
+// SendAsync sends a transaction and returns a channel that will eventually contain the transaction response
+// The response channel is closed when the transaction is completed or times out.
+func (n *Node) SendAsync(ctx context.Context, contact Contact, req Request) <-chan *Response {
+    if contact.id.Equals(n.id) {
+        log.Error("sending query to self")
+        return nil
+    }
+
+    ch := make(chan *Response, 1)
+
+    go func() {
+        defer close(ch)
+
+        req.ID = newMessageID()
+        req.NodeID = n.id
+        tx := &transaction{
+            contact: contact,
+            req:     req,
+            res:     make(chan Response),
+        }
+
+        n.txInsert(tx)
+        defer n.txDelete(tx.req.ID)
+
+        for i := 0; i < udpRetry; i++ {
+            if err := n.send(contact.Addr(), tx.req); err != nil {
+                if !strings.Contains(err.Error(), "use of closed network connection") { // this only happens on localhost. real UDP has no connections
+                    log.Error("send error: ", err)
+                }
+                continue // try again? return?
+            }
+
+            select {
+            case res := <-tx.res:
+                ch <- &res
+                return
+            case <-ctx.Done():
+                return
+            case <-time.After(udpTimeout):
+            }
+        }
+
+        // if request timed out each time
+        n.rt.RemoveByID(tx.contact.id)
+    }()
+
+    return ch
+}
+
+// Send sends a transaction and blocks until the response is available. It returns a response, or nil
+// if the transaction timed out.
+func (n *Node) Send(contact Contact, req Request) *Response {
+    return <-n.SendAsync(context.Background(), contact, req)
+}
+
+// Count returns the number of transactions in the manager
+func (n *Node) CountActiveTransactions() int {
+    n.txLock.Lock()
+    defer n.txLock.Unlock()
+    return len(n.transactions)
+}
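Everything connection-scoped now hangs off Node: NewNode wires up the routing table, peer store, token manager, and transaction map; Connect accepts any UDPConn, starts the read loop, and starts the packet-dispatch goroutine; Send and SendAsync register transactions keyed by message ID. A package-internal sketch of the lifecycle, assuming a real socket (the tests below drive Connect with a mocked UDPConn the same way; addresses and the peer are illustrative):

    // Stand up a node and ping a hypothetical peer.
    n, err := NewNode(RandomBitmapP())
    if err != nil {
        log.Fatal(err)
    }
    listener, err := net.ListenPacket(network, "127.0.0.1:4444") // network is the package's udp constant
    if err != nil {
        log.Fatal(err)
    }
    if err := n.Connect(listener.(*net.UDPConn)); err != nil {
        log.Fatal(err)
    }
    defer n.Shutdown() // stops goroutines, waits, stops tokens, closes the conn

    peer := Contact{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 4445} // hypothetical peer
    if res := n.Send(peer, Request{Method: pingMethod}); res == nil {
        log.Error("no response") // Send blocks and returns nil on timeout
    }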
@@ -2,6 +2,7 @@ package dht
 
 import (
     "context"
+    "sort"
     "sync"
     "time"
 

@@ -11,21 +12,21 @@ import (
     log "github.com/sirupsen/logrus"
 )
 
-type nodeFinder struct {
+type contactFinder struct {
     findValue bool // true if we're using findValue
     target    Bitmap
-    dht       *DHT
+    node      *Node
 
     done *stopOnce.Stopper
 
     findValueMutex  *sync.Mutex
-    findValueResult []Node
+    findValueResult []Contact
 
-    activeNodesMutex *sync.Mutex
-    activeNodes      []Node
+    activeContactsMutex *sync.Mutex
+    activeContacts      []Contact
 
     shortlistMutex *sync.Mutex
-    shortlist      []Node
+    shortlist      []Contact
     shortlistAdded map[Bitmap]bool
 
     outstandingRequestsMutex *sync.RWMutex

@@ -33,33 +34,33 @@ type nodeFinder struct {
 }
 
 type findNodeResponse struct {
     Found bool
-    Nodes []Node
+    Contacts []Contact
 }
 
-func newNodeFinder(dht *DHT, target Bitmap, findValue bool) *nodeFinder {
-    return &nodeFinder{
-        dht:              dht,
+func newContactFinder(node *Node, target Bitmap, findValue bool) *contactFinder {
+    return &contactFinder{
+        node:             node,
         target:           target,
         findValue:        findValue,
         findValueMutex:   &sync.Mutex{},
-        activeNodesMutex: &sync.Mutex{},
+        activeContactsMutex: &sync.Mutex{},
         shortlistMutex:   &sync.Mutex{},
         shortlistAdded:   make(map[Bitmap]bool),
        done:             stopOnce.New(),
        outstandingRequestsMutex: &sync.RWMutex{},
     }
 }
 
-func (nf *nodeFinder) Find() (findNodeResponse, error) {
-    if nf.findValue {
-        log.Debugf("[%s] starting an iterative Find for the value %s", nf.dht.node.id.HexShort(), nf.target.HexShort())
+func (cf *contactFinder) Find() (findNodeResponse, error) {
+    if cf.findValue {
+        log.Debugf("[%s] starting an iterative Find for the value %s", cf.node.id.HexShort(), cf.target.HexShort())
     } else {
-        log.Debugf("[%s] starting an iterative Find for nodes near %s", nf.dht.node.id.HexShort(), nf.target.HexShort())
+        log.Debugf("[%s] starting an iterative Find for contacts near %s", cf.node.id.HexShort(), cf.target.HexShort())
     }
-    nf.appendNewToShortlist(nf.dht.rt.GetClosest(nf.target, alpha))
-    if len(nf.shortlist) == 0 {
-        return findNodeResponse{}, errors.Err("no nodes in routing table")
+    cf.appendNewToShortlist(cf.node.rt.GetClosest(cf.target, alpha))
+    if len(cf.shortlist) == 0 {
+        return findNodeResponse{}, errors.Err("no contacts in routing table")
     }
 
     wg := &sync.WaitGroup{}
@@ -68,163 +69,163 @@ func (nf *nodeFinder) Find() (findNodeResponse, error) {
         wg.Add(1)
         go func(i int) {
             defer wg.Done()
-            nf.iterationWorker(i + 1)
+            cf.iterationWorker(i + 1)
         }(i)
     }
 
     wg.Wait()
 
-    // TODO: what to do if we have less than K active nodes, shortlist is empty, but we
-    // TODO: have other nodes in our routing table whom we have not contacted. prolly contact them
+    // TODO: what to do if we have less than K active contacts, shortlist is empty, but we
+    // TODO: have other contacts in our routing table whom we have not contacted. prolly contact them
 
     result := findNodeResponse{}
-    if nf.findValue && len(nf.findValueResult) > 0 {
+    if cf.findValue && len(cf.findValueResult) > 0 {
         result.Found = true
-        result.Nodes = nf.findValueResult
+        result.Contacts = cf.findValueResult
     } else {
-        result.Nodes = nf.activeNodes
-        if len(result.Nodes) > bucketSize {
-            result.Nodes = result.Nodes[:bucketSize]
+        result.Contacts = cf.activeContacts
+        if len(result.Contacts) > bucketSize {
+            result.Contacts = result.Contacts[:bucketSize]
         }
     }
 
     return result, nil
 }
 
-func (nf *nodeFinder) iterationWorker(num int) {
-    log.Debugf("[%s] starting worker %d", nf.dht.node.id.HexShort(), num)
-    defer func() { log.Debugf("[%s] stopping worker %d", nf.dht.node.id.HexShort(), num) }()
+func (cf *contactFinder) iterationWorker(num int) {
+    log.Debugf("[%s] starting worker %d", cf.node.id.HexShort(), num)
+    defer func() { log.Debugf("[%s] stopping worker %d", cf.node.id.HexShort(), num) }()
 
     for {
-        maybeNode := nf.popFromShortlist()
-        if maybeNode == nil {
+        maybeContact := cf.popFromShortlist()
+        if maybeContact == nil {
             // TODO: block if there are pending requests out from other workers. there may be more shortlist values coming
-            log.Debugf("[%s] worker %d: no nodes in shortlist, waiting...", nf.dht.node.id.HexShort(), num)
+            log.Debugf("[%s] worker %d: no contacts in shortlist, waiting...", cf.node.id.HexShort(), num)
             time.Sleep(100 * time.Millisecond)
         } else {
-            node := *maybeNode
+            contact := *maybeContact
 
-            if node.id.Equals(nf.dht.node.id) {
+            if contact.id.Equals(cf.node.id) {
                 continue // cannot contact self
             }
 
-            req := Request{Arg: &nf.target}
-            if nf.findValue {
+            req := Request{Arg: &cf.target}
+            if cf.findValue {
                 req.Method = findValueMethod
             } else {
                 req.Method = findNodeMethod
             }
 
-            log.Debugf("[%s] worker %d: contacting %s", nf.dht.node.id.HexShort(), num, node.id.HexShort())
+            log.Debugf("[%s] worker %d: contacting %s", cf.node.id.HexShort(), num, contact.id.HexShort())
 
-            nf.incrementOutstanding()
+            cf.incrementOutstanding()
 
             var res *Response
             ctx, cancel := context.WithCancel(context.Background())
-            resCh := nf.dht.tm.SendAsync(ctx, node, req)
+            resCh := cf.node.SendAsync(ctx, contact, req)
             select {
             case res = <-resCh:
-            case <-nf.done.Chan():
-                log.Debugf("[%s] worker %d: canceled", nf.dht.node.id.HexShort(), num)
+            case <-cf.done.Chan():
+                log.Debugf("[%s] worker %d: canceled", cf.node.id.HexShort(), num)
                 cancel()
                 return
             }
 
             if res == nil {
                 // nothing to do, response timed out
-                log.Debugf("[%s] worker %d: timed out waiting for %s", nf.dht.node.id.HexShort(), num, node.id.HexShort())
-            } else if nf.findValue && res.FindValueKey != "" {
-                log.Debugf("[%s] worker %d: got value", nf.dht.node.id.HexShort(), num)
-                nf.findValueMutex.Lock()
-                nf.findValueResult = res.FindNodeData
-                nf.findValueMutex.Unlock()
-                nf.done.Stop()
+                log.Debugf("[%s] worker %d: timed out waiting for %s", cf.node.id.HexShort(), num, contact.id.HexShort())
+            } else if cf.findValue && res.FindValueKey != "" {
+                log.Debugf("[%s] worker %d: got value", cf.node.id.HexShort(), num)
+                cf.findValueMutex.Lock()
+                cf.findValueResult = res.Contacts
+                cf.findValueMutex.Unlock()
+                cf.done.Stop()
                 return
             } else {
-                log.Debugf("[%s] worker %d: got contacts", nf.dht.node.id.HexShort(), num)
-                nf.insertIntoActiveList(node)
-                nf.appendNewToShortlist(res.FindNodeData)
+                log.Debugf("[%s] worker %d: got contacts", cf.node.id.HexShort(), num)
+                cf.insertIntoActiveList(contact)
+                cf.appendNewToShortlist(res.Contacts)
             }
 
-            nf.decrementOutstanding() // this is all the way down here because we need to add to shortlist first
+            cf.decrementOutstanding() // this is all the way down here because we need to add to shortlist first
         }
 
-        if nf.isSearchFinished() {
-            log.Debugf("[%s] worker %d: search is finished", nf.dht.node.id.HexShort(), num)
-            nf.done.Stop()
+        if cf.isSearchFinished() {
+            log.Debugf("[%s] worker %d: search is finished", cf.node.id.HexShort(), num)
+            cf.done.Stop()
             return
         }
     }
 }
 
-func (nf *nodeFinder) appendNewToShortlist(nodes []Node) {
-    nf.shortlistMutex.Lock()
-    defer nf.shortlistMutex.Unlock()
+func (cf *contactFinder) appendNewToShortlist(contacts []Contact) {
+    cf.shortlistMutex.Lock()
+    defer cf.shortlistMutex.Unlock()
 
-    for _, n := range nodes {
-        if _, ok := nf.shortlistAdded[n.id]; !ok {
-            nf.shortlist = append(nf.shortlist, n)
-            nf.shortlistAdded[n.id] = true
+    for _, c := range contacts {
+        if _, ok := cf.shortlistAdded[c.id]; !ok {
+            cf.shortlist = append(cf.shortlist, c)
+            cf.shortlistAdded[c.id] = true
         }
     }
 
-    sortNodesInPlace(nf.shortlist, nf.target)
+    sortInPlace(cf.shortlist, cf.target)
 }
 
-func (nf *nodeFinder) popFromShortlist() *Node {
-    nf.shortlistMutex.Lock()
-    defer nf.shortlistMutex.Unlock()
+func (cf *contactFinder) popFromShortlist() *Contact {
+    cf.shortlistMutex.Lock()
+    defer cf.shortlistMutex.Unlock()
 
-    if len(nf.shortlist) == 0 {
+    if len(cf.shortlist) == 0 {
         return nil
     }
 
-    first := nf.shortlist[0]
-    nf.shortlist = nf.shortlist[1:]
+    first := cf.shortlist[0]
+    cf.shortlist = cf.shortlist[1:]
     return &first
 }
 
-func (nf *nodeFinder) insertIntoActiveList(node Node) {
-    nf.activeNodesMutex.Lock()
-    defer nf.activeNodesMutex.Unlock()
+func (cf *contactFinder) insertIntoActiveList(contact Contact) {
+    cf.activeContactsMutex.Lock()
+    defer cf.activeContactsMutex.Unlock()
 
     inserted := false
-    for i, n := range nf.activeNodes {
-        if node.id.Xor(nf.target).Less(n.id.Xor(nf.target)) {
-            nf.activeNodes = append(nf.activeNodes[:i], append([]Node{node}, nf.activeNodes[i:]...)...)
+    for i, n := range cf.activeContacts {
+        if contact.id.Xor(cf.target).Less(n.id.Xor(cf.target)) {
+            cf.activeContacts = append(cf.activeContacts[:i], append([]Contact{contact}, cf.activeContacts[i:]...)...)
             inserted = true
             break
         }
     }
     if !inserted {
-        nf.activeNodes = append(nf.activeNodes, node)
+        cf.activeContacts = append(cf.activeContacts, contact)
     }
 }
 
-func (nf *nodeFinder) isSearchFinished() bool {
-    if nf.findValue && len(nf.findValueResult) > 0 {
+func (cf *contactFinder) isSearchFinished() bool {
+    if cf.findValue && len(cf.findValueResult) > 0 {
         return true
     }
 
     select {
-    case <-nf.done.Chan():
+    case <-cf.done.Chan():
         return true
     default:
     }
 
-    if !nf.areRequestsOutstanding() {
-        nf.shortlistMutex.Lock()
-        defer nf.shortlistMutex.Unlock()
+    if !cf.areRequestsOutstanding() {
+        cf.shortlistMutex.Lock()
+        defer cf.shortlistMutex.Unlock()
 
-        if len(nf.shortlist) == 0 {
+        if len(cf.shortlist) == 0 {
             return true
         }
 
-        nf.activeNodesMutex.Lock()
-        defer nf.activeNodesMutex.Unlock()
+        cf.activeContactsMutex.Lock()
+        defer cf.activeContactsMutex.Unlock()
 
-        if len(nf.activeNodes) >= bucketSize && nf.activeNodes[bucketSize-1].id.Xor(nf.target).Less(nf.shortlist[0].id.Xor(nf.target)) {
-            // we have at least K active nodes, and we don't have any closer nodes yet to contact
+        if len(cf.activeContacts) >= bucketSize && cf.activeContacts[bucketSize-1].id.Xor(cf.target).Less(cf.shortlist[0].id.Xor(cf.target)) {
+            // we have at least K active contacts, and we don't have any closer contacts to ping
             return true
         }
     }
@@ -232,20 +233,34 @@ func (nf *nodeFinder) isSearchFinished() bool {
 
     return false
 }
 
-func (nf *nodeFinder) incrementOutstanding() {
-    nf.outstandingRequestsMutex.Lock()
-    defer nf.outstandingRequestsMutex.Unlock()
-    nf.outstandingRequests++
+func (cf *contactFinder) incrementOutstanding() {
+    cf.outstandingRequestsMutex.Lock()
+    defer cf.outstandingRequestsMutex.Unlock()
+    cf.outstandingRequests++
 }
-func (nf *nodeFinder) decrementOutstanding() {
-    nf.outstandingRequestsMutex.Lock()
-    defer nf.outstandingRequestsMutex.Unlock()
-    if nf.outstandingRequests > 0 {
-        nf.outstandingRequests--
+func (cf *contactFinder) decrementOutstanding() {
+    cf.outstandingRequestsMutex.Lock()
+    defer cf.outstandingRequestsMutex.Unlock()
+    if cf.outstandingRequests > 0 {
+        cf.outstandingRequests--
     }
 }
-func (nf *nodeFinder) areRequestsOutstanding() bool {
-    nf.outstandingRequestsMutex.RLock()
-    defer nf.outstandingRequestsMutex.RUnlock()
-    return nf.outstandingRequests > 0
+func (cf *contactFinder) areRequestsOutstanding() bool {
+    cf.outstandingRequestsMutex.RLock()
+    defer cf.outstandingRequestsMutex.RUnlock()
+    return cf.outstandingRequests > 0
+}
+
+func sortInPlace(contacts []Contact, target Bitmap) {
+    toSort := make([]sortedContact, len(contacts))
+
+    for i, n := range contacts {
+        toSort[i] = sortedContact{n, n.id.Xor(target)}
+    }
+
+    sort.Sort(byXorDistance(toSort))
+
+    for i, c := range toSort {
+        contacts[i] = c.contact
+    }
 }
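sortInPlace (above) replaces sortNodesInPlace and keeps the shortlist ordered by XOR distance to the target, which is what makes the iterative lookup converge on closer contacts. A self-contained toy illustrating the ordering on one-byte IDs (Bitmap.Xor and Bitmap.Less perform the same comparison across the full ID width):

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        target := byte(0x06)
        ids := []byte{0x0e, 0x07, 0x04}
        // XOR distance to target: 0x0e -> 8, 0x07 -> 1, 0x04 -> 2
        sort.Slice(ids, func(i, j int) bool {
            return ids[i]^target < ids[j]^target
        })
        fmt.Printf("%#v\n", ids) // []byte{0x7, 0x4, 0xe}: closest first
    }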
@ -97,9 +97,11 @@ func TestPing(t *testing.T) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
dht.conn = conn
|
|
||||||
go dht.listen()
|
err = dht.node.Connect(conn)
|
||||||
go dht.runHandler()
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
defer dht.Shutdown()
|
defer dht.Shutdown()
|
||||||
|
|
||||||
messageID := newMessageID()
|
messageID := newMessageID()
|
||||||
|
@ -193,9 +195,10 @@ func TestStore(t *testing.T) {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
dht.conn = conn
|
err = dht.node.Connect(conn)
|
||||||
go dht.listen()
|
if err != nil {
|
||||||
go dht.runHandler()
|
t.Fatal(err)
|
||||||
|
}
|
||||||
defer dht.Shutdown()
|
defer dht.Shutdown()
|
||||||
|
|
||||||
messageID := newMessageID()
|
messageID := newMessageID()
|
||||||
|
@@ -208,7 +211,7 @@ func TestStore(t *testing.T) {
 		StoreArgs: &storeArgs{
 			BlobHash: blobHashToStore,
 			Value: storeArgsValue{
-				Token:  dht.tokens.Get(testNodeID, conn.addr),
+				Token:  dht.node.tokens.Get(testNodeID, conn.addr),
 				LbryID: testNodeID,
 				Port:   9999,
 			},
@@ -266,11 +269,11 @@ func TestStore(t *testing.T) {
 		}
 	}
 
-	if len(dht.store.hashes) != 1 {
+	if len(dht.node.store.hashes) != 1 {
 		t.Error("dht store has wrong number of items")
 	}
 
-	items := dht.store.Get(blobHashToStore)
+	items := dht.node.store.Get(blobHashToStore)
 	if len(items) != 1 {
 		t.Error("list created in store, but nothing in list")
 	}
@@ -289,17 +292,19 @@ func TestFindNode(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	dht.conn = conn
-	go dht.listen()
-	go dht.runHandler()
+
+	err = dht.node.Connect(conn)
+	if err != nil {
+		t.Fatal(err)
+	}
 	defer dht.Shutdown()
 
 	nodesToInsert := 3
-	var nodes []Node
+	var nodes []Contact
 	for i := 0; i < nodesToInsert; i++ {
-		n := Node{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 10000 + i}
+		n := Contact{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 10000 + i}
 		nodes = append(nodes, n)
-		dht.rt.Update(n)
+		dht.node.rt.Update(n)
 	}
 
 	messageID := newMessageID()
@@ -357,17 +362,18 @@ func TestFindValueExisting(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	dht.conn = conn
-	go dht.listen()
-	go dht.runHandler()
+	err = dht.node.Connect(conn)
+	if err != nil {
+		t.Fatal(err)
+	}
 	defer dht.Shutdown()
 
 	nodesToInsert := 3
-	var nodes []Node
+	var nodes []Contact
 	for i := 0; i < nodesToInsert; i++ {
-		n := Node{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 10000 + i}
+		n := Contact{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 10000 + i}
 		nodes = append(nodes, n)
-		dht.rt.Update(n)
+		dht.node.rt.Update(n)
 	}
 
 	//data, _ := hex.DecodeString("64313a30693065313a3132303a7de8e57d34e316abbb5a8a8da50dcd1ad4c80e0f313a3234383a7ce1b831dec8689e44f80f547d2dea171f6a625e1a4ff6c6165e645f953103dabeb068a622203f859c6c64658fd3aa3b313a33393a66696e6456616c7565313a346c34383aa47624b8e7ee1e54df0c45e2eb858feb0b705bd2a78d8b739be31ba188f4bd6f56b371c51fecc5280d5fd26ba4168e966565")
@@ -375,10 +381,10 @@ func TestFindValueExisting(t *testing.T) {
 	messageID := newMessageID()
 	valueToFind := RandomBitmapP()
 
-	nodeToFind := Node{id: RandomBitmapP(), ip: net.ParseIP("1.2.3.4"), port: 1286}
-	dht.store.Upsert(valueToFind, nodeToFind)
-	dht.store.Upsert(valueToFind, nodeToFind)
-	dht.store.Upsert(valueToFind, nodeToFind)
+	nodeToFind := Contact{id: RandomBitmapP(), ip: net.ParseIP("1.2.3.4"), port: 1286}
+	dht.node.store.Upsert(valueToFind, nodeToFind)
+	dht.node.store.Upsert(valueToFind, nodeToFind)
+	dht.node.store.Upsert(valueToFind, nodeToFind)
 
 	request := Request{
 		ID: messageID,
@@ -428,7 +434,7 @@ func TestFindValueExisting(t *testing.T) {
 		t.Fatal("search results are not a list")
 	}
 
-	verifyCompactContacts(t, contacts, []Node{nodeToFind})
+	verifyCompactContacts(t, contacts, []Contact{nodeToFind})
 }
 
 func TestFindValueFallbackToFindNode(t *testing.T) {
@@ -442,17 +448,18 @@ func TestFindValueFallbackToFindNode(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	dht.conn = conn
-	go dht.listen()
-	go dht.runHandler()
+	err = dht.node.Connect(conn)
+	if err != nil {
+		t.Fatal(err)
+	}
 	defer dht.Shutdown()
 
 	nodesToInsert := 3
-	var nodes []Node
+	var nodes []Contact
 	for i := 0; i < nodesToInsert; i++ {
-		n := Node{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 10000 + i}
+		n := Contact{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 10000 + i}
 		nodes = append(nodes, n)
-		dht.rt.Update(n)
+		dht.node.rt.Update(n)
 	}
 
 	messageID := newMessageID()
@@ -557,7 +564,7 @@ func verifyResponse(t *testing.T, resp map[string]interface{}, id messageID, dht
 	}
 }
 
-func verifyContacts(t *testing.T, contacts []interface{}, nodes []Node) {
+func verifyContacts(t *testing.T, contacts []interface{}, nodes []Contact) {
 	if len(contacts) != len(nodes) {
 		t.Errorf("got %d contacts; expected %d", len(contacts), len(nodes))
 		return
@@ -577,7 +584,7 @@ func verifyContacts(t *testing.T, contacts []interface{}, nodes []Node) {
 		return
 	}
 
-	var currNode Node
+	var currNode Contact
 	currNodeFound := false
 
 	id, ok := contact[0].(string)
@@ -618,7 +625,7 @@ func verifyContacts(t *testing.T, contacts []interface{}, nodes []Node) {
 	}
 }
 
-func verifyCompactContacts(t *testing.T, contacts []interface{}, nodes []Node) {
+func verifyCompactContacts(t *testing.T, contacts []interface{}, nodes []Contact) {
 	if len(contacts) != len(nodes) {
 		t.Errorf("got %d contacts; expected %d", len(contacts), len(nodes))
 		return
@@ -633,14 +640,14 @@ func verifyCompactContacts(t *testing.T, contacts []interface{}, nodes []Node) {
 		return
 	}
 
-	contact := Node{}
+	contact := Contact{}
 	err := contact.UnmarshalCompact([]byte(compact))
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
-	var currNode Node
+	var currNode Contact
 	currNodeFound := false
 
 	if _, ok := foundNodes[contact.id.Hex()]; ok {
@@ -14,34 +14,33 @@ import (
 	"github.com/lyoshenka/bencode"
 )
 
-type Node struct {
+type Contact struct {
 	id   Bitmap
 	ip   net.IP
 	port int
-	token string // this is set when the node is returned from a FindNode call
 }
 
-func (n Node) String() string {
-	return n.id.HexShort() + "@" + n.Addr().String()
+func (c Contact) Addr() *net.UDPAddr {
+	return &net.UDPAddr{IP: c.ip, Port: c.port}
 }
 
-func (n Node) Addr() *net.UDPAddr {
-	return &net.UDPAddr{IP: n.ip, Port: n.port}
+func (c Contact) String() string {
+	return c.id.HexShort() + "@" + c.Addr().String()
 }
 
-func (n Node) MarshalCompact() ([]byte, error) {
-	if n.ip.To4() == nil {
+func (c Contact) MarshalCompact() ([]byte, error) {
+	if c.ip.To4() == nil {
 		return nil, errors.Err("ip not set")
 	}
-	if n.port < 0 || n.port > 65535 {
+	if c.port < 0 || c.port > 65535 {
 		return nil, errors.Err("invalid port")
 	}
 
 	var buf bytes.Buffer
-	buf.Write(n.ip.To4())
-	buf.WriteByte(byte(n.port >> 8))
-	buf.WriteByte(byte(n.port))
-	buf.Write(n.id[:])
+	buf.Write(c.ip.To4())
+	buf.WriteByte(byte(c.port >> 8))
+	buf.WriteByte(byte(c.port))
+	buf.Write(c.id[:])
 
 	if buf.Len() != compactNodeInfoLength {
 		return nil, errors.Err("i dont know how this happened")
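The rename leaves Contact holding only the identity triple (id, ip, port); the token field is dropped, since per its old comment it was only populated on contacts returned from FindNode. A short usage sketch, assuming it sits inside package dht (the fields are unexported), with net and fmt imported:

func exampleContact() {
	c := Contact{id: RandomBitmapP(), ip: net.ParseIP("1.2.3.4"), port: 1286}
	fmt.Println(c.Addr())   // "1.2.3.4:1286" as a *net.UDPAddr, usable directly for UDP writes
	fmt.Println(c.String()) // "<short hex id>@1.2.3.4:1286"
}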
@@ -50,21 +49,21 @@ func (n Node) MarshalCompact() ([]byte, error) {
 	return buf.Bytes(), nil
 }
 
-func (n *Node) UnmarshalCompact(b []byte) error {
+func (c *Contact) UnmarshalCompact(b []byte) error {
 	if len(b) != compactNodeInfoLength {
 		return errors.Err("invalid compact length")
 	}
-	n.ip = net.IPv4(b[0], b[1], b[2], b[3]).To4()
-	n.port = int(uint16(b[5]) | uint16(b[4])<<8)
-	n.id = BitmapFromBytesP(b[6:])
+	c.ip = net.IPv4(b[0], b[1], b[2], b[3]).To4()
+	c.port = int(uint16(b[5]) | uint16(b[4])<<8)
+	c.id = BitmapFromBytesP(b[6:])
 	return nil
 }
 
-func (n Node) MarshalBencode() ([]byte, error) {
-	return bencode.EncodeBytes([]interface{}{n.id, n.ip.String(), n.port})
+func (c Contact) MarshalBencode() ([]byte, error) {
+	return bencode.EncodeBytes([]interface{}{c.id, c.ip.String(), c.port})
 }
 
-func (n *Node) UnmarshalBencode(b []byte) error {
+func (c *Contact) UnmarshalBencode(b []byte) error {
 	var raw []bencode.RawMessage
 	err := bencode.DecodeBytes(b, &raw)
 	if err != nil {
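MarshalCompact packs an IPv4 address (4 bytes), a big-endian port (2 bytes), and the raw node ID, which is why compactNodeInfoLength is nodeIDLength + 6; with the 48-byte IDs used in these tests that comes to 54 bytes. A round-trip sketch inside package dht, mirroring TestCompactEncoding further down (panic stands in for real error handling):

func exampleCompactRoundTrip() {
	orig := Contact{id: RandomBitmapP(), ip: net.ParseIP("1.2.3.4"), port: 55<<8 + 66}
	b, err := orig.MarshalCompact() // b = {1, 2, 3, 4, 55, 66, <48 id bytes>}
	if err != nil {
		panic(err)
	}
	var decoded Contact
	if err := decoded.UnmarshalCompact(b); err != nil {
		panic(err)
	}
	// decoded now matches orig: ip 1.2.3.4, port 14146, same id
}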
@@ -75,7 +74,7 @@ func (n *Node) UnmarshalBencode(b []byte) error {
 		return errors.Err("contact must have 3 elements; got %d", len(raw))
 	}
 
-	err = bencode.DecodeBytes(raw[0], &n.id)
+	err = bencode.DecodeBytes(raw[0], &c.id)
 	if err != nil {
 		return err
 	}
@@ -85,12 +84,12 @@ func (n *Node) UnmarshalBencode(b []byte) error {
 	if err != nil {
 		return err
 	}
-	n.ip = net.ParseIP(ipStr).To4()
-	if n.ip == nil {
+	c.ip = net.ParseIP(ipStr).To4()
+	if c.ip == nil {
 		return errors.Err("invalid IP")
 	}
 
-	err = bencode.DecodeBytes(raw[2], &n.port)
+	err = bencode.DecodeBytes(raw[2], &c.port)
 	if err != nil {
 		return err
 	}
@@ -98,12 +97,12 @@ func (n *Node) UnmarshalBencode(b []byte) error {
 	return nil
 }
 
-type sortedNode struct {
-	node                Node
+type sortedContact struct {
+	contact             Contact
 	xorDistanceToTarget Bitmap
 }
 
-type byXorDistance []sortedNode
+type byXorDistance []sortedContact
 
 func (a byXorDistance) Len() int      { return len(a) }
 func (a byXorDistance) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
@@ -112,17 +111,17 @@ func (a byXorDistance) Less(i, j int) bool {
 }
 
 type routingTable struct {
-	node    Node
+	id      Bitmap
 	buckets [numBuckets]*list.List
 	lock    *sync.RWMutex
 }
 
-func newRoutingTable(node *Node) *routingTable {
+func newRoutingTable(id Bitmap) *routingTable {
 	var rt routingTable
 	for i := range rt.buckets {
 		rt.buckets[i] = list.New()
 	}
-	rt.node = *node
+	rt.id = id
 	rt.lock = &sync.RWMutex{}
 	return &rt
 }
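The table now needs only the local node ID rather than a full Node value. A usage sketch inside package dht, grounded in TestRoutingTable at the end of this diff:

func exampleRoutingTable() {
	rt := newRoutingTable(RandomBitmapP()) // the local node's ID
	rt.Update(Contact{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 8001})
	rt.Update(Contact{id: RandomBitmapP(), ip: net.ParseIP("127.0.0.1"), port: 8002})
	closest := rt.GetClosest(RandomBitmapP(), bucketSize) // contacts sorted by XOR distance to the target
	_ = closest
}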
@@ -131,7 +130,7 @@ func (rt *routingTable) BucketInfo() string {
 	rt.lock.RLock()
 	defer rt.lock.RUnlock()
 
-	bucketInfo := []string{}
+	var bucketInfo []string
 	for i, b := range rt.buckets {
 		contents := bucketContents(b)
 		if contents != "" {
@@ -152,7 +151,7 @@ func bucketContents(b *list.List) string {
 			if ids != "" {
 				ids += ", "
 			}
-			ids += curr.Value.(Node).id.HexShort()
+			ids += curr.Value.(Contact).id.HexShort()
 		}
 	}
 
 	if count > 0 {
@@ -162,31 +161,31 @@ func bucketContents(b *list.List) string {
 	}
 }
 
-// Update inserts or refreshes a node
-func (rt *routingTable) Update(node Node) {
+// Update inserts or refreshes a contact
+func (rt *routingTable) Update(c Contact) {
 	rt.lock.Lock()
 	defer rt.lock.Unlock()
-	bucketNum := bucketFor(rt.node.id, node.id)
+	bucketNum := bucketFor(rt.id, c.id)
 	bucket := rt.buckets[bucketNum]
-	element := findInList(bucket, node.id)
+	element := findInList(bucket, c.id)
 	if element == nil {
 		if bucket.Len() >= bucketSize {
-			// TODO: Ping front node first. Only remove if it does not respond
+			// TODO: Ping front contact first. Only remove if it does not respond
 			bucket.Remove(bucket.Front())
 		}
-		bucket.PushBack(node)
+		bucket.PushBack(c)
 	} else {
 		bucket.MoveToBack(element)
 	}
 }
 
-// UpdateIfExists refreshes a node if its already in the routing table
-func (rt *routingTable) UpdateIfExists(node Node) {
+// UpdateIfExists refreshes a contact if its already in the routing table
+func (rt *routingTable) UpdateIfExists(c Contact) {
 	rt.lock.Lock()
 	defer rt.lock.Unlock()
-	bucketNum := bucketFor(rt.node.id, node.id)
+	bucketNum := bucketFor(rt.id, c.id)
 	bucket := rt.buckets[bucketNum]
-	element := findInList(bucket, node.id)
+	element := findInList(bucket, c.id)
 	if element != nil {
 		bucket.MoveToBack(element)
 	}
@@ -195,55 +194,55 @@ func (rt *routingTable) UpdateIfExists(node Node) {
 func (rt *routingTable) RemoveByID(id Bitmap) {
 	rt.lock.Lock()
 	defer rt.lock.Unlock()
-	bucketNum := bucketFor(rt.node.id, id)
+	bucketNum := bucketFor(rt.id, id)
 	bucket := rt.buckets[bucketNum]
-	element := findInList(bucket, rt.node.id)
+	element := findInList(bucket, rt.id)
 	if element != nil {
 		bucket.Remove(element)
 	}
 }
 
-func (rt *routingTable) GetClosest(target Bitmap, limit int) []Node {
+func (rt *routingTable) GetClosest(target Bitmap, limit int) []Contact {
 	rt.lock.RLock()
 	defer rt.lock.RUnlock()
 
-	var toSort []sortedNode
+	var toSort []sortedContact
 	var bucketNum int
 
-	if rt.node.id.Equals(target) {
+	if rt.id.Equals(target) {
 		bucketNum = 0
 	} else {
-		bucketNum = bucketFor(rt.node.id, target)
+		bucketNum = bucketFor(rt.id, target)
 	}
 
 	bucket := rt.buckets[bucketNum]
-	toSort = appendNodes(toSort, bucket.Front(), target)
+	toSort = appendContacts(toSort, bucket.Front(), target)
 
 	for i := 1; (bucketNum-i >= 0 || bucketNum+i < numBuckets) && len(toSort) < limit; i++ {
 		if bucketNum-i >= 0 {
 			bucket = rt.buckets[bucketNum-i]
-			toSort = appendNodes(toSort, bucket.Front(), target)
+			toSort = appendContacts(toSort, bucket.Front(), target)
 		}
 		if bucketNum+i < numBuckets {
 			bucket = rt.buckets[bucketNum+i]
-			toSort = appendNodes(toSort, bucket.Front(), target)
+			toSort = appendContacts(toSort, bucket.Front(), target)
 		}
 	}
 
 	sort.Sort(byXorDistance(toSort))
 
-	var nodes []Node
-	for _, c := range toSort {
-		nodes = append(nodes, c.node)
-		if len(nodes) >= limit {
+	var contacts []Contact
+	for _, sorted := range toSort {
+		contacts = append(contacts, sorted.contact)
+		if len(contacts) >= limit {
 			break
 		}
 	}
 
-	return nodes
+	return contacts
 }
 
-// Count returns the number of nodes in the routing table
+// Count returns the number of contacts in the routing table
 func (rt *routingTable) Count() int {
 	rt.lock.RLock()
 	defer rt.lock.RUnlock()
@@ -258,38 +257,24 @@ func (rt *routingTable) Count() int {
 
 func findInList(bucket *list.List, value Bitmap) *list.Element {
 	for curr := bucket.Front(); curr != nil; curr = curr.Next() {
-		if curr.Value.(Node).id.Equals(value) {
+		if curr.Value.(Contact).id.Equals(value) {
 			return curr
 		}
 	}
 	return nil
 }
 
-func appendNodes(nodes []sortedNode, start *list.Element, target Bitmap) []sortedNode {
+func appendContacts(contacts []sortedContact, start *list.Element, target Bitmap) []sortedContact {
 	for curr := start; curr != nil; curr = curr.Next() {
-		node := curr.Value.(Node)
-		nodes = append(nodes, sortedNode{node, node.id.Xor(target)})
+		c := curr.Value.(Contact)
+		contacts = append(contacts, sortedContact{c, c.id.Xor(target)})
 	}
-	return nodes
+	return contacts
}
 
 func bucketFor(id Bitmap, target Bitmap) int {
 	if id.Equals(target) {
-		panic("nodes do not have a bucket for themselves")
+		panic("routing table does not have a bucket for its own id")
 	}
 	return numBuckets - 1 - target.Xor(id).PrefixLen()
 }
-
-func sortNodesInPlace(nodes []Node, target Bitmap) {
-	toSort := make([]sortedNode, len(nodes))
-
-	for i, n := range nodes {
-		toSort[i] = sortedNode{n, n.id.Xor(target)}
-	}
-
-	sort.Sort(byXorDistance(toSort))
-
-	for i, c := range toSort {
-		nodes[i] = c.node
-	}
-}
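bucketFor places a contact by the position of the first differing bit: numBuckets - 1 - PrefixLen(id XOR target), where PrefixLen, per its use here, counts the XOR's leading zero bits. A worked example using the IDs from TestRoutingTable below; the concrete index assumes numBuckets equals the 384 bits of these 48-byte IDs, which this excerpt does not show:

func exampleBucketFor() {
	n1 := BitmapFromHexP("FFFFFFFF0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	n2 := BitmapFromHexP("FFFFFFF00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	// n1 and n2 agree on their first 28 bits and differ at bit 28, so
	// n1.Xor(n2).PrefixLen() == 28 and bucketFor(n1, n2) == numBuckets - 1 - 28
	// (bucket 355 if numBuckets is 384). IDs that diverge early are distant
	// and land in high-numbered buckets.
	_ = bucketFor(n1, n2)
}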
@@ -36,9 +36,9 @@ func TestRoutingTable(t *testing.T) {
 	n1 := BitmapFromHexP("FFFFFFFF0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
 	n2 := BitmapFromHexP("FFFFFFF00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
 	n3 := BitmapFromHexP("111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
-	rt := newRoutingTable(&Node{n1, net.ParseIP("127.0.0.1"), 8000, ""})
-	rt.Update(Node{n2, net.ParseIP("127.0.0.1"), 8001, ""})
-	rt.Update(Node{n3, net.ParseIP("127.0.0.1"), 8002, ""})
+	rt := newRoutingTable(n1)
+	rt.Update(Contact{n2, net.ParseIP("127.0.0.1"), 8001})
+	rt.Update(Contact{n3, net.ParseIP("127.0.0.1"), 8002})
 
 	contacts := rt.GetClosest(BitmapFromHexP("222222220000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 1)
 	if len(contacts) != 1 {
@@ -63,14 +63,14 @@ func TestRoutingTable(t *testing.T) {
 }
 
 func TestCompactEncoding(t *testing.T) {
-	n := Node{
+	c := Contact{
 		id:   BitmapFromHexP("1c8aff71b99462464d9eeac639595ab99664be3482cb91a29d87467515c7d9158fe72aa1f1582dab07d8f8b5db277f41"),
 		ip:   net.ParseIP("1.2.3.4"),
 		port: int(55<<8 + 66),
 	}
 
 	var compact []byte
-	compact, err := n.MarshalCompact()
+	compact, err := c.MarshalCompact()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -79,7 +79,7 @@ func TestCompactEncoding(t *testing.T) {
 		t.Fatalf("got length of %d; expected %d", len(compact), compactNodeInfoLength)
 	}
 
-	if !reflect.DeepEqual(compact, append([]byte{1, 2, 3, 4, 55, 66}, n.id[:]...)) {
+	if !reflect.DeepEqual(compact, append([]byte{1, 2, 3, 4, 55, 66}, c.id[:]...)) {
 		t.Errorf("compact bytes not encoded correctly")
 	}
 }
178 dht/rpc.go
@@ -1,178 +0,0 @@
-package dht
-
-import (
-	"encoding/hex"
-	"net"
-	"time"
-
-	"github.com/lbryio/errors.go"
-	"github.com/lbryio/lbry.go/util"
-
-	"github.com/davecgh/go-spew/spew"
-	"github.com/lyoshenka/bencode"
-	log "github.com/sirupsen/logrus"
-)
-
-// handlePacket handles packets received from udp.
-func handlePacket(dht *DHT, pkt packet) {
-	//log.Debugf("[%s] Received message from %s (%d bytes) %s", dht.node.id.HexShort(), pkt.raddr.String(), len(pkt.data), hex.EncodeToString(pkt.data))
-
-	if !util.InSlice(string(pkt.data[0:5]), []string{"d1:0i", "di0ei"}) {
-		log.Errorf("[%s] data is not a well-formatted dict: (%d bytes) %s", dht.node.id.HexShort(), len(pkt.data), hex.EncodeToString(pkt.data))
-		return
-	}
-
-	// TODO: test this stuff more thoroughly
-
-	// the following is a bit of a hack, but it lets us avoid decoding every message twice
-	// it depends on the data being a dict with 0 as the first key (so it starts with "d1:0i") and the message type as the first value
-
-	switch pkt.data[5] {
-	case '0' + requestType:
-		request := Request{}
-		err := bencode.DecodeBytes(pkt.data, &request)
-		if err != nil {
-			log.Errorf("[%s] error decoding request from %s: %s: (%d bytes) %s", dht.node.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
-			return
-		}
-		log.Debugf("[%s] query %s: received request from %s: %s(%s)", dht.node.id.HexShort(), request.ID.HexShort(), request.NodeID.HexShort(), request.Method, request.ArgsDebug())
-		handleRequest(dht, pkt.raddr, request)
-
-	case '0' + responseType:
-		response := Response{}
-		err := bencode.DecodeBytes(pkt.data, &response)
-		if err != nil {
-			log.Errorf("[%s] error decoding response from %s: %s: (%d bytes) %s", dht.node.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
-			return
-		}
-		log.Debugf("[%s] query %s: received response from %s: %s", dht.node.id.HexShort(), response.ID.HexShort(), response.NodeID.HexShort(), response.ArgsDebug())
-		handleResponse(dht, pkt.raddr, response)
-
-	case '0' + errorType:
-		e := Error{}
-		err := bencode.DecodeBytes(pkt.data, &e)
-		if err != nil {
-			log.Errorf("[%s] error decoding error from %s: %s: (%d bytes) %s", dht.node.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
-			return
-		}
-		log.Debugf("[%s] query %s: received error from %s: %s", dht.node.id.HexShort(), e.ID.HexShort(), e.NodeID.HexShort(), e.ExceptionType)
-		handleError(dht, pkt.raddr, e)
-
-	default:
-		log.Errorf("[%s] invalid message type: %s", dht.node.id.HexShort(), pkt.data[5])
-		return
-	}
-}
-
-// handleRequest handles the requests received from udp.
-func handleRequest(dht *DHT, addr *net.UDPAddr, request Request) {
-	if request.NodeID.Equals(dht.node.id) {
-		log.Warn("ignoring self-request")
-		return
-	}
-
-	switch request.Method {
-	default:
-		//		send(dht, addr, makeError(t, protocolError, "invalid q"))
-		log.Errorln("invalid request method")
-		return
-	case pingMethod:
-		send(dht, addr, Response{ID: request.ID, NodeID: dht.node.id, Data: pingSuccessResponse})
-	case storeMethod:
-		// TODO: we should be sending the IP in the request, not just using the sender's IP
-		// TODO: should we be using StoreArgs.NodeID or StoreArgs.Value.LbryID ???
-		if dht.tokens.Verify(request.StoreArgs.Value.Token, request.NodeID, addr) {
-			dht.store.Upsert(request.StoreArgs.BlobHash, Node{id: request.StoreArgs.NodeID, ip: addr.IP, port: request.StoreArgs.Value.Port})
-			send(dht, addr, Response{ID: request.ID, NodeID: dht.node.id, Data: storeSuccessResponse})
-		} else {
-			send(dht, addr, Error{ID: request.ID, NodeID: dht.node.id, ExceptionType: "invalid-token"})
-		}
-	case findNodeMethod:
-		if request.Arg == nil {
-			log.Errorln("request is missing arg")
-			return
-		}
-		send(dht, addr, getFindResponse(dht, request))
-
-	case findValueMethod:
-		if request.Arg == nil {
-			log.Errorln("request is missing arg")
-			return
-		}
-
-		if nodes := dht.store.Get(*request.Arg); len(nodes) > 0 {
-			send(dht, addr, Response{
-				ID:           request.ID,
-				NodeID:       dht.node.id,
-				FindValueKey: request.Arg.RawString(),
-				FindNodeData: nodes,
-				Token:        dht.tokens.Get(request.NodeID, addr),
-			})
-		} else {
-			res := getFindResponse(dht, request)
-			res.Token = dht.tokens.Get(request.NodeID, addr)
-			send(dht, addr, res)
-		}
-	}
-
-	// nodes that send us requests should not be inserted, only refreshed.
-	// the routing table must only contain "good" nodes, which are nodes that reply to our requests
-	// if a node is already good (aka in the table), its fine to refresh it
-	// http://www.bittorrent.org/beps/bep_0005.html#routing-table
-	node := Node{id: request.NodeID, ip: addr.IP, port: addr.Port}
-	dht.rt.UpdateIfExists(node)
-}
-
-func getFindResponse(dht *DHT, request Request) Response {
-	closestNodes := dht.rt.GetClosest(*request.Arg, bucketSize)
-	response := Response{
-		ID:           request.ID,
-		NodeID:       dht.node.id,
-		FindNodeData: make([]Node, len(closestNodes)),
-	}
-	for i, n := range closestNodes {
-		response.FindNodeData[i] = n
-	}
-	return response
-}
-
-// handleResponse handles responses received from udp.
-func handleResponse(dht *DHT, addr *net.UDPAddr, response Response) {
-	tx := dht.tm.Find(response.ID, addr)
-	if tx != nil {
-		tx.res <- response
-	}
-
-	node := Node{id: response.NodeID, ip: addr.IP, port: addr.Port}
-	dht.rt.Update(node)
-}
-
-// handleError handles errors received from udp.
-func handleError(dht *DHT, addr *net.UDPAddr, e Error) {
-	spew.Dump(e)
-	node := Node{id: e.NodeID, ip: addr.IP, port: addr.Port}
-	dht.rt.UpdateIfExists(node)
-}
-
-// send sends data to a udp address
-func send(dht *DHT, addr *net.UDPAddr, data Message) error {
-	encoded, err := bencode.EncodeBytes(data)
-	if err != nil {
-		return errors.Err(err)
-	}
-
-	if req, ok := data.(Request); ok {
-		log.Debugf("[%s] query %s: sending request to %s (%d bytes) %s(%s)",
-			dht.node.id.HexShort(), req.ID.HexShort(), addr.String(), len(encoded), req.Method, req.ArgsDebug())
-	} else if res, ok := data.(Response); ok {
-		log.Debugf("[%s] query %s: sending response to %s (%d bytes) %s",
-			dht.node.id.HexShort(), res.ID.HexShort(), addr.String(), len(encoded), res.ArgsDebug())
-	} else {
-		log.Debugf("[%s] (%d bytes) %s", dht.node.id.HexShort(), len(encoded), spew.Sdump(data))
-	}
-
-	dht.conn.SetWriteDeadline(time.Now().Add(time.Second * 15))
-
-	_, err = dht.conn.WriteToUDP(encoded, addr)
-	return errors.Err(err)
-}
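The deleted handlePacket avoided decoding every message twice by exploiting the bencode layout: each message is a dict whose first key is "0" and whose first value is the integer message type, so the packet always begins "d1:0i" and byte 5 is the type digit. A minimal standalone illustration of that dispatch trick; the constant values here are assumptions, since the real requestType/responseType/errorType definitions are not shown in this diff:

package main

import "fmt"

const (
	requestType  = 0 // assumed values; the real constants live elsewhere in package dht
	responseType = 1
	errorType    = 2
)

func main() {
	pkt := []byte("d1:0i0e") // prefix of a hypothetical request message
	switch pkt[5] {          // the digit after "d1:0i" is the message type
	case '0' + requestType:
		fmt.Println("request")
	case '0' + responseType:
		fmt.Println("response")
	case '0' + errorType:
		fmt.Println("error")
	}
}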
34 dht/store.go
@@ -3,7 +3,7 @@ package dht
 import "sync"
 
 type peer struct {
-	node Node
+	contact Contact
 	//<lastPublished>,
 	//<originallyPublished>
 	// <originalPublisherID>
@@ -12,42 +12,48 @@ type peer struct {
 type peerStore struct {
 	// map of blob hashes to (map of node IDs to bools)
 	hashes map[Bitmap]map[Bitmap]bool
-	// map of node IDs to peers
-	nodeInfo map[Bitmap]peer
+	// stores the peers themselves, so they can be updated in one place
+	peers map[Bitmap]peer
 	lock sync.RWMutex
 }
 
 func newPeerStore() *peerStore {
 	return &peerStore{
 		hashes: make(map[Bitmap]map[Bitmap]bool),
-		nodeInfo: make(map[Bitmap]peer),
+		peers: make(map[Bitmap]peer),
 	}
 }
 
-func (s *peerStore) Upsert(blobHash Bitmap, node Node) {
+func (s *peerStore) Upsert(blobHash Bitmap, contact Contact) {
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
 	if _, ok := s.hashes[blobHash]; !ok {
 		s.hashes[blobHash] = make(map[Bitmap]bool)
 	}
-	s.hashes[blobHash][node.id] = true
-	s.nodeInfo[node.id] = peer{node: node}
+	s.hashes[blobHash][contact.id] = true
+	s.peers[contact.id] = peer{contact: contact}
 }
 
-func (s *peerStore) Get(blobHash Bitmap) []Node {
+func (s *peerStore) Get(blobHash Bitmap) []Contact {
 	s.lock.RLock()
 	defer s.lock.RUnlock()
-	var nodes []Node
+
+	var contacts []Contact
 	if ids, ok := s.hashes[blobHash]; ok {
 		for id := range ids {
-			peer, ok := s.nodeInfo[id]
+			peer, ok := s.peers[id]
 			if !ok {
 				panic("node id in IDs list, but not in nodeInfo")
 			}
-			nodes = append(nodes, peer.node)
+			contacts = append(contacts, peer.contact)
 		}
 	}
-	return nodes
-}
+	return contacts
+}
+
+func (s *peerStore) RemoveTODO(contact Contact) {
+	// TODO: remove peer from everywhere
+}
 
 func (s *peerStore) CountStoredHashes() int {
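Because peerStore keys both maps by contact ID, repeated announcements from the same contact collapse into a single entry; TestStore above depends on exactly this (three Upserts of the same contact, one item returned). A sketch inside package dht:

func examplePeerStoreDedup() {
	s := newPeerStore()
	blob := RandomBitmapP()
	c := Contact{id: RandomBitmapP(), ip: net.ParseIP("1.2.3.4"), port: 1286}
	s.Upsert(blob, c)
	s.Upsert(blob, c) // same contact id: overwrites the peer entry, does not duplicate it
	fmt.Println(len(s.Get(blob))) // 1
}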
@@ -1,125 +0,0 @@
-package dht
-
-import (
-	"context"
-	"net"
-	"strings"
-	"sync"
-	"time"
-
-	log "github.com/sirupsen/logrus"
-)
-
-// transaction represents a single query to the dht. it stores the queried node, the request, and the response channel
-type transaction struct {
-	node Node
-	req  Request
-	res  chan Response
-}
-
-// transactionManager keeps track of the outstanding transactions
-type transactionManager struct {
-	dht          *DHT
-	lock         *sync.RWMutex
-	transactions map[messageID]*transaction
-}
-
-// newTransactionManager returns a new transactionManager
-func newTransactionManager(dht *DHT) *transactionManager {
-	return &transactionManager{
-		lock:         &sync.RWMutex{},
-		transactions: make(map[messageID]*transaction),
-		dht:          dht,
-	}
-}
-
-// insert adds a transaction to the manager.
-func (tm *transactionManager) insert(tx *transaction) {
-	tm.lock.Lock()
-	defer tm.lock.Unlock()
-	tm.transactions[tx.req.ID] = tx
-}
-
-// delete removes a transaction from the manager.
-func (tm *transactionManager) delete(id messageID) {
-	tm.lock.Lock()
-	defer tm.lock.Unlock()
-	delete(tm.transactions, id)
-}
-
-// Find finds a transaction for the given id. it optionally ensures that addr matches node from transaction
-func (tm *transactionManager) Find(id messageID, addr *net.UDPAddr) *transaction {
-	tm.lock.RLock()
-	defer tm.lock.RUnlock()
-
-	// TODO: also check that the response's nodeid matches the id you thought you sent to?
-
-	t, ok := tm.transactions[id]
-	if !ok || (addr != nil && t.node.Addr().String() != addr.String()) {
-		return nil
-	}
-
-	return t
-}
-
-// SendAsync sends a transaction and returns a channel that will eventually contain the transaction response
-// The response channel is closed when the transaction is completed or times out.
-func (tm *transactionManager) SendAsync(ctx context.Context, node Node, req Request) <-chan *Response {
-	if node.id.Equals(tm.dht.node.id) {
-		log.Error("sending query to self")
-		return nil
-	}
-
-	ch := make(chan *Response, 1)
-
-	go func() {
-		defer close(ch)
-
-		req.ID = newMessageID()
-		req.NodeID = tm.dht.node.id
-		tx := &transaction{
-			node: node,
-			req:  req,
-			res:  make(chan Response),
-		}
-
-		tm.insert(tx)
-		defer tm.delete(tx.req.ID)
-
-		for i := 0; i < udpRetry; i++ {
-			if err := send(tm.dht, node.Addr(), tx.req); err != nil {
-				if !strings.Contains(err.Error(), "use of closed network connection") { // this only happens on localhost. real UDP has no connections
-					log.Error("send error: ", err)
-				}
-				continue // try again? return?
-			}
-
-			select {
-			case res := <-tx.res:
-				ch <- &res
-				return
-			case <-ctx.Done():
-				return
-			case <-time.After(udpTimeout):
-			}
-		}
-
-		// if request timed out each time
-		tm.dht.rt.RemoveByID(tx.node.id)
-	}()
-
-	return ch
-}
-
-// Send sends a transaction and blocks until the response is available. It returns a response, or nil
-// if the transaction timed out.
-func (tm *transactionManager) Send(node Node, req Request) *Response {
-	return <-tm.SendAsync(context.Background(), node, req)
-}
-
-// Count returns the number of transactions in the manager
-func (tm *transactionManager) Count() int {
-	tm.lock.Lock()
-	defer tm.lock.Unlock()
-	return len(tm.transactions)
-}
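The deleted SendAsync captures the query lifecycle this commit relocates: register the transaction under a fresh message ID, try the UDP send up to udpRetry times, and race the response channel against cancellation and a per-attempt timeout, evicting the peer via RemoveByID if every attempt times out. A standalone skeleton of that select-with-retry pattern; every name below is a placeholder, not the package's API:

package sketch

import (
	"context"
	"time"
)

// queryWithRetry resends until a response arrives, the context is cancelled,
// or the attempts are exhausted — the same shape as SendAsync above.
func queryWithRetry(ctx context.Context, attempts int, timeout time.Duration,
	sendOnce func() error, res <-chan string) (string, bool) {
	for i := 0; i < attempts; i++ {
		if err := sendOnce(); err != nil {
			continue // failed write: try again, as SendAsync does
		}
		select {
		case r := <-res:
			return r, true // response for this transaction arrived
		case <-ctx.Done():
			return "", false // caller gave up
		case <-time.After(timeout):
			// per-attempt timeout: loop and resend
		}
	}
	return "", false // peer never answered; the caller may evict it from the routing table
}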