Rework federation to be synchronous, and add tests

Jeffrey Picard 2021-10-30 01:27:25 -04:00
parent a67f9b43d1
commit 914d2bfc61
7 changed files with 675 additions and 274 deletions

go.mod

@@ -8,6 +8,7 @@ require (
 	github.com/lbryio/lbry.go/v2 v2.7.2-0.20210625145058-2b155597bf57
 	github.com/olivere/elastic/v7 v7.0.24
 	github.com/prometheus/client_golang v1.11.0
+	github.com/prometheus/client_model v0.2.0
 	golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
 	golang.org/x/text v0.3.6
 	google.golang.org/genproto v0.0.0-20210524171403-669157292da3 // indirect

(file name not shown; Prometheus metric definitions in internal/metrics)

@@ -22,5 +22,13 @@ var (
 		Help:    "Histogram of query times",
 		Buckets: HistogramBuckets,
 	}, []string{"method"})
+	PeersKnown = promauto.NewGauge(prometheus.GaugeOpts{
+		Name: "peers_known",
+		Help: "Number of peers we know about.",
+	})
+	PeersSubscribed = promauto.NewGauge(prometheus.GaugeOpts{
+		Name: "peers_subbed",
+		Help: "Number of peers that are subscribed to us.",
+	})
 )
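The two gauges added here are driven by the reworked federation code later in this commit (Inc/Dec as peers and subscribers come and go) and read back by the new tests through the client_model DTO, which is why go.mod now pulls in github.com/prometheus/client_model. A minimal sketch of that usage, not part of the commit itself:

package main

import (
	"fmt"

	"github.com/lbryio/hub/internal/metrics"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Server code bumps the gauges as state changes ...
	metrics.PeersKnown.Inc()      // addPeer stored a previously unknown peer
	metrics.PeersSubscribed.Inc() // PeerSubscribe saw a new subscriber

	// ... and tests (or debugging code) can read the current value back
	// through the client_model DTO, as the new federation tests below do.
	m := &dto.Metric{}
	if err := metrics.PeersKnown.Write(m); err == nil {
		fmt.Printf("peers_known = %d\n", int(*m.Gauge.Value))
	}
}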

main.go

@@ -4,14 +4,12 @@ import (
 	"context"
 	"fmt"
 	"log"
-	"net"
 	"time"
 
 	pb "github.com/lbryio/hub/protobuf/go"
 	"github.com/lbryio/hub/server"
 	"github.com/lbryio/lbry.go/v2/extras/util"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/reflection"
 )
@@ -26,20 +24,21 @@ func main() {
 	ctxWCancel, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	l, err := net.Listen("tcp", ":"+args.Port)
-	if err != nil {
-		log.Fatalf("failed to listen: %v", err)
-	}
-
 	s := server.MakeHubServer(ctxWCancel, args)
-	pb.RegisterHubServer(s.GrpcServer, s)
-	reflection.Register(s.GrpcServer)
-
-	log.Printf("listening on %s\n", l.Addr().String())
-	log.Println(s.Args)
-	if err := s.GrpcServer.Serve(l); err != nil {
-		log.Fatalf("failed to serve: %v", err)
-	}
+	s.Run()
+	//l, err := net.Listen("tcp", ":"+args.Port)
+	//if err != nil {
+	//	log.Fatalf("failed to listen: %v", err)
+	//}
+	//
+	//pb.RegisterHubServer(s.GrpcServer, s)
+	//reflection.Register(s.GrpcServer)
+	//
+	//log.Printf("listening on %s\n", l.Addr().String())
+	//log.Println(s.Args)
+	//if err := s.GrpcServer.Serve(l); err != nil {
+	//	log.Fatalf("failed to serve: %v", err)
+	//}
 
 	return
 }
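Net effect of the main.go change, as a hedged sketch (not part of the commit): the listener, gRPC registration, and Serve loop move into server.Run(), added in the last file of this diff, so the serve path reduces to roughly the following. The Args literal is only a placeholder; the real main builds Args from the command-line parsing shown in the next file.

package main

import (
	"context"

	"github.com/lbryio/hub/server"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Placeholder args -- the real main gets these from the argparse flags.
	args := &server.Args{DisableEs: true}

	s := server.MakeHubServer(ctx, args)
	s.Run() // listens on args.Port, registers the hub service, and blocks in Serve
}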

(file name not shown; hub Args and command-line flag parsing)

@@ -22,15 +22,15 @@ type Args struct {
 	UDPPort         string
 	EsHost          string
 	EsPort          string
-	DisableEs       bool
 	PrometheusPort  string
 	EsIndex         string
-	Debug           bool
 	RefreshDelta    int
 	CacheTTL        int
 	PeerFile        string
 	Country         string
-	StartPeerAdder  bool
+	DisableEs       bool
+	Debug           bool
+	LoadPeers       bool
 	StartPrometheus bool
 	StartUDP        bool
 	WritePeers      bool
@@ -48,7 +48,7 @@ const (
 	DefaultCacheTTL        = 5
 	DefaultPeerFile        = "peers.txt"
 	DefaultCountry         = "US"
-	DefaultStartPeerAdder  = true
+	DefaultLoadPeers       = true
 	DefaultStartPrometheus = true
 	DefaultStartUDP        = true
 	DefaultWritePeers      = true
@@ -83,8 +83,6 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
 	serveCmd := parser.NewCommand("serve", "start the hub server")
 	searchCmd := parser.NewCommand("search", "claim search")
 
-	debug := parser.Flag("", "debug", &argparse.Options{Required: false, Help: "enable debug logging", Default: false})
-	disableEs := parser.Flag("", "disable-es", &argparse.Options{Required: false, Help: "Disable elastic search, for running/testing independently", Default: false})
-
 	host := parser.String("", "rpchost", &argparse.Options{Required: false, Help: "RPC host", Default: DefaultHost})
 	port := parser.String("", "rpcport", &argparse.Options{Required: false, Help: "RPC port", Default: DefaultPort})
@@ -97,7 +95,10 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
 	cacheTTL := parser.Int("", "cachettl", &argparse.Options{Required: false, Help: "Cache TTL in minutes", Default: DefaultCacheTTL})
 	peerFile := parser.String("", "peerfile", &argparse.Options{Required: false, Help: "Initial peer file for federation", Default: DefaultPeerFile})
 	country := parser.String("", "country", &argparse.Options{Required: false, Help: "Country this node is running in. Default US.", Default: DefaultCountry})
-	startPeerAdder := parser.Flag("", "start-peer-adder", &argparse.Options{Required: false, Help: "Start peer adder service", Default: DefaultStartPeerAdder})
+	debug := parser.Flag("", "debug", &argparse.Options{Required: false, Help: "enable debug logging", Default: false})
+	disableEs := parser.Flag("", "disable-es", &argparse.Options{Required: false, Help: "Disable elastic search, for running/testing independently", Default: false})
+	loadPeers := parser.Flag("", "load-peers", &argparse.Options{Required: false, Help: "load peers from disk at startup", Default: DefaultLoadPeers})
 	startPrometheus := parser.Flag("", "start-prometheus", &argparse.Options{Required: false, Help: "Start prometheus server", Default: DefaultStartPrometheus})
 	startUdp := parser.Flag("", "start-udp", &argparse.Options{Required: false, Help: "Start UDP ping server", Default: DefaultStartUDP})
 	writePeers := parser.Flag("", "write-peers", &argparse.Options{Required: false, Help: "Write peer to disk as we learn about them", Default: DefaultWritePeers})
@@ -125,15 +126,15 @@ func ParseArgs(searchRequest *pb.SearchRequest) *Args {
 		EsHost:          *esHost,
 		EsPort:          *esPort,
 		UDPPort:         *udpPort,
-		DisableEs:       *disableEs,
 		PrometheusPort:  *prometheusPort,
 		EsIndex:         *esIndex,
-		Debug:           *debug,
 		RefreshDelta:    *refreshDelta,
 		CacheTTL:        *cacheTTL,
 		PeerFile:        *peerFile,
 		Country:         *country,
-		StartPeerAdder:  *startPeerAdder,
+		DisableEs:       *disableEs,
+		Debug:           *debug,
+		LoadPeers:       *loadPeers,
 		StartPrometheus: *startPrometheus,
 		StartUDP:        *startUdp,
 		WritePeers:      *writePeers,

(file name not shown; peer federation code for the hub server)

@@ -6,19 +6,14 @@ import (
 	"log"
 	"os"
 	"strings"
+	"sync/atomic"
 	"time"
 
+	"github.com/lbryio/hub/internal/metrics"
 	pb "github.com/lbryio/hub/protobuf/go"
 	"google.golang.org/grpc"
 )
 
-// peerAddMsg is an internal structure for use in the channel communicating
-// to the peerAdder gorountine.
-type peerAddMsg struct {
-	msg  *pb.ServerMessage
-	ping bool
-}
-
 // FederatedServer hold relevant information about peers that we known about.
 type FederatedServer struct {
 	Address string
@@ -26,29 +21,56 @@ type FederatedServer struct {
 	Ts      time.Time
 }
 
+var (
+	localHosts = map[string]bool{
+		"127.0.0.1": true,
+		"0.0.0.0":   true,
+		"localhost": true,
+	}
+)
+
 // peerKey takes a ServerMessage object and returns the key that for that peer
 // in our peer table.
 func peerKey(msg *pb.ServerMessage) string {
 	return msg.Address + ":" + msg.Port
 }
 
+func (s *Server) incNumPeers() {
+	atomic.AddInt64(s.NumPeerServers, 1)
+}
+
+func (s *Server) decNumPeers() {
+	atomic.AddInt64(s.NumPeerServers, -1)
+}
+
+func (s *Server) getNumPeers() int64 {
+	return *s.NumPeerServers
+}
+
+func (s *Server) incNumSubs() {
+	atomic.AddInt64(s.NumPeerSubs, 1)
+}
+
+func (s *Server) decNumSubs() {
+	atomic.AddInt64(s.NumPeerSubs, -1)
+}
+
+func (s *Server) getNumSubs() int64 {
+	return *s.NumPeerSubs
+}
+
 // loadPeers takes the arguments given to the hub at startup and loads the
 // previously known peers from disk and verifies their existence before
 // storing them as known peers. Returns a map of peerKey -> object
-func loadPeers(args *Args) map[string]*FederatedServer {
-	localHosts := map[string]bool {
-		"127.0.0.1": true,
-		"0.0.0.0": true,
-		"localhost": true,
-	}
-	servers := make(map[string]*FederatedServer)
-	peerFile := args.PeerFile
-	port := args.Port
+func (s *Server) loadPeers() error {
+	peerFile := s.Args.PeerFile
+	port := s.Args.Port
 
 	f, err := os.Open(peerFile)
 	if err != nil {
 		log.Println(err)
-		return map[string]*FederatedServer{}
+		return err
 	}
 	scanner := bufio.NewScanner(f)
 	scanner.Split(bufio.ScanLines)
@@ -68,25 +90,164 @@ func loadPeers(args *Args) map[string]*FederatedServer {
 			continue
 		}
 		// If the peer is us, skip
-		log.Println(args)
 		log.Println(ipPort)
 		if ipPort[1] == port && localHosts[ipPort[0]] {
 			log.Println("Self peer, skipping ...")
 			continue
 		}
-		server := &FederatedServer{
+		srvMsg := &pb.ServerMessage{
 			Address: ipPort[0],
 			Port:    ipPort[1],
-			Ts:      time.Now(),
 		}
-		log.Println("pinging peer", server)
-		if helloPeer(server, args) {
-			servers[line] = server
+		log.Printf("pinging peer %+v\n", srvMsg)
+		err := s.addPeer(srvMsg, true)
+		if err != nil {
+			log.Println(err)
 		}
 	}
 
 	log.Println("Returning from loadPeers")
-	return servers
+	return nil
 }
// getFastestPeer determines the fastest peer in its list of peers by sending
// out udp pings and seeing who responds first. This is currently not
// implemented and just returns the first peer.
func (s *Server) getFastestPeer() *FederatedServer {
var fastestPeer *FederatedServer
s.PeerServers.Range(func(_, v interface{}) bool {
fastestPeer = v.(*FederatedServer)
return false
})
return fastestPeer
}
// subscribeToFastestPeer is a convenience function to find and subscribe to
// the fastest peer we know about.
func (s *Server) subscribeToFastestPeer() {
peer := s.getFastestPeer()
if peer != nil {
err := s.subscribeToPeer(peer)
if err != nil {
log.Println(err)
}
} else {
log.Println("No peers found, not subscribed to any.")
}
}
// subscribeToPeer subscribes us to a peer so we'll get updates about their
// known peers.
func (s *Server) subscribeToPeer(peer *FederatedServer) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx,
peer.Address+":"+peer.Port,
grpc.WithInsecure(),
grpc.WithBlock(),
)
if err != nil {
return err
}
defer conn.Close()
msg := &pb.ServerMessage{
Address: s.Args.Host,
Port: s.Args.Port,
}
c := pb.NewHubClient(conn)
log.Printf("%s:%s subscribing to %+v\n", s.Args.Host, s.Args.Port, peer)
_, err = c.PeerSubscribe(ctx, msg)
if err != nil {
return err
}
s.Subscribed = true
return nil
}
// helloPeer takes a peer to say hello to and sends a hello message
// containing all the peers we know about and information about us.
// This is used to confirm existence of peers on start and let them
// know about us. Returns the response from the server on success,
// nil otherwise.
func (s *Server) helloPeer(server *FederatedServer) (*pb.HelloMessage, error) {
log.Println("In helloPeer")
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx,
server.Address+":"+server.Port,
grpc.WithInsecure(),
grpc.WithBlock(),
)
if err != nil {
log.Println(err)
return nil, err
}
defer conn.Close()
c := pb.NewHubClient(conn)
msg := &pb.HelloMessage{
Port: s.Args.Port,
Host: s.Args.Host,
Servers: []*pb.ServerMessage{},
}
log.Printf("%s:%s saying hello to %+v\n", s.Args.Host, s.Args.Port, server)
res, err := c.Hello(ctx, msg)
if err != nil {
log.Println(err)
return nil, err
}
log.Println(res)
return res, nil
}
// writePeers writes our current known peers to disk.
func (s *Server) writePeers() {
if !s.Args.WritePeers {
return
}
f, err := os.Create(s.Args.PeerFile)
if err != nil {
log.Println(err)
return
}
writer := bufio.NewWriter(f)
s.PeerServers.Range(func(k, _ interface{}) bool {
key, ok := k.(string)
if !ok {
log.Println("Failed to cast key when writing peers: ", k)
return true
}
line := key + "\n"
_, err := writer.WriteString(line)
if err != nil {
log.Println(err)
}
return true
})
err = writer.Flush()
if err != nil {
log.Println(err)
}
err = f.Close()
if err != nil {
log.Println(err)
}
}
@@ -120,115 +281,8 @@ func notifyPeer(peerToNotify *FederatedServer, newPeer *FederatedServer) error {
 	return nil
 }
 
-// helloPeer takes a peer to say hello to and sends a hello message
-// containing all the peers we know about and information about us.
-// This is used to confirm existence of peers on start and let them
-// know about us. Returns true is call was successful, false otherwise.
+// notifyPeerSubs takes a new peer server we just learned about and notifies
+// all the peers that have subscribed to us about it.
func helloPeer(server *FederatedServer, args *Args) bool {
log.Println("In helloPeer")
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx,
server.Address+":"+server.Port,
grpc.WithInsecure(),
grpc.WithBlock(),
)
if err != nil {
log.Println(err)
return false
}
defer conn.Close()
c := pb.NewHubClient(conn)
msg := &pb.HelloMessage{
Port: args.Port,
Host: args.Host,
Servers: []*pb.ServerMessage{},
}
res, err := c.Hello(ctx, msg)
if err != nil {
log.Println(err)
return false
}
log.Println(res)
return true
}
// writePeers writes our current known peers to disk.
func (s *Server) writePeers() {
if !s.Args.WritePeers {
return
}
failedCreat := "WARNING: Peer writer failed to create peer file, it's still running but may not be working!"
failedWrite := "WARNING: Peer writer failed to write a line, it's still running but may not be working!"
failedFlush := "WARNING: Peer writer failed to flush, it's still running but may not be working!"
failedClose := "WARNING: Peer writer failed to close the peer file, it's still running but may not be working!"
f, err := os.Create(s.Args.PeerFile)
if err != nil {
log.Println(failedCreat)
log.Println(err)
}
writer := bufio.NewWriter(f)
for _, peer := range s.Servers {
line := peer.Address + ":" + peer.Port + "\n"
_, err := writer.WriteString(line)
if err != nil {
log.Println(failedWrite)
log.Println(err)
}
}
err = writer.Flush()
if err != nil {
log.Println(failedFlush)
log.Println(err)
}
err = f.Close()
if err != nil {
log.Println(failedClose)
log.Println(err)
}
}
// peerAdder is a goroutine which listens for new peers added and then
// optionally checks if they're online and adds them to our map of
// peers in a thread safe manner.
func (s *Server) peerAdder(ctx context.Context) {
for {
select {
case chanMsg := <-s.peerChannel:
msg := chanMsg.msg
ping := chanMsg.ping
k := msg.Address + ":" + msg.Port
if _, ok := s.Servers[k]; !ok {
newServer := &FederatedServer{
Address: msg.Address,
Port: msg.Port,
Ts: time.Now(),
}
if !ping || helloPeer(newServer, s.Args) {
s.Servers[k] = newServer
s.writePeers()
s.notifyPeerSubs(newServer)
}
} else {
s.Servers[k].Ts = time.Now()
}
case <-ctx.Done():
log.Println("context finished, peerAdder shutting down.")
return
}
}
}
 func (s *Server) notifyPeerSubs(newServer *FederatedServer) {
 	var unsubscribe []string
 	s.PeerSubs.Range(func(k, v interface{}) bool {
@@ -254,37 +308,96 @@ func (s *Server) notifyPeerSubs(newServer *FederatedServer) {
 	})
 
 	for _, key := range unsubscribe {
+		s.decNumSubs()
+		metrics.PeersSubscribed.Dec()
 		s.PeerSubs.Delete(key)
 	}
 }
 
-// addPeer is an internal function to add a peer to this hub.
-func (s *Server) addPeer(msg *pb.ServerMessage, ping bool) {
-	s.peerChannel <- &peerAddMsg{msg, ping}
+// addPeer takes a new peer as a pb.ServerMessage, optionally checks to see
+// if they're online, and adds them to our list of peers. If we're not currently
+// subscribed to a peer, it will also subscribe to it.
+func (s *Server) addPeer(msg *pb.ServerMessage, ping bool) error {
if s.Args.Port == msg.Port &&
(localHosts[msg.Address] || msg.Address == s.Args.Host) {
log.Printf("%s:%s addPeer: Self peer, skipping...\n", s.Args.Host, s.Args.Port)
return nil
}
k := peerKey(msg)
newServer := &FederatedServer{
Address: msg.Address,
Port: msg.Port,
Ts: time.Now(),
}
log.Printf("%s:%s adding peer %+v\n", s.Args.Host, s.Args.Port, msg)
if oldServer, loaded := s.PeerServers.LoadOrStore(k, newServer); !loaded {
if ping {
_, err := s.helloPeer(newServer)
if err != nil {
s.PeerServers.Delete(k)
return err
}
}
s.incNumPeers()
metrics.PeersKnown.Inc()
s.writePeers()
s.notifyPeerSubs(newServer)
// If we aren't subscribed to a server yet, subscribe to
// this one.
if !s.Subscribed {
err := s.subscribeToPeer(newServer)
if err != nil {
s.PeerServers.Delete(k)
return err
} else {
s.Subscribed = true
}
}
} else {
oldServerCast, ok := oldServer.(*FederatedServer)
// This shouldn't happen, but if it does, I guess delete the key
// and try adding this one since it's new.
if !ok {
log.Println("Error casting map value: ", oldServer)
s.PeerServers.Delete(k)
return s.addPeer(msg, ping)
}
oldServerCast.Ts = time.Now()
}
return nil
}
 // mergeFederatedServers is an internal convenience function to add a list of
 // peers.
 func (s *Server) mergeFederatedServers(servers []*pb.ServerMessage) {
 	for _, srvMsg := range servers {
-		s.peerChannel <- &peerAddMsg{srvMsg, false}
+		err := s.addPeer(srvMsg, false)
+		// This shouldn't happen because we're not pinging them.
+		if err != nil {
+			log.Println(err)
+		}
 	}
 }
 // makeHelloMessage makes a message for this hub to call the Hello endpoint
 // on another hub.
 func (s *Server) makeHelloMessage() *pb.HelloMessage {
-	n := len(s.Servers)
-	servers := make([]*pb.ServerMessage, n)
-
-	var i = 0
-	for _, v := range s.Servers {
-		servers[i] = &pb.ServerMessage{
-			Address: v.Address,
-			Port:    v.Port,
-		}
-		i += 1
-	}
+	servers := make([]*pb.ServerMessage, 0, 10)
+
+	s.PeerServers.Range(func(_, v interface{}) bool {
+		peer, ok := v.(*FederatedServer)
+		if !ok {
+			log.Println("Failed to cast value in makeHelloMessage", v)
+			return true
+		}
+		servers = append(servers, &pb.ServerMessage{
+			Address: peer.Address,
+			Port:    peer.Port,
+		})
+		return true
+	})
 
 	return &pb.HelloMessage{
 		Port: s.Args.Port,
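The heart of the rework is visible in addPeer above: instead of pushing a peerAddMsg into peerChannel for the old peerAdder goroutine, callers now do a synchronous LoadOrStore on a sync.Map and bump an atomic counter, so the result (including any hello/ping error) comes back on the same call. A stripped-down, self-contained sketch of that pattern — the type and field names below are illustrative, not from the repo:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type peer struct {
	Address, Port string
	Ts            time.Time
}

type peerTable struct {
	peers sync.Map // "host:port" -> *peer
	count int64
}

// add returns true if the peer was new; duplicates just get their timestamp
// refreshed, mirroring the LoadOrStore branch in addPeer.
func (t *peerTable) add(address, port string) bool {
	k := address + ":" + port
	p := &peer{Address: address, Port: port, Ts: time.Now()}
	if old, loaded := t.peers.LoadOrStore(k, p); loaded {
		old.(*peer).Ts = time.Now()
		return false
	}
	atomic.AddInt64(&t.count, 1)
	return true
}

func main() {
	t := &peerTable{}
	fmt.Println(t.add("1.1.1.1", "50051"))  // true: new peer
	fmt.Println(t.add("1.1.1.1", "50051"))  // false: already known
	fmt.Println(atomic.LoadInt64(&t.count)) // 1
}

Because the work is done inline, the updated tests below can assert on the peer count immediately after addPeer returns, instead of sleeping and waiting for the old goroutine to catch up.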

(file name not shown; federation tests)

@@ -8,9 +8,11 @@ import (
 	"os"
 	"strings"
 	"testing"
-	"time"
 
+	"github.com/lbryio/hub/internal/metrics"
 	pb "github.com/lbryio/hub/protobuf/go"
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/grpc"
 )
// lineCountFile takes a fileName and counts the number of lines in it. // lineCountFile takes a fileName and counts the number of lines in it.
@@ -40,9 +42,7 @@ func removeFile(fileName string) {
 	}
 }
 
-// TestPeerAdder tests the peer adder goroutine.
-func TestPeerAdder(t *testing.T) {
-	ctx := context.Background()
+func makeDefaultArgs() *Args {
 	args := &Args{
 		CmdType: ServeCmd,
 		Host:    DefaultHost,
@@ -50,20 +50,28 @@ func TestPeerAdder(t *testing.T) {
 		EsHost:          DefaultEsHost,
 		EsPort:          DefaultEsPort,
 		UDPPort:         DefaultUdpPort,
-		DisableEs:       true,
 		PrometheusPort:  DefaultPrometheusPort,
 		EsIndex:         DefaultEsIndex,
-		Debug:           true,
 		RefreshDelta:    DefaultRefreshDelta,
 		CacheTTL:        DefaultCacheTTL,
 		PeerFile:        DefaultPeerFile,
 		Country:         DefaultCountry,
-		StartPeerAdder:  false,
+		DisableEs:       true,
+		Debug:           true,
+		LoadPeers:       false,
 		StartPrometheus: false,
 		StartUDP:        false,
 		WritePeers:      false,
 	}
+
+	return args
+}
+
+// TestAddPeer tests the ability to add peers
+func TestAddPeer(t *testing.T) {
+	ctx := context.Background()
+	args := makeDefaultArgs()
 
 	tests := []struct {
 		name string
 		want int
@@ -81,67 +89,47 @@ func TestPeerAdder(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			server := MakeHubServer(ctx, args)
-			ctxWCancel, cancel := context.WithCancel(ctx)
+			server.Subscribed = true
+			metrics.PeersKnown.Set(0)
 
-			go server.peerAdder(ctxWCancel)
 			for i := 0; i < 10; i++ {
-				var msg *peerAddMsg
+				var msg *pb.ServerMessage
 				if strings.Contains(tt.name, "1 unique") {
-					msg = &peerAddMsg{
-						msg: &pb.ServerMessage{
-							Address: "1.1.1.1",
-							Port:    "50051",
-						},
-						ping: false,
+					msg = &pb.ServerMessage{
+						Address: "1.1.1.1",
+						Port:    "50051",
 					}
 				} else {
-					msg = &peerAddMsg{
-						msg: &pb.ServerMessage{
-							Address: fmt.Sprintf("%d.%d.%d.%d", i, i, i, i),
-							Port:    "50051",
-						},
-						ping: false,
+					x := i + 1
+					msg = &pb.ServerMessage{
+						Address: fmt.Sprintf("%d.%d.%d.%d", x, x, x, x),
+						Port:    "50051",
 					}
 				}
-				server.peerChannel <- msg
+				//log.Printf("Adding peer %+v\n", msg)
+				err := server.addPeer(msg, false)
+				if err != nil {
+					log.Println(err)
+				}
 			}
-			// Have to give it a second to update peers since it's in
-			// another thread.
-			time.Sleep(time.Second)
-			got := len(server.Servers)
+
+			var m = &dto.Metric{}
+			if err := metrics.PeersKnown.Write(m); err != nil {
+				t.Errorf("Error getting metrics %+v\n", err)
+			}
+			got := int(*m.Gauge.Value)
 			if got != tt.want {
-				t.Errorf("len(server.Servers) = %d, want %d", got, tt.want)
+				t.Errorf("len(server.PeerServers) = %d, want %d\n", got, tt.want)
 			}
-			cancel()
 		})
 	}
 }
-// TestPeerWriter tests that the peerAdder goroutine writes the peer file
-// properly when set to do so.
+// TestPeerWriter tests that peers get written properly
 func TestPeerWriter(t *testing.T) {
 	ctx := context.Background()
-	args := &Args{
-		CmdType:         ServeCmd,
-		Host:            DefaultHost,
-		Port:            DefaultPort,
-		EsHost:          DefaultEsHost,
-		EsPort:          DefaultEsPort,
-		UDPPort:         DefaultUdpPort,
-		DisableEs:       true,
-		PrometheusPort:  DefaultPrometheusPort,
-		EsIndex:         DefaultEsIndex,
-		Debug:           true,
-		RefreshDelta:    DefaultRefreshDelta,
-		CacheTTL:        DefaultCacheTTL,
-		PeerFile:        DefaultPeerFile,
-		Country:         DefaultCountry,
-		StartPeerAdder:  false,
-		StartPrometheus: false,
-		StartUDP:        false,
-		WritePeers:      true,
-	}
+	args := makeDefaultArgs()
+	args.WritePeers = true
 
 	tests := []struct {
 		name string
@@ -160,40 +148,279 @@ func TestPeerWriter(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			server := MakeHubServer(ctx, args)
-			ctxWCancel, cancel := context.WithCancel(ctx)
+			server.Subscribed = true
 
-			go server.peerAdder(ctxWCancel)
 			for i := 0; i < 10; i++ {
-				var msg *peerAddMsg
+				var msg *pb.ServerMessage
 				if strings.Contains(tt.name, "1 unique") {
-					msg = &peerAddMsg{
-						msg: &pb.ServerMessage{
-							Address: "1.1.1.1",
-							Port:    "50051",
-						},
-						ping: false,
+					msg = &pb.ServerMessage{
+						Address: "1.1.1.1",
+						Port:    "50051",
 					}
 				} else {
-					msg = &peerAddMsg{
-						msg: &pb.ServerMessage{
-							Address: fmt.Sprintf("%d.%d.%d.%d", i, i, i, i),
-							Port:    "50051",
-						},
-						ping: false,
+					msg = &pb.ServerMessage{
+						Address: fmt.Sprintf("%d.%d.%d.%d", i, i, i, i),
+						Port:    "50051",
 					}
 				}
-				server.peerChannel <- msg
+				//log.Printf("Adding peer %+v\n", msg)
+				err := server.addPeer(msg, false)
+				if err != nil {
+					log.Println(err)
+				}
 			}
-			// Have to give it a second to update peers since it's in
-			// another thread.
-			time.Sleep(time.Second * 1)
+
+			//log.Println("Counting lines...")
 			got := lineCountFile(server.Args.PeerFile)
 			if got != tt.want {
-				t.Errorf("len(server.Servers) = %d, want %d", got, tt.want)
+				t.Errorf("lineCountFile(peers.txt) = %d, want %d", got, tt.want)
 			}
-			cancel()
 		})
 	}
 
 	removeFile(args.PeerFile)
 }
// TestAddPeerEndpoint tests the ability to add peers
func TestAddPeerEndpoint(t *testing.T) {
ctx := context.Background()
args := makeDefaultArgs()
args2 := makeDefaultArgs()
args2.Port = "50052"
tests := []struct {
name string
wantServerOne int64
wantServerTwo int64
} {
{
// outside -> server1.AddPeer(server2, ping=true) : server1 = 1, server2 = 0
// server1 -> server2.Hello(server1) : server1 = 1, server2 = 0
// server2 -> server2.addPeer(server1, ping=false) : server1 = 1, server2 = 1
// server2 -> server1.PeerSubscribe(server2) : server1 = 1, server2 = 1
// server1 <- server2.makeHelloMessage() : server1 = 1, server2 = 1
// server1.notifyPeer() : server1 = 1, server2 = 1
// server1 -> server2.AddPeer(server2) : server1 = 1, server2 = 1
// server2 self peer, skipping : server1 = 1, server2 = 1
// server1 -> server2.PeerSubscribe(server1) : server1 = 1, server2 = 1
name: "Add 1 peer",
wantServerOne: 1,
wantServerTwo: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T){
server := MakeHubServer(ctx, args)
server2 := MakeHubServer(ctx, args2)
metrics.PeersKnown.Set(0)
go server.Run()
go server2.Run()
//go server.Run()
conn, err := grpc.Dial("localhost:"+args.Port,
grpc.WithInsecure(),
grpc.WithBlock(),
)
if err != nil {
log.Fatalf("did not connect: %v", err)
}
c := pb.NewHubClient(conn)
msg := &pb.ServerMessage{
Address: "0.0.0.0",
Port: "50052",
}
_, err = c.AddPeer(context.Background(), msg)
if err != nil {
log.Println(err)
}
server.GrpcServer.GracefulStop()
server2.GrpcServer.GracefulStop()
got1 := server.getNumPeers()
got2 := server2.getNumPeers()
if got1 != tt.wantServerOne {
t.Errorf("len(server.PeerServers) = %d, want %d\n", got1, tt.wantServerOne)
}
if got2 != tt.wantServerTwo {
t.Errorf("len(server2.PeerServers) = %d, want %d\n", got2, tt.wantServerTwo)
}
})
}
}
// TestAddPeerEndpoint2 tests the ability to add peers
func TestAddPeerEndpoint2(t *testing.T) {
ctx := context.Background()
args := makeDefaultArgs()
args2 := makeDefaultArgs()
args3 := makeDefaultArgs()
args2.Port = "50052"
args3.Port = "50053"
tests := []struct {
name string
wantServerOne int64
wantServerTwo int64
wantServerThree int64
} {
{
name: "Add 2 peers",
wantServerOne: 2,
wantServerTwo: 2,
wantServerThree: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T){
server := MakeHubServer(ctx, args)
server2 := MakeHubServer(ctx, args2)
server3 := MakeHubServer(ctx, args3)
metrics.PeersKnown.Set(0)
go server.Run()
go server2.Run()
go server3.Run()
conn, err := grpc.Dial("localhost:"+args.Port,
grpc.WithInsecure(),
grpc.WithBlock(),
)
if err != nil {
log.Fatalf("did not connect: %v", err)
}
c := pb.NewHubClient(conn)
msg := &pb.ServerMessage{
Address: "0.0.0.0",
Port: "50052",
}
msg2 := &pb.ServerMessage{
Address: "0.0.0.0",
Port: "50053",
}
_, err = c.AddPeer(context.Background(), msg)
if err != nil {
log.Println(err)
}
_, err = c.AddPeer(context.Background(), msg2)
if err != nil {
log.Println(err)
}
server.GrpcServer.GracefulStop()
server2.GrpcServer.GracefulStop()
server3.GrpcServer.GracefulStop()
got1 := server.getNumPeers()
got2 := server2.getNumPeers()
got3 := server3.getNumPeers()
if got1 != tt.wantServerOne {
t.Errorf("len(server.PeerServers) = %d, want %d\n", got1, tt.wantServerOne)
}
if got2 != tt.wantServerTwo {
t.Errorf("len(server2.PeerServers) = %d, want %d\n", got2, tt.wantServerTwo)
}
if got3 != tt.wantServerThree {
t.Errorf("len(server3.PeerServers) = %d, want %d\n", got3, tt.wantServerThree)
}
})
}
}
// TestAddPeerEndpoint3 tests the ability to add peers
func TestAddPeerEndpoint3(t *testing.T) {
ctx := context.Background()
args := makeDefaultArgs()
args2 := makeDefaultArgs()
args3 := makeDefaultArgs()
args2.Port = "50052"
args3.Port = "50053"
tests := []struct {
name string
wantServerOne int64
wantServerTwo int64
wantServerThree int64
} {
{
name: "Add 1 peer to each",
wantServerOne: 2,
wantServerTwo: 2,
wantServerThree: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T){
server := MakeHubServer(ctx, args)
server2 := MakeHubServer(ctx, args2)
server3 := MakeHubServer(ctx, args3)
metrics.PeersKnown.Set(0)
go server.Run()
go server2.Run()
go server3.Run()
conn, err := grpc.Dial("localhost:"+args.Port,
grpc.WithInsecure(),
grpc.WithBlock(),
)
if err != nil {
log.Fatalf("did not connect: %v", err)
}
conn2, err := grpc.Dial("localhost:50052",
grpc.WithInsecure(),
grpc.WithBlock(),
)
if err != nil {
log.Fatalf("did not connect: %v", err)
}
c := pb.NewHubClient(conn)
c2 := pb.NewHubClient(conn2)
msg := &pb.ServerMessage{
Address: "0.0.0.0",
Port: "50052",
}
msg2 := &pb.ServerMessage{
Address: "0.0.0.0",
Port: "50053",
}
_, err = c.AddPeer(context.Background(), msg)
if err != nil {
log.Println(err)
}
_, err = c2.AddPeer(context.Background(), msg2)
if err != nil {
log.Println(err)
}
server.GrpcServer.GracefulStop()
server2.GrpcServer.GracefulStop()
server3.GrpcServer.GracefulStop()
got1 := server.getNumPeers()
got2 := server2.getNumPeers()
got3 := server3.getNumPeers()
if got1 != tt.wantServerOne {
t.Errorf("len(server.PeerServers) = %d, want %d\n", got1, tt.wantServerOne)
}
if got2 != tt.wantServerTwo {
t.Errorf("len(server2.PeerServers) = %d, want %d\n", got2, tt.wantServerTwo)
}
if got3 != tt.wantServerThree {
t.Errorf("len(server3.PeerServers) = %d, want %d\n", got3, tt.wantServerThree)
}
})
}
}

(file name not shown; hub Server struct and gRPC endpoints)

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"hash"
 	"log"
+	"net"
 	"net/http"
 	"os"
 	"regexp"
@@ -20,6 +21,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/reflection"
 )
@@ -28,14 +30,16 @@ type Server struct {
 	MultiSpaceRe     *regexp.Regexp
 	WeirdCharsRe     *regexp.Regexp
 	EsClient         *elastic.Client
-	Servers          map[string]*FederatedServer
 	QueryCache       *ttlcache.Cache
 	S256             *hash.Hash
 	LastRefreshCheck time.Time
 	RefreshDelta     time.Duration
 	NumESRefreshes   int64
+	PeerServers      sync.Map //map[string]*FederatedServer
+	NumPeerServers   *int64
 	PeerSubs         sync.Map
-	peerChannel      chan *peerAddMsg
+	NumPeerSubs      *int64
+	Subscribed       bool
 
 	pb.UnimplementedHubServer
 }
@@ -83,14 +87,28 @@ func getVersion() string {
 	'blockchain.address.unsubscribe'
 */
func (s *Server) Run() {
l, err := net.Listen("tcp", ":"+s.Args.Port)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
pb.RegisterHubServer(s.GrpcServer, s)
reflection.Register(s.GrpcServer)
log.Printf("listening on %s\n", l.Addr().String())
log.Println(s.Args)
if err := s.GrpcServer.Serve(l); err != nil {
log.Fatalf("failed to serve: %v", err)
}
}
 // MakeHubServer takes the arguments given to a hub when it's started and
 // initializes everything. It loads information about previously known peers,
 // creates needed internal data structures, and initializes goroutines.
 func MakeHubServer(ctx context.Context, args *Args) *Server {
 	grpcServer := grpc.NewServer(grpc.NumStreamWorkers(10))
-	peerChannel := make(chan *peerAddMsg)
 
 	multiSpaceRe, err := regexp.Compile(`\s{2,}`)
 	if err != nil {
 		log.Fatal(err)
@@ -101,8 +119,6 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
 		log.Fatal(err)
 	}
 
-	servers := loadPeers(args)
-
 	var client *elastic.Client
 	if !args.DisableEs {
 		esUrl := args.EsHost + ":" + args.EsPort
@@ -134,6 +150,11 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
 		refreshDelta = time.Second * 0
 	}
 
+	numPeers := new(int64)
+	*numPeers = 0
+	numSubs := new(int64)
+	*numSubs = 0
+
 	s := &Server{
 		GrpcServer: grpcServer,
 		Args:       args,
@@ -145,15 +166,14 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
 		LastRefreshCheck: time.Now(),
 		RefreshDelta:     refreshDelta,
 		NumESRefreshes:   0,
-		Servers:          servers,
+		PeerServers:      sync.Map{},
+		NumPeerServers:   numPeers,
 		PeerSubs:         sync.Map{},
-		peerChannel:      peerChannel,
+		NumPeerSubs:      numSubs,
+		Subscribed:       false,
 	}
 
 	// Start up our background services
-	if args.StartPeerAdder {
-		go s.peerAdder(ctx)
-	}
 	if args.StartPrometheus {
 		go s.prometheusEndpoint(s.Args.PrometheusPort, "metrics")
 	}
@@ -165,6 +185,20 @@ func MakeHubServer(ctx context.Context, args *Args) *Server {
 			}
 		}()
 	}
// Load peers from disk and subscribe to one if there are any
if args.LoadPeers {
// We set Subscribed to true so we don't try subscribing to peers as we
// add them; we'll find the best one after.
s.Subscribed = true
err = s.loadPeers()
if err != nil {
log.Println(err)
}
// subscribe to the fastest peer we know (if there are any) for updates
// about their peers.
s.Subscribed = false
s.subscribeToFastestPeer()
}
 	return s
 }
@@ -182,6 +216,7 @@ func (s *Server) prometheusEndpoint(port string, endpoint string) {
 // The passed message includes information about the other hub, and all
 // of its peers which are added to the knowledge of this hub.
 func (s *Server) Hello(ctx context.Context, args *pb.HelloMessage) (*pb.HelloMessage, error) {
+	metrics.RequestsCount.With(prometheus.Labels{"method": "hello"}).Inc()
 	port := args.Port
 	host := args.Host
 	server := &FederatedServer{
@@ -191,7 +226,11 @@ func (s *Server) Hello(ctx context.Context, args *pb.HelloMessage) (*pb.HelloMessage, error) {
 	}
 	log.Println(server)
 
-	s.addPeer(&pb.ServerMessage{Address: host, Port: port}, false)
+	err := s.addPeer(&pb.ServerMessage{Address: host, Port: port}, false)
+	// They just contacted us, so this shouldn't happen
+	if err != nil {
+		log.Println(err)
+	}
 	s.mergeFederatedServers(args.Servers)
 	s.writePeers()
 	s.notifyPeerSubs(server)
@@ -202,21 +241,34 @@ func (s *Server) Hello(ctx context.Context, args *pb.HelloMessage) (*pb.HelloMessage, error) {
 // PeerSubscribe adds a peer hub to the list of subscribers to update about
 // new peers.
 func (s *Server) PeerSubscribe(ctx context.Context, in *pb.ServerMessage) (*pb.StringValue, error) {
+	metrics.RequestsCount.With(prometheus.Labels{"method": "peer_subscribe"}).Inc()
+	var msg = "Success"
 	peer := &FederatedServer{
 		Address: in.Address,
 		Port:    in.Port,
 		Ts:      time.Now(),
 	}
 
-	s.PeerSubs.Store(peerKey(in), peer)
+	if _, loaded := s.PeerSubs.LoadOrStore(peerKey(in), peer); !loaded {
+		s.incNumSubs()
+		metrics.PeersSubscribed.Inc()
+	} else {
+		msg = "Already subscribed"
+	}
 
-	return &pb.StringValue{Value: "Success"}, nil
+	return &pb.StringValue{Value: msg}, nil
 }
 
 // AddPeer is a grpc endpoint to tell this hub about another hub in the network.
 func (s *Server) AddPeer(ctx context.Context, args *pb.ServerMessage) (*pb.StringValue, error) {
-	s.addPeer(args, true)
-	return &pb.StringValue{Value: "Success!"}, nil
+	metrics.RequestsCount.With(prometheus.Labels{"method": "add_peer"}).Inc()
+	var msg = "Success"
+	err := s.addPeer(args, true)
+	if err != nil {
+		log.Println(err)
+		msg = "Failed"
+	}
+
+	return &pb.StringValue{Value: msg}, err
 }
// Ping is a grpc endpoint that returns a short message. // Ping is a grpc endpoint that returns a short message.
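For reference, the new endpoint tests drive AddPeer over gRPC roughly like this; because addPeer is now synchronous, the hello ping, the store, the subscriber notification, and the subscribe-back have all happened by the time the call returns. The host and ports are illustrative.

package main

import (
	"context"
	"log"

	pb "github.com/lbryio/hub/protobuf/go"
	"google.golang.org/grpc"
)

func main() {
	// Dial one hub and tell it about another, as TestAddPeerEndpoint does.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()

	c := pb.NewHubClient(conn)
	res, err := c.AddPeer(context.Background(), &pb.ServerMessage{
		Address: "0.0.0.0",
		Port:    "50052",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(res.Value) // "Success", or "Failed" if the ping to the new peer errored
}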