fixed a few channel lockups, fixed announced port in dht, successfully announced and served a blob
commit e480fa146f (parent 4ab3aea7b6)
10 changed files with 115 additions and 77 deletions
@@ -15,8 +15,7 @@ import (
 )
 
 const (
-	// DefaultClusterPort is the default port used when starting up a Cluster.
-	DefaultClusterPort = 17946
+	DefaultPort = 17946
 	MembershipChangeBufferWindow = 1 * time.Second
 )
 
@@ -103,11 +102,11 @@ func (c *Cluster) listen() {
 		case event := <-c.eventCh:
 			switch event.EventType() {
 			case serf.EventMemberJoin, serf.EventMemberFailed, serf.EventMemberLeave:
-				memberEvent := event.(serf.MemberEvent)
-				if event.EventType() == serf.EventMemberJoin && len(memberEvent.Members) == 1 && memberEvent.Members[0].Name == c.name {
-					// ignore event from my own joining of the cluster
-					continue
-				}
+				// // ignore event from my own joining of the cluster
+				//memberEvent := event.(serf.MemberEvent)
+				//if event.EventType() == serf.EventMemberJoin && len(memberEvent.Members) == 1 && memberEvent.Members[0].Name == c.name {
+				//	continue
+				//}
 
 				if timerCh == nil {
 					timer.Reset(MembershipChangeBufferWindow)
@@ -32,7 +32,7 @@ func peerCmd(cmd *cobra.Command, args []string) {
 	combo := store.NewDBBackedS3Store(s3, db)
 	peerServer := peer.NewServer(combo)
 
-	err = peerServer.Start("localhost:" + strconv.Itoa(peer.DefaultPort))
+	err = peerServer.Start(":" + strconv.Itoa(peer.DefaultPort))
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -31,7 +31,7 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 	s3 := store.NewS3BlobStore(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
 	combo := store.NewDBBackedS3Store(s3, db)
 	reflectorServer := reflector.NewServer(combo)
-	err = reflectorServer.Start("localhost:" + strconv.Itoa(reflector.DefaultPort))
+	err = reflectorServer.Start(":" + strconv.Itoa(reflector.DefaultPort))
 	if err != nil {
 		log.Fatal(err)
 	}
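Both commands above now listen on ":<port>" instead of "localhost:<port>". A minimal standalone sketch of why that matters (not part of the commit; port 5567 is an arbitrary example): a listener bound to localhost accepts loopback connections only, so remote peers could never fetch the blobs being announced.

package main

import (
	"log"
	"net"
)

func main() {
	// ":5567" binds to all interfaces, so peers on other machines can connect.
	// The old "localhost:5567" form would have accepted loopback connections only.
	l, err := net.Listen("tcp4", ":5567")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	log.Println("listening on", l.Addr())
}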
cmd/start.go (56 lines changed)

@@ -4,9 +4,13 @@ import (
 	"os"
 	"os/signal"
 	"strconv"
+	"strings"
 	"syscall"
 
+	"github.com/lbryio/reflector.go/cluster"
 	"github.com/lbryio/reflector.go/db"
+	"github.com/lbryio/reflector.go/dht"
+	"github.com/lbryio/reflector.go/dht/bits"
 	"github.com/lbryio/reflector.go/peer"
 	"github.com/lbryio/reflector.go/prism"
 	"github.com/lbryio/reflector.go/reflector"
@@ -16,28 +20,33 @@ import (
 	"github.com/spf13/cobra"
 )
 
+const (
+	startNewCluster = "new"
+)
+
 var (
+	startClusterPort   int
 	startPeerPort      int
 	startReflectorPort int
 	startDhtPort       int
-	startDhtSeedPort   int
-	startClusterPort   int
-	startClusterSeedPort int
+	startDhtSeeds      []string
+	startHashRange     string
 )
 
 func init() {
 	var cmd = &cobra.Command{
-		Use:   "start [cluster-address]",
-		Short: "Runs prism application with cluster, dht, peer server, and reflector server.",
+		Use:   `start [cluster-address|"new"]`,
+		Short: "Runs full prism application with cluster, dht, peer server, and reflector server.",
 		Run:   startCmd,
-		Args:  cobra.RangeArgs(0, 1),
+		Args:  cobra.ExactArgs(1),
 	}
-	cmd.PersistentFlags().IntVar(&startDhtPort, "dht-port", 4570, "Port to start DHT on")
-	cmd.PersistentFlags().IntVar(&startDhtSeedPort, "dht-seed-port", 4567, "Port to connect to DHT bootstrap node on")
-	cmd.PersistentFlags().IntVar(&startClusterPort, "cluster-port", 5678, "Port to start DHT on")
-	cmd.PersistentFlags().IntVar(&startClusterSeedPort, "cluster-seed-port", 0, "Port to start DHT on")
+	cmd.PersistentFlags().IntVar(&startClusterPort, "cluster-port", cluster.DefaultPort, "Port that cluster listens on")
 	cmd.PersistentFlags().IntVar(&startPeerPort, "peer-port", peer.DefaultPort, "Port to start peer protocol on")
 	cmd.PersistentFlags().IntVar(&startReflectorPort, "reflector-port", reflector.DefaultPort, "Port to start reflector protocol on")
+	cmd.PersistentFlags().IntVar(&startDhtPort, "dht-port", dht.DefaultPort, "Port that dht will listen on")
+	cmd.PersistentFlags().StringSliceVar(&startDhtSeeds, "dht-seeds", []string{}, "Comma-separated list of dht seed nodes (addr:port,addr:port,...)")
+
+	cmd.PersistentFlags().StringVar(&startHashRange, "hash-range", "", "Limit on range of hashes to announce (start-end)")
 
 	rootCmd.AddCommand(cmd)
 }
@@ -49,25 +58,32 @@ func startCmd(cmd *cobra.Command, args []string) {
 	s3 := store.NewS3BlobStore(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
 	comboStore := store.NewDBBackedS3Store(s3, db)
 
+	conf := prism.DefaultConf()
+
 	// TODO: args we need:
-	// clusterAddr - to connect to cluster (or start new cluster if empty)
 	// minNodes - minimum number of nodes before announcing starts. otherwise first node will try to announce all the blobs in the db
 	// or maybe we should do maxHashesPerNode?
 	// in either case, this should not kill the cluster, but should only limit announces (and notify when some hashes are being left unannounced)
 
-	//clusterAddr := ""
-	//if len(args) > 0 {
-	// clusterAddr = args[0]
-	//}
+	if args[0] != startNewCluster {
+		conf.ClusterSeedAddr = args[0]
+	}
 
-	conf := prism.DefaultConf()
 	conf.DB = db
 	conf.Blobs = comboStore
-	conf.DhtAddress = "127.0.0.1:" + strconv.Itoa(startDhtPort)
-	conf.DhtSeedNodes = []string{"127.0.0.1:" + strconv.Itoa(startDhtSeedPort)}
+	conf.DhtAddress = "0.0.0.0:" + strconv.Itoa(startDhtPort)
+	conf.DhtSeedNodes = startDhtSeeds
 	conf.ClusterPort = startClusterPort
-	if startClusterSeedPort > 0 {
-		conf.ClusterSeedAddr = "127.0.0.1:" + strconv.Itoa(startClusterSeedPort)
+	conf.PeerPort = startPeerPort
+	conf.ReflectorPort = startReflectorPort
+
+	if startHashRange != "" {
+		hashRange := strings.Split(startHashRange, "-")
+		if len(hashRange) != 2 {
+			log.Fatal("invalid hash range")
+		}
+		r := bits.Range{Start: bits.FromShortHexP(hashRange[0]), End: bits.FromShortHexP(hashRange[1])}
+		conf.HashRange = &r
 	}
 
 	p := prism.New(conf)
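The new --hash-range flag takes a value of the form "start-end", which startCmd splits on "-" and hands to bits.FromShortHexP. A standalone sketch of the same validation using only the standard library (the sample value "aa-bb" and the helper name parseHashRange are illustrative, not from the repo):

package main

import (
	"fmt"
	"log"
	"strings"
)

// parseHashRange mirrors the validation startCmd performs on --hash-range:
// the value must be exactly two parts joined by "-". In the real command the
// two parts are then converted with bits.FromShortHexP.
func parseHashRange(s string) (start, end string, err error) {
	hashRange := strings.Split(s, "-")
	if len(hashRange) != 2 {
		return "", "", fmt.Errorf("invalid hash range %q", s)
	}
	return hashRange[0], hashRange[1], nil
}

func main() {
	start, end, err := parseHashRange("aa-bb")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("would announce hashes from %s to %s\n", start, end)
}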
dht/dht.go (15 lines changed)

@@ -3,16 +3,19 @@ package dht
 import (
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
 
+	"github.com/lbryio/reflector.go/dht/bits"
+	peerproto "github.com/lbryio/reflector.go/peer"
+
 	"github.com/lbryio/lbry.go/errors"
 	"github.com/lbryio/lbry.go/stopOnce"
-	"github.com/lbryio/reflector.go/dht/bits"
-	"github.com/spf13/cast"
 
 	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cast"
 )
 
 func init() {
@@ -22,6 +25,7 @@ func init() {
 
 const (
 	Network = "udp4"
+	DefaultPort = 4444
 
 	// TODO: all these constants should be defaults, and should be used to set values in the standard Config. then the code should use values in the config
 	// TODO: alternatively, have a global Config for constants. at least that way tests can modify the values
@@ -57,17 +61,20 @@ type Config struct {
 	NodeID string
 	// print the state of the dht every X time
 	PrintState time.Duration
+	// the port that clients can use to download blobs using the LBRY peer protocol
+	PeerProtocolPort int
 }
 
 // NewStandardConfig returns a Config pointer with default values.
 func NewStandardConfig() *Config {
 	return &Config{
-		Address: "0.0.0.0:4444",
+		Address: "0.0.0.0:" + strconv.Itoa(DefaultPort),
 		SeedNodes: []string{
 			"lbrynet1.lbry.io:4444",
 			"lbrynet2.lbry.io:4444",
 			"lbrynet3.lbry.io:4444",
 		},
+		PeerProtocolPort: peerproto.DefaultPort,
 	}
 }
 
@@ -330,7 +337,7 @@ func (dht *DHT) storeOnNode(hash bits.Bitmap, c Contact) {
 		Value: storeArgsValue{
 			Token:  res.Token,
 			LbryID: dht.contact.ID,
-			Port:   dht.contact.Port,
+			Port:   dht.conf.PeerProtocolPort,
 		},
 	},
 	})
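The storeOnNode change above is the announced-port fix from the commit message: store requests previously advertised the DHT's own UDP port (dht.contact.Port), so downloaders would try to fetch blobs from a port where only the DHT was listening. A minimal wiring sketch, using only identifiers that appear in this diff and assuming their APIs as shown (it is not part of the commit):

package main

import (
	"fmt"
	"strconv"

	"github.com/lbryio/reflector.go/dht"
	"github.com/lbryio/reflector.go/peer"
)

func main() {
	conf := dht.NewStandardConfig()
	conf.Address = "0.0.0.0:" + strconv.Itoa(dht.DefaultPort) // UDP address the DHT itself listens on
	conf.PeerProtocolPort = peer.DefaultPort                  // TCP port announced in store messages, where blobs are served

	d := dht.New(conf)
	_ = d // d.Start() would join the network and begin announcing

	fmt.Printf("dht on %s will announce peer port %d\n", conf.Address, conf.PeerProtocolPort)
}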
@@ -299,7 +299,11 @@ func (n *Node) handleRequest(addr *net.UDPAddr, request Request) {
 func (n *Node) handleResponse(addr *net.UDPAddr, response Response) {
 	tx := n.txFind(response.ID, Contact{ID: response.NodeID, IP: addr.IP, Port: addr.Port})
 	if tx != nil {
-		tx.res <- response
+		select {
+		case tx.res <- response:
+		default:
+			log.Errorf("[%s] query %s: response received but tx has no listener", n.id.HexShort(), response.ID.HexShort())
+		}
 	}
 
 	n.rt.Update(Contact{ID: response.NodeID, IP: addr.IP, Port: addr.Port})
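This is one of the channel-lockup fixes: if the transaction's listener has already gone away, a plain send on tx.res blocks the node's handler goroutine forever. A self-contained sketch of the select/default pattern (standard library only, not part of the commit):

package main

import "fmt"

func main() {
	res := make(chan int) // unbuffered, so a send blocks until someone receives

	// A plain `res <- 1` here would block forever because nothing is receiving.
	// select with a default clause delivers the value if a receiver is waiting
	// and otherwise logs and moves on, which is what handleResponse now does.
	select {
	case res <- 1:
		fmt.Println("delivered to a waiting receiver")
	default:
		fmt.Println("no listener; dropping instead of locking up")
	}
}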
@@ -69,13 +69,13 @@ func (cf *contactFinder) Stop() {
 
 func (cf *contactFinder) Find() ([]Contact, bool, error) {
 	if cf.findValue {
-		log.Debugf("[%s] starting an iterative Find for the value %s", cf.node.id.HexShort(), cf.target.HexShort())
+		log.Debugf("[%s] find %s: starting iterativeFindValue", cf.node.id.HexShort(), cf.target.HexShort())
 	} else {
-		log.Debugf("[%s] starting an iterative Find for contacts near %s", cf.node.id.HexShort(), cf.target.HexShort())
+		log.Debugf("[%s] find %s: starting iterativeFindNode", cf.node.id.HexShort(), cf.target.HexShort())
 	}
 	cf.appendNewToShortlist(cf.node.rt.GetClosest(cf.target, alpha))
 	if len(cf.shortlist) == 0 {
-		return nil, false, errors.Err("no contacts in routing table")
+		return nil, false, errors.Err("[%s] find %s: no contacts in routing table", cf.node.id.HexShort(), cf.target.HexShort())
 	}
 
 	for i := 0; i < alpha; i++ {
@@ -108,14 +108,16 @@ func (cf *contactFinder) Find() ([]Contact, bool, error) {
 }
 
 func (cf *contactFinder) iterationWorker(num int) {
-	log.Debugf("[%s] starting worker %d", cf.node.id.HexShort(), num)
-	defer func() { log.Debugf("[%s] stopping worker %d", cf.node.id.HexShort(), num) }()
+	log.Debugf("[%s] find %s: starting worker %d", cf.node.id.HexShort(), cf.target.HexShort(), num)
+	defer func() {
+		log.Debugf("[%s] find %s: stopping worker %d", cf.node.id.HexShort(), cf.target.HexShort(), num)
+	}()
 
 	for {
 		maybeContact := cf.popFromShortlist()
 		if maybeContact == nil {
 			// TODO: block if there are pending requests out from other workers. there may be more shortlist values coming
-			log.Debugf("[%s] worker %d: no contacts in shortlist, waiting...", cf.node.id.HexShort(), num)
+			//log.Debugf("[%s] worker %d: no contacts in shortlist, waiting...", cf.node.id.HexShort(), num)
 			time.Sleep(100 * time.Millisecond)
 		} else {
 			contact := *maybeContact
@@ -131,7 +133,7 @@ func (cf *contactFinder) iterationWorker(num int) {
 				req.Method = findNodeMethod
 			}
 
-			log.Debugf("[%s] worker %d: contacting %s", cf.node.id.HexShort(), num, contact.ID.HexShort())
+			log.Debugf("[%s] find %s: worker %d: contacting %s", cf.node.id.HexShort(), cf.target.HexShort(), num, contact.ID.HexShort())
 
 			cf.incrementOutstanding()
 
@@ -140,23 +142,23 @@ func (cf *contactFinder) iterationWorker(num int) {
 			select {
 			case res = <-resCh:
 			case <-cf.stop.Ch():
-				log.Debugf("[%s] worker %d: canceled", cf.node.id.HexShort(), num)
+				log.Debugf("[%s] find %s: worker %d: canceled", cf.node.id.HexShort(), cf.target.HexShort(), num)
 				cancel()
 				return
 			}
 
 			if res == nil {
 				// nothing to do, response timed out
-				log.Debugf("[%s] worker %d: search canceled or timed out waiting for %s", cf.node.id.HexShort(), num, contact.ID.HexShort())
+				log.Debugf("[%s] find %s: worker %d: search canceled or timed out waiting for %s", cf.node.id.HexShort(), cf.target.HexShort(), num, contact.ID.HexShort())
 			} else if cf.findValue && res.FindValueKey != "" {
-				log.Debugf("[%s] worker %d: got value", cf.node.id.HexShort(), num)
+				log.Debugf("[%s] find %s: worker %d: got value", cf.node.id.HexShort(), cf.target.HexShort(), num)
 				cf.findValueMutex.Lock()
 				cf.findValueResult = res.Contacts
 				cf.findValueMutex.Unlock()
 				cf.stop.Stop()
 				return
 			} else {
-				log.Debugf("[%s] worker %d: got contacts", cf.node.id.HexShort(), num)
+				log.Debugf("[%s] find %s: worker %d: got contacts", cf.node.id.HexShort(), cf.target.HexShort(), num)
 				cf.insertIntoActiveList(contact)
 				cf.appendNewToShortlist(res.Contacts)
 			}
@@ -165,7 +167,7 @@ func (cf *contactFinder) iterationWorker(num int) {
 		}
 
 		if cf.isSearchFinished() {
-			log.Debugf("[%s] worker %d: search is finished", cf.node.id.HexShort(), num)
+			log.Debugf("[%s] find %s: worker %d: search is finished", cf.node.id.HexShort(), cf.target.HexShort(), num)
 			cf.stop.Stop()
 			return
 		}
@@ -51,7 +51,7 @@ func (s *Server) Shutdown() {
 // Start starts the server listener to handle connections.
 func (s *Server) Start(address string) error {
 	log.Println("peer listening on " + address)
-	l, err := net.Listen("tcp", address)
+	l, err := net.Listen("tcp4", address)
 	if err != nil {
 		return err
 	}
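Both the peer and reflector servers switch from the "tcp" network to "tcp4". A small standalone sketch of the difference (not part of the commit; exact addresses depend on the OS): "tcp" with a wildcard host can yield a dual-stack IPv6 listener, while "tcp4" pins the listener to IPv4, matching the DHT's udp4 network.

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	for _, network := range []string{"tcp", "tcp4"} {
		l, err := net.Listen(network, ":0") // ":0" asks the OS for any free port
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-4s -> %s\n", network, l.Addr()) // typically [::]:NNNN vs 0.0.0.0:NNNN
		l.Close()
	}
}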
@@ -29,6 +29,9 @@ type Config struct {
 	ClusterPort     int
 	ClusterSeedAddr string
 
+	// limit the range of hashes to announce. useful for testing
+	HashRange *bits.Range
+
 	DB    *db.SQL
 	Blobs store.BlobStore
 }
@@ -36,7 +39,7 @@ type Config struct {
 // DefaultConf returns a default config
 func DefaultConf() *Config {
 	return &Config{
-		ClusterPort: cluster.DefaultClusterPort,
+		ClusterPort: cluster.DefaultPort,
 	}
 }
 
@@ -61,7 +64,10 @@ func New(conf *Config) *Prism {
 
 	dhtConf := dht.NewStandardConfig()
 	dhtConf.Address = conf.DhtAddress
+	dhtConf.PeerProtocolPort = conf.PeerPort
+	if len(conf.DhtSeedNodes) > 0 {
 		dhtConf.SeedNodes = conf.DhtSeedNodes
+	}
 	d := dht.New(dhtConf)
 
 	c := cluster.New(conf.ClusterPort, conf.ClusterSeedAddr)
@@ -91,6 +97,8 @@ func New(conf *Config) *Prism {
 
 // Start starts the components of the application.
 func (p *Prism) Start() error {
+	var err error
+
 	if p.conf.DB == nil {
 		return errors.Err("db required in conf")
 	}
@@ -99,7 +107,17 @@ func (p *Prism) Start() error {
 		return errors.Err("blobs required in conf")
 	}
 
-	err := p.dht.Start()
+	err = p.peer.Start(":" + strconv.Itoa(p.conf.PeerPort))
+	if err != nil {
+		return err
+	}
+
+	err = p.reflector.Start(":" + strconv.Itoa(p.conf.ReflectorPort))
+	if err != nil {
+		return err
+	}
+
+	err = p.dht.Start()
 	if err != nil {
 		return err
 	}
@@ -109,28 +127,16 @@ func (p *Prism) Start() error {
 		return err
 	}
 
-	// TODO: should not be localhost forever. should prolly be 0.0.0.0, or configurable
-	err = p.peer.Start("localhost:" + strconv.Itoa(p.conf.PeerPort))
-	if err != nil {
-		return err
-	}
-
-	// TODO: should not be localhost forever. should prolly be 0.0.0.0, or configurable
-	err = p.reflector.Start("localhost:" + strconv.Itoa(p.conf.ReflectorPort))
-	if err != nil {
-		return err
-	}
-
 	return nil
 }
 
 // Shutdown gracefully shuts down the different prism components before exiting.
 func (p *Prism) Shutdown() {
 	p.stop.StopAndWait()
-	p.reflector.Shutdown()
-	p.peer.Shutdown()
 	p.cluster.Shutdown()
 	p.dht.Shutdown()
+	p.reflector.Shutdown()
+	p.peer.Shutdown()
 }
 
 // AnnounceRange announces the `n`th interval of hashes, out of a total of `total` intervals
@@ -143,15 +149,19 @@ func (p *Prism) AnnounceRange(n, total int) {
 		return
 	}
 
+	var r bits.Range
+	if p.conf.HashRange != nil {
+		r = *p.conf.HashRange
+	} else {
 	//r := bits.MaxRange().IntervalP(n, total)
 
 	// TODO: this is temporary. it lets me test with a small number of hashes. use the full range in production
 	min, max, err := p.db.GetHashRange()
 	if err != nil {
 		log.Errorf("%s: error getting hash range: %s", p.dht.ID().HexShort(), err.Error())
 		return
 	}
-	r := (bits.Range{Start: bits.FromHexP(min), End: bits.FromHexP(max)}).IntervalP(n, total)
+	r = (bits.Range{Start: bits.FromHexP(min), End: bits.FromHexP(max)}).IntervalP(n, total)
+	}
 
 	log.Infof("%s: hash range is now %s to %s", p.dht.ID().HexShort(), r.Start, r.End)
 
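Prism.Start now brings up the peer and reflector TCP servers before starting the DHT, presumably so that by the time hashes are announced there is already a listener on the announced port; Shutdown tears the components down in the opposite order. A generic sketch of that start-then-announce ordering (standard library only; the announce helper is a stand-in, not an API from this repo):

package main

import (
	"fmt"
	"log"
	"net"
)

// announce stands in for the DHT advertising this node's blobs once the
// blob servers are reachable.
func announce(addr net.Addr) {
	fmt.Println("announcing blobs served at", addr)
}

func main() {
	// 1. Bind the server that will actually serve blobs.
	l, err := net.Listen("tcp4", ":0")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// 2. Only then advertise it to the network.
	announce(l.Addr())
}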
@@ -41,7 +41,7 @@ func (s *Server) Shutdown() {
 func (s *Server) Start(address string) error {
 	//ToDo - We should make this DRY as it is the same code in both servers.
 	log.Println("reflector listening on " + address)
-	l, err := net.Listen("tcp", address)
+	l, err := net.Listen("tcp4", address)
 	if err != nil {
 		return err
 	}