package server

import (
	"encoding/binary"
	"net"

	"github.com/lbryio/hub/internal"
	"github.com/sirupsen/logrus"
)

// NotifierResponseLength is the fixed size in bytes of one height notification:
// an 8-byte big-endian block height followed by a 32-byte block hash.
const NotifierResponseLength = 40
// AddHeightSub adds a new height subscriber
func (s *Server) AddHeightSub(addr net.Addr, conn net.Conn) {
	s.HeightSubsMut.Lock()
	defer s.HeightSubsMut.Unlock()
	s.HeightSubs[addr] = conn
}
// DoNotify sends a notification to all height subscribers
func (s *Server) DoNotify(heightHash *internal.HeightHash) error {
	buff := make([]byte, NotifierResponseLength)
	toDelete := make([]net.Addr, 0)

	s.HeightSubsMut.RLock()
	for addr, conn := range s.HeightSubs {
		// struct.pack(b'>Q32s', height, block_hash)
		binary.BigEndian.PutUint64(buff, heightHash.Height)
		copy(buff[8:], heightHash.BlockHash[:32])
		logrus.Tracef("notifying %s", addr)
		n, err := conn.Write(buff)
		if err != nil {
			logrus.Warn(err)
			toDelete = append(toDelete, addr)
		}
		if n != NotifierResponseLength {
			logrus.Warn("not all bytes written")
		}
	}
	s.HeightSubsMut.RUnlock()

	if len(toDelete) > 0 {
		s.HeightSubsMut.Lock()
		for _, v := range toDelete {
			delete(s.HeightSubs, v)
		}
		s.HeightSubsMut.Unlock()
	}

	return nil
}
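// The 40-byte payload above mirrors Python's struct.pack(b'>Q32s', height, block_hash).
// As a minimal client-side sketch (decodeHeightNotification is a hypothetical helper,
// not part of this package, and would need an "fmt" import), a subscriber could
// unpack a notification like so:
//
//	func decodeHeightNotification(buff []byte) (height uint64, blockHash []byte, err error) {
//		if len(buff) != NotifierResponseLength {
//			return 0, nil, fmt.Errorf("expected %d bytes, got %d", NotifierResponseLength, len(buff))
//		}
//		height = binary.BigEndian.Uint64(buff[:8]) // 8-byte big-endian height
//		blockHash = buff[8:40]                     // 32-byte block hash
//		return height, blockHash, nil
//	}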

// RunNotifier runs the notifying action forever, draining NotifierChan and
// pushing each height/hash pair to all current subscribers.
func (s *Server) RunNotifier() error {
	for heightHash := range s.NotifierChan {
		if err := s.DoNotify(heightHash); err != nil {
			logrus.Warn(err)
		}
	}
	return nil
}
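// A minimal usage sketch, assuming NotifierChan is a chan *internal.HeightHash fed by
// block-processing code elsewhere (the surrounding wiring, and the height/blockHash
// variables, are illustrative rather than part of this file):
//
//	go func() {
//		if err := s.RunNotifier(); err != nil {
//			logrus.Fatal(err)
//		}
//	}()
//	// later, when a new block is processed:
//	s.NotifierChan <- &internal.HeightHash{Height: height, BlockHash: blockHash}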

// NotifierServer implements the TCP protocol for height/blockheader notifications
func (s *Server) NotifierServer() error {
	address := ":" + s.Args.NotifierPort
	addr, err := net.ResolveTCPAddr("tcp", address)
	if err != nil {
		return err
	}

	listen, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}
	defer listen.Close()

	for {
		logrus.Info("Waiting for connection")

		conn, err := listen.Accept()
		if err != nil {
			logrus.Warn(err)
			continue
		}

		addr := conn.RemoteAddr()
		logrus.Println(addr)

		// _, err = conn.Write([]byte(addr.String()))
		// if err != nil {
		// 	logrus.Warn(err)
		// 	continue
		// }

		go s.AddHeightSub(addr, conn)
	}
}
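// A hedged client-side sketch of the protocol served above (the dial address and
// port are assumptions; the configured port comes from Args.NotifierPort): connect
// over TCP and read fixed-size 40-byte notifications in a loop. Needs "io" as well
// as the imports already used in this file.
//
//	conn, err := net.Dial("tcp", "localhost:18080") // port is an assumption
//	if err != nil {
//		logrus.Fatal(err)
//	}
//	buff := make([]byte, NotifierResponseLength)
//	for {
//		if _, err := io.ReadFull(conn, buff); err != nil {
//			logrus.Warn(err)
//			break
//		}
//		height := binary.BigEndian.Uint64(buff[:8])
//		logrus.Infof("new height %d, hash %x", height, buff[8:])
//	}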