reflector.go/reflector/server.go

package reflector

import (
    "bufio"
    "crypto/sha512"
    "encoding/hex"
    "encoding/json"
    "io"
    "io/ioutil"
    "net"
    "time"

    "github.com/lbryio/reflector.go/internal/metrics"
    "github.com/lbryio/reflector.go/store"

    "github.com/lbryio/lbry.go/v2/extras/errors"
    "github.com/lbryio/lbry.go/v2/extras/stop"
    "github.com/lbryio/lbry.go/v2/stream"

    log "github.com/sirupsen/logrus"
)

const (
    // DefaultPort is the port the reflector server listens on if not passed in.
    DefaultPort = 5566
    // DefaultTimeout is the default timeout to read or write the next message.
    DefaultTimeout = 5 * time.Second

    network          = "tcp4"
    protocolVersion1 = 0
    protocolVersion2 = 1
    maxBlobSize      = stream.MaxBlobSize
)

// ErrBlobTooBig is returned when a client tries to send a blob larger than maxBlobSize.
var ErrBlobTooBig = errors.Base("blob must be at most %d bytes", maxBlobSize)

// Server is an instance of the reflector server. It houses the blob stores and listener.
type Server struct {
    Timeout         time.Duration // timeout to read or write next message
    EnableBlocklist bool          // if true, blocklist checking and blob deletion will be enabled

    underlyingStore store.BlobStore
    outerStore      store.BlobStore
    grp             *stop.Group
}

// NewServer returns an initialized reflector server pointer.
func NewServer(underlying store.BlobStore, outer store.BlobStore) *Server {
    return &Server{
        Timeout:         DefaultTimeout,
        underlyingStore: underlying,
        outerStore:      outer,
        grp:             stop.New(),
    }
}

// Shutdown shuts down the reflector server gracefully.
func (s *Server) Shutdown() {
    log.Println("shutting down reflector server...")
    s.grp.StopAndWait()
    log.Println("reflector server stopped")
}

// Start starts the server to handle connections.
func (s *Server) Start(address string) error {
    l, err := net.Listen(network, address)
    if err != nil {
        return errors.Err(err)
    }

    log.Println("reflector listening on " + address)

    s.grp.Add(1)
    metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc()
    go func() {
        defer metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Dec()
        <-s.grp.Ch()
        err := l.Close()
        if err != nil {
            log.Error(errors.Prefix("closing listener", err))
        }
        s.grp.Done()
    }()

    s.grp.Add(1)
    metrics.RoutinesQueue.WithLabelValues("reflector", "start").Inc()
    go func() {
        defer metrics.RoutinesQueue.WithLabelValues("reflector", "start").Dec()
        s.listenAndServe(l)
        s.grp.Done()
    }()

    if s.EnableBlocklist {
        if b, ok := s.underlyingStore.(store.Blocklister); ok {
            s.grp.Add(1)
            metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Inc()
            go func() {
                defer metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Dec()
                s.enableBlocklist(b)
                s.grp.Done()
            }()
        } else {
            //s.Shutdown()
            return errors.Err("blocklist is enabled but blob store does not support blocklisting")
        }
    }

    return nil
}

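// listenAndServe accepts connections until the server shuts down, handling each one in its own goroutine.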
func (s *Server) listenAndServe(listener net.Listener) {
    for {
        conn, err := listener.Accept()
        if err != nil {
            if s.quitting() {
                return
            }
            log.Error(err)
        } else {
            s.grp.Add(1)
            metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
            go func() {
                defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
                s.handleConn(conn)
                s.grp.Done()
            }()
        }
    }
}

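// handleConn performs the protocol handshake with a client and then receives blobs in a loop until the
// connection errors out or the server shuts down. A helper goroutine closes the connection on shutdown.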
func (s *Server) handleConn(conn net.Conn) {
    // all this stuff is to close the connections correctly when we're shutting down the server
    connNeedsClosing := make(chan struct{})
    defer func() {
        close(connNeedsClosing)
    }()
    s.grp.Add(1)
    metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Inc()
    go func() {
        defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Dec()
        defer s.grp.Done()
        select {
        case <-connNeedsClosing:
        case <-s.grp.Ch():
        }
        err := conn.Close()
        if err != nil {
            log.Error(errors.Prefix("closing peer conn", err))
        }
    }()

    err := s.doHandshake(conn)
    if err != nil {
        if errors.Is(err, io.EOF) || s.quitting() {
            return
        }
        err := s.doError(conn, err)
        if err != nil {
            log.Error(errors.Prefix("sending handshake error", err))
        }
        return
    }

    for {
        err = s.receiveBlob(conn)
        if err != nil {
            if errors.Is(err, io.EOF) || s.quitting() {
                return
            }
            err := s.doError(conn, err)
            if err != nil {
                log.Error(errors.Prefix("sending blob receive error", err))
            }
            return
        }
    }
}

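// doError tracks and logs an error that occurred while serving a connection.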
func (s *Server) doError(conn net.Conn, err error) error {
    if err == nil {
        return nil
    }
    shouldLog := metrics.TrackError(metrics.DirectionUpload, err)
    if shouldLog {
        log.Errorln(errors.FullTrace(err))
    }
    if e2, ok := err.(*json.SyntaxError); ok {
        log.Errorf("syntax error at byte offset %d", e2.Offset)
    }
    //resp, err := json.Marshal(errorResponse{Error: err.Error()})
    //if err != nil {
    //	return err
    //}
    //return s.write(conn, resp)
    return nil
}

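// receiveBlob handles a single blob upload: it reads the blob request, tells the client whether the blob
// is wanted, then reads the blob data, verifies its hash, and stores it.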
func (s *Server) receiveBlob(conn net.Conn) error {
    blobSize, blobHash, isSdBlob, err := s.readBlobRequest(conn)
    if err != nil {
        return err
    }

    var wantsBlob bool
    if bl, ok := s.underlyingStore.(store.Blocklister); ok {
        wantsBlob, err = bl.Wants(blobHash)
        if err != nil {
            return err
        }
    } else {
        blobExists, err := s.underlyingStore.Has(blobHash)
        if err != nil {
            return err
        }
        wantsBlob = !blobExists
    }

    var neededBlobs []string

    if isSdBlob && !wantsBlob {
        if nbc, ok := s.underlyingStore.(neededBlobChecker); ok {
            neededBlobs, err = nbc.MissingBlobsForKnownStream(blobHash)
            if err != nil {
                return err
            }
        } else {
            // if we can't check for blobs in a stream, we have to say that the sd blob is
            // missing. if we say we have the sd blob, they won't try to send any content blobs
            wantsBlob = true
        }
    }

    err = s.sendBlobResponse(conn, wantsBlob, isSdBlob, neededBlobs)
    if err != nil {
        return err
    }

    if !wantsBlob {
        return nil
    }

    blob, err := s.readRawBlob(conn, blobSize)
    if err != nil {
        sendErr := s.sendTransferResponse(conn, false, isSdBlob)
        if sendErr != nil {
            return sendErr
        }
        return errors.Prefix("error reading blob "+blobHash[:8], err)
    }

    receivedBlobHash := BlobHash(blob)
    if blobHash != receivedBlobHash {
        sendErr := s.sendTransferResponse(conn, false, isSdBlob)
        if sendErr != nil {
            return sendErr
        }
        // this can also happen if the blob size is wrong, because the server will read the wrong number of bytes from the stream
        return errors.Err("hash of received blob data does not match hash from send request")
    }

    log.Debugln("Got blob " + blobHash[:8])

    if isSdBlob {
        err = s.outerStore.PutSD(blobHash, blob)
    } else {
        err = s.outerStore.Put(blobHash, blob)
    }
    if err != nil {
        return err
    }

    metrics.MtrInBytesReflector.Add(float64(len(blob)))
    metrics.BlobUploadCount.Inc()
    if isSdBlob {
        metrics.SDBlobUploadCount.Inc()
    }

    return s.sendTransferResponse(conn, true, isSdBlob)
}

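// doHandshake reads the client's handshake, checks that the protocol version is supported, and echoes the
// version back to the client.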
func (s *Server) doHandshake(conn net.Conn) error {
    var handshake handshakeRequestResponse
    err := s.read(conn, &handshake)
    if err != nil {
        return err
    } else if handshake.Version == nil {
        return errors.Err("handshake is missing protocol version")
    } else if *handshake.Version != protocolVersion1 && *handshake.Version != protocolVersion2 {
        return errors.Err("protocol version not supported")
    }

    resp, err := json.Marshal(handshakeRequestResponse{Version: handshake.Version})
    if err != nil {
        return err
    }

    return s.write(conn, resp)
}

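// readBlobRequest reads and validates the next blob request, returning the blob size, blob hash, and
// whether the blob is an SD blob.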
func (s *Server) readBlobRequest(conn net.Conn) (int, string, bool, error) {
    var sendRequest sendBlobRequest
    err := s.read(conn, &sendRequest)
    if err != nil {
        return 0, "", false, err
    }

    var blobHash string
    var blobSize int
    isSdBlob := sendRequest.SdBlobHash != ""

    if isSdBlob {
        blobSize = sendRequest.SdBlobSize
        blobHash = sendRequest.SdBlobHash
    } else {
        blobSize = sendRequest.BlobSize
        blobHash = sendRequest.BlobHash
    }

    if blobHash == "" {
        return blobSize, blobHash, isSdBlob, errors.Err("blob hash is empty")
    }
    if blobSize > maxBlobSize {
        return blobSize, blobHash, isSdBlob, errors.Err(ErrBlobTooBig)
    }
    if blobSize == 0 {
        return blobSize, blobHash, isSdBlob, errors.Err("0-byte blob received")
    }

    return blobSize, blobHash, isSdBlob, nil
}

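// sendBlobResponse tells the client whether to send the blob and, for SD blobs, which content blobs are
// still needed.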
func (s *Server) sendBlobResponse(conn net.Conn, shouldSendBlob, isSdBlob bool, neededBlobs []string) error {
    var response []byte
    var err error

    if isSdBlob {
        response, err = json.Marshal(sendSdBlobResponse{SendSdBlob: shouldSendBlob, NeededBlobs: neededBlobs})
    } else {
        response, err = json.Marshal(sendBlobResponse{SendBlob: shouldSendBlob})
    }
    if err != nil {
        return err
    }

    return s.write(conn, response)
}

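// sendTransferResponse tells the client whether the blob was received successfully.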
func (s *Server) sendTransferResponse(conn net.Conn, receivedBlob, isSdBlob bool) error {
    var response []byte
    var err error

    if isSdBlob {
        response, err = json.Marshal(sdBlobTransferResponse{ReceivedSdBlob: receivedBlob})
    } else {
        response, err = json.Marshal(blobTransferResponse{ReceivedBlob: receivedBlob})
    }
    if err != nil {
        return err
    }

    return s.write(conn, response)
}

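// read decodes a single JSON message from the connection, subject to the server's read timeout.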
func (s *Server) read(conn net.Conn, v interface{}) error {
    err := conn.SetReadDeadline(time.Now().Add(s.Timeout))
    if err != nil {
        return errors.Err(err)
    }

    dec := json.NewDecoder(conn)
    err = dec.Decode(v)
    if err != nil {
        data, _ := ioutil.ReadAll(dec.Buffered())
        if len(data) > 0 {
            return errors.Err("%s. Data: %s", err.Error(), hex.EncodeToString(data))
        }
        return errors.Err(err)
    }

    return nil
}

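// readRawBlob reads exactly blobSize bytes of blob data from the connection.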
func (s *Server) readRawBlob(conn net.Conn, blobSize int) ([]byte, error) {
    err := conn.SetReadDeadline(time.Now().Add(s.Timeout))
    if err != nil {
        return nil, errors.Err(err)
    }

    blob := make([]byte, blobSize)
    _, err = io.ReadFull(bufio.NewReader(conn), blob)
    return blob, errors.Err(err)
}

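// write sends b over the connection, subject to the server's write timeout.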
func (s *Server) write(conn net.Conn, b []byte) error {
    err := conn.SetWriteDeadline(time.Now().Add(s.Timeout))
    if err != nil {
        return errors.Err(err)
    }

    n, err := conn.Write(b)
    if err == nil && n != len(b) {
        err = io.ErrShortWrite
    }
    return errors.Err(err)
}

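// quitting reports whether the server has been told to shut down.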
func (s *Server) quitting() bool {
    select {
    case <-s.grp.Ch():
        return true
    default:
        return false
    }
}

// BlobHash returns the hex-encoded SHA-384 hash of the blob byte slice.
func BlobHash(blob []byte) string {
    hashBytes := sha512.Sum384(blob)
    return hex.EncodeToString(hashBytes[:])
}

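// IsValidJSON reports whether b is syntactically valid JSON.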
func IsValidJSON(b []byte) bool {
    var r json.RawMessage
    return json.Unmarshal(b, &r) == nil
}

//type errorResponse struct {
//	Error string `json:"error"`
//}

type handshakeRequestResponse struct {
    Version *int `json:"version"`
}

type sendBlobRequest struct {
    BlobHash   string `json:"blob_hash,omitempty"`
    BlobSize   int    `json:"blob_size,omitempty"`
    SdBlobHash string `json:"sd_blob_hash,omitempty"`
    SdBlobSize int    `json:"sd_blob_size,omitempty"`
}

type sendBlobResponse struct {
    SendBlob bool `json:"send_blob"`
}

type sendSdBlobResponse struct {
    SendSdBlob  bool     `json:"send_sd_blob"`
    NeededBlobs []string `json:"needed_blobs,omitempty"`
}

type blobTransferResponse struct {
    ReceivedBlob bool `json:"received_blob"`
}

type sdBlobTransferResponse struct {
    ReceivedSdBlob bool `json:"received_sd_blob"`
}

// neededBlobChecker can check which blobs from a known stream are not uploaded yet
type neededBlobChecker interface {
    MissingBlobsForKnownStream(string) ([]string, error)
}