package reflector

import (
	"bufio"
	"crypto/sha512"
	"encoding/hex"
	"encoding/json"
	"io"
	"net"
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/extras/stop"
	"github.com/lbryio/lbry.go/v2/stream"

	log "github.com/sirupsen/logrus"
)

const (
	// DefaultPort is the port the reflector server listens on if not passed in.
	DefaultPort = 5566
	// DefaultTimeout is the default timeout to read or write the next message
	DefaultTimeout = 5 * time.Second

	network          = "tcp4"
	protocolVersion1 = 0
	protocolVersion2 = 1
	maxBlobSize      = stream.MaxBlobSize
)

// ErrBlobTooBig is returned when the client declares a blob larger than maxBlobSize.
var ErrBlobTooBig = errors.Base("blob must be at most %d bytes", maxBlobSize)

// Server is an instance of the reflector server. It houses the blob store and listener.
type Server struct {
	Timeout time.Duration // timeout to read or write next message

	EnableBlocklist bool // if true, blocklist checking and blob deletion will be enabled

	underlyingStore store.BlobStore
	outerStore      store.BlobStore
	grp             *stop.Group
}

// NewServer returns an initialized reflector server pointer.
func NewServer(underlying store.BlobStore, outer store.BlobStore) *Server {
	return &Server{
		Timeout:         DefaultTimeout,
		underlyingStore: underlying,
		outerStore:      outer,
		grp:             stop.New(),
	}
}
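
// A minimal usage sketch (assumes some store.BlobStore implementation named
// blobStore, which is not part of this file):
//
//	srv := NewServer(blobStore, blobStore)
//	srv.Timeout = 30 * time.Second
//	if err := srv.Start(":5566"); err != nil { // 5566 is DefaultPort
//		log.Fatal(err)
//	}
//	defer srv.Shutdown()
//	// ... block until the process is told to exit ...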

// Shutdown shuts down the reflector server gracefully.
func (s *Server) Shutdown() {
	log.Println("shutting down reflector server...")
	s.grp.StopAndWait()
	log.Println("reflector server stopped")
}

// Start starts the server to handle connections.
func (s *Server) Start(address string) error {
	l, err := net.Listen(network, address)
	if err != nil {
		return errors.Err(err)
	}
	log.Println("reflector listening on " + address)
	s.grp.Add(1)
	metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc()
	go func() {
		defer metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Dec()
		<-s.grp.Ch()
		err := l.Close()
		if err != nil {
			log.Error(errors.Prefix("closing listener", err))
		}
		s.grp.Done()
	}()

	s.grp.Add(1)
	metrics.RoutinesQueue.WithLabelValues("reflector", "start").Inc()
	go func() {
		defer metrics.RoutinesQueue.WithLabelValues("reflector", "start").Dec()
		s.listenAndServe(l)
		s.grp.Done()
	}()

	if s.EnableBlocklist {
		if b, ok := s.underlyingStore.(store.Blocklister); ok {
			s.grp.Add(1)
			metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Inc()
			go func() {
				defer metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Dec()
				s.enableBlocklist(b)
				s.grp.Done()
			}()
		} else {
			//s.Shutdown()
			return errors.Err("blocklist is enabled but blob store does not support blocklisting")
		}
	}

	return nil
}
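
// Start returns as soon as the listener and worker goroutines are running;
// serving continues in the background until Shutdown is called. If
// EnableBlocklist is set, the underlying store must also implement
// store.Blocklister, otherwise Start returns an error.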

// listenAndServe accepts connections until the server shuts down, handling
// each connection in its own goroutine.
func (s *Server) listenAndServe(listener net.Listener) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			if s.quitting() {
				return
			}
			log.Error(err)
		} else {
			s.grp.Add(1)
			metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
			go func() {
				defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
				s.handleConn(conn)
				s.grp.Done()
			}()
		}
	}
}

// handleConn drives the protocol for a single client connection: one handshake,
// then repeated blob transfers until the client disconnects or the server stops.
func (s *Server) handleConn(conn net.Conn) {
	// the goroutine below closes the connection when this handler returns or
	// when the server shuts down, whichever happens first
	connNeedsClosing := make(chan struct{})
	defer func() {
		close(connNeedsClosing)
	}()
	s.grp.Add(1)
	metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Inc()
	go func() {
		defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Dec()
		defer s.grp.Done()
		select {
		case <-connNeedsClosing:
		case <-s.grp.Ch():
		}
		err := conn.Close()
		if err != nil {
			log.Error(errors.Prefix("closing peer conn", err))
		}
	}()

	err := s.doHandshake(conn)
	if err != nil {
		if errors.Is(err, io.EOF) || s.quitting() {
			return
		}
		err := s.doError(conn, err)
		if err != nil {
			log.Error(errors.Prefix("sending handshake error", err))
		}
		return
	}

	for {
		err = s.receiveBlob(conn)
		if err != nil {
			if errors.Is(err, io.EOF) || s.quitting() {
				return
			}
			err := s.doError(conn, err)
			if err != nil {
				log.Error(errors.Prefix("sending blob receive error", err))
			}
			return
		}
	}
}
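
// After a successful handshake, each iteration of the receive loop above is
// one blob transfer. A sketch of a successful transfer as it appears on the
// wire (hash and size are illustrative):
//
//	client -> {"blob_hash": "abcd...", "blob_size": 1024}
//	server -> {"send_blob": true}
//	client -> <1024 raw blob bytes>
//	server -> {"received_blob": true}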

// doError logs err and records it in metrics. Writing an error response back
// to the client is currently disabled.
func (s *Server) doError(conn net.Conn, err error) error {
	if err == nil {
		return nil
	}
	shouldLog := metrics.TrackError(metrics.DirectionUpload, err)
	if shouldLog {
		log.Errorln(errors.FullTrace(err))
	}
	if e2, ok := err.(*json.SyntaxError); ok {
		log.Errorf("syntax error at byte offset %d", e2.Offset)
	}
	//resp, err := json.Marshal(errorResponse{Error: err.Error()})
	//if err != nil {
	//	return err
	//}
	//return s.write(conn, resp)
	return nil
}

// receiveBlob handles a single blob transfer: it reads the blob request,
// replies with whether the blob is wanted, and if so reads, verifies, and
// stores the blob before sending a transfer response.
func (s *Server) receiveBlob(conn net.Conn) error {
	blobSize, blobHash, isSdBlob, err := s.readBlobRequest(conn)
	if err != nil {
		return err
	}

	var wantsBlob bool
	if bl, ok := s.underlyingStore.(store.Blocklister); ok {
		wantsBlob, err = bl.Wants(blobHash)
		if err != nil {
			return err
		}
	} else {
		blobExists, err := s.underlyingStore.Has(blobHash)
		if err != nil {
			return err
		}
		wantsBlob = !blobExists
	}

	var neededBlobs []string

	if isSdBlob && !wantsBlob {
		if nbc, ok := s.underlyingStore.(neededBlobChecker); ok {
			neededBlobs, err = nbc.MissingBlobsForKnownStream(blobHash)
			if err != nil {
				return err
			}
		} else {
			// if we can't check for blobs in a stream, we have to say that the sd blob is
			// missing. if we say we have the sd blob, they won't try to send any content blobs
			wantsBlob = true
		}
	}

	err = s.sendBlobResponse(conn, wantsBlob, isSdBlob, neededBlobs)
	if err != nil {
		return err
	}

	if !wantsBlob {
		return nil
	}

	blob, err := s.readRawBlob(conn, blobSize)
	if err != nil {
		sendErr := s.sendTransferResponse(conn, false, isSdBlob)
		if sendErr != nil {
			return sendErr
		}
		return errors.Prefix("error reading blob "+blobHash[:8], err)
	}

	receivedBlobHash := BlobHash(blob)
	if blobHash != receivedBlobHash {
		sendErr := s.sendTransferResponse(conn, false, isSdBlob)
		if sendErr != nil {
			return sendErr
		}
		// a mismatch can also happen if the declared blob size is wrong, because
		// the server will then read the wrong number of bytes from the stream
		return errors.Err("hash of received blob data does not match hash from send request")
	}

	log.Debugln("Got blob " + blobHash[:8])

	if isSdBlob {
		err = s.outerStore.PutSD(blobHash, blob)
	} else {
		err = s.outerStore.Put(blobHash, blob)
	}
	if err != nil {
		return err
	}
	metrics.MtrInBytesReflector.Add(float64(len(blob)))
	metrics.BlobUploadCount.Inc()
	if isSdBlob {
		metrics.SDBlobUploadCount.Inc()
	}
	return s.sendTransferResponse(conn, true, isSdBlob)
}
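
// For sd blobs the same exchange uses the sd-prefixed fields: the request
// carries sd_blob_hash/sd_blob_size, the reply is send_sd_blob (optionally
// listing needed_blobs when the stream is already partially uploaded), and
// the final message is received_sd_blob.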

// doHandshake reads the client's handshake, validates the protocol version,
// and echoes the version back.
func (s *Server) doHandshake(conn net.Conn) error {
	var handshake handshakeRequestResponse
	err := s.read(conn, &handshake)
	if err != nil {
		return err
	} else if handshake.Version == nil {
		return errors.Err("handshake is missing protocol version")
	} else if *handshake.Version != protocolVersion1 && *handshake.Version != protocolVersion2 {
		return errors.Err("protocol version not supported")
	}

	resp, err := json.Marshal(handshakeRequestResponse{Version: handshake.Version})
	if err != nil {
		return err
	}

	return s.write(conn, resp)
}
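
// On the wire, the handshake is a pair of JSON messages, e.g.:
//
//	client -> {"version": 1}
//	server -> {"version": 1}
//
// Both protocolVersion1 (0) and protocolVersion2 (1) are accepted.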

// readBlobRequest reads and validates a send-blob request, returning the
// declared blob size, the blob hash, and whether the blob is an sd blob.
func (s *Server) readBlobRequest(conn net.Conn) (int, string, bool, error) {
	var sendRequest sendBlobRequest
	err := s.read(conn, &sendRequest)
	if err != nil {
		return 0, "", false, err
	}

	var blobHash string
	var blobSize int
	isSdBlob := sendRequest.SdBlobHash != ""

	if isSdBlob {
		blobSize = sendRequest.SdBlobSize
		blobHash = sendRequest.SdBlobHash
	} else {
		blobSize = sendRequest.BlobSize
		blobHash = sendRequest.BlobHash
	}

	if blobHash == "" {
		return blobSize, blobHash, isSdBlob, errors.Err("blob hash is empty")
	}
	if blobSize > maxBlobSize {
		return blobSize, blobHash, isSdBlob, errors.Err(ErrBlobTooBig)
	}
	if blobSize == 0 {
		return blobSize, blobHash, isSdBlob, errors.Err("0-byte blob received")
	}

	return blobSize, blobHash, isSdBlob, nil
}

// sendBlobResponse tells the client whether it should send the blob (and, for
// sd blobs, which content blobs are still needed).
func (s *Server) sendBlobResponse(conn net.Conn, shouldSendBlob, isSdBlob bool, neededBlobs []string) error {
	var response []byte
	var err error

	if isSdBlob {
		response, err = json.Marshal(sendSdBlobResponse{SendSdBlob: shouldSendBlob, NeededBlobs: neededBlobs})
	} else {
		response, err = json.Marshal(sendBlobResponse{SendBlob: shouldSendBlob})
	}
	if err != nil {
		return err
	}

	return s.write(conn, response)
}

// sendTransferResponse tells the client whether the blob was received successfully.
func (s *Server) sendTransferResponse(conn net.Conn, receivedBlob, isSdBlob bool) error {
	var response []byte
	var err error

	if isSdBlob {
		response, err = json.Marshal(sdBlobTransferResponse{ReceivedSdBlob: receivedBlob})
	} else {
		response, err = json.Marshal(blobTransferResponse{ReceivedBlob: receivedBlob})
	}
	if err != nil {
		return err
	}

	return s.write(conn, response)
}

// read decodes a single JSON message from conn into v, applying s.Timeout as
// the read deadline.
func (s *Server) read(conn net.Conn, v interface{}) error {
	err := conn.SetReadDeadline(time.Now().Add(s.Timeout))
	if err != nil {
		return errors.Err(err)
	}

	dec := json.NewDecoder(conn)
	err = dec.Decode(v)
	if err != nil {
		data, _ := io.ReadAll(dec.Buffered())
		if len(data) > 0 {
			return errors.Err("%s. Data: %s", err.Error(), hex.EncodeToString(data))
		}
		return errors.Err(err)
	}
	return nil
}
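
// Note that the read deadline covers the whole message, so a client that goes
// silent for longer than s.Timeout will have its connection closed.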

// readRawBlob reads exactly blobSize bytes of blob data from conn.
func (s *Server) readRawBlob(conn net.Conn, blobSize int) ([]byte, error) {
	err := conn.SetReadDeadline(time.Now().Add(s.Timeout))
	if err != nil {
		return nil, errors.Err(err)
	}

	blob := make([]byte, blobSize)
	_, err = io.ReadFull(bufio.NewReader(conn), blob)
	return blob, errors.Err(err)
}

// write writes b to conn, applying s.Timeout as the write deadline.
func (s *Server) write(conn net.Conn, b []byte) error {
	err := conn.SetWriteDeadline(time.Now().Add(s.Timeout))
	if err != nil {
		return errors.Err(err)
	}

	n, err := conn.Write(b)
	if err == nil && n != len(b) {
		err = io.ErrShortWrite
	}
	return errors.Err(err)
}

// quitting returns true once the server has been told to shut down.
func (s *Server) quitting() bool {
	select {
	case <-s.grp.Ch():
		return true
	default:
		return false
	}
}

// BlobHash returns the hex-encoded SHA-384 hash (via the sha512 package) of the blob.
func BlobHash(blob []byte) string {
	hashBytes := sha512.Sum384(blob)
	return hex.EncodeToString(hashBytes[:])
}
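
// Since sha512.Sum384 produces 48 bytes, blob hashes are always 96 lowercase
// hex characters; a transfer only succeeds when the hash in the request
// matches this encoding exactly.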

// IsValidJSON returns true if b is valid JSON.
func IsValidJSON(b []byte) bool {
	var r json.RawMessage
	return json.Unmarshal(b, &r) == nil
}

//type errorResponse struct {
//	Error string `json:"error"`
//}

type handshakeRequestResponse struct {
	Version *int `json:"version"`
}

type sendBlobRequest struct {
	BlobHash   string `json:"blob_hash,omitempty"`
	BlobSize   int    `json:"blob_size,omitempty"`
	SdBlobHash string `json:"sd_blob_hash,omitempty"`
	SdBlobSize int    `json:"sd_blob_size,omitempty"`
}

type sendBlobResponse struct {
	SendBlob bool `json:"send_blob"`
}

type sendSdBlobResponse struct {
	SendSdBlob  bool     `json:"send_sd_blob"`
	NeededBlobs []string `json:"needed_blobs,omitempty"`
}

type blobTransferResponse struct {
	ReceivedBlob bool `json:"received_blob"`
}

type sdBlobTransferResponse struct {
	ReceivedSdBlob bool `json:"received_sd_blob"`
}

// neededBlobChecker can check which blobs from a known stream are not uploaded yet
type neededBlobChecker interface {
	MissingBlobsForKnownStream(string) ([]string, error)
}