package peer

import (
	"bufio"
	"encoding/json"
	"io"
	"net"
	"strings"
	"time"

	"github.com/lbryio/lbry.go/stream"

	"github.com/lbryio/reflector.go/reflector"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/extras/errors"
	"github.com/lbryio/lbry.go/extras/stop"

	log "github.com/sirupsen/logrus"
)

const (
	// DefaultPort is the port the peer server listens on if not passed in.
	DefaultPort = 3333
	// LbrycrdAddress to be used when paying for data. Not implemented yet.
	LbrycrdAddress = "bJxKvpD96kaJLriqVajZ7SaQTsWWyrGQct"
)

// Server is an instance of a peer server that houses the listener and store.
type Server struct {
	StatLogger          *log.Logger   // logger to log stats
	StatReportFrequency time.Duration // how often to log stats

	store  store.BlobStore
	closed bool

	grp   *stop.Group
	stats *reflector.Stats
}

// NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore) *Server {
	return &Server{
		store: store,
		grp:   stop.New(),
	}
}
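
// A minimal usage sketch from a calling package (not part of the original
// file; myStore stands in for any store.BlobStore implementation):
//
//	s := peer.NewServer(myStore)
//	if err := s.Start(":3333"); err != nil {
//		log.Fatal(err)
//	}
//	defer s.Shutdown()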

// Shutdown gracefully shuts down the peer server.
func (s *Server) Shutdown() {
	log.Debug("shutting down peer server...")
	s.stats.Shutdown()
	s.grp.StopAndWait()
	log.Debug("peer server stopped")
}

// Start starts the server listener to handle connections.
func (s *Server) Start(address string) error {
	log.Println("peer listening on " + address)
	l, err := net.Listen("tcp4", address)
	if err != nil {
		return err
	}

	go s.listenForShutdown(l)
	s.grp.Add(1)
	go func() {
		s.listenAndServe(l)
		s.grp.Done()
	}()

	s.stats = reflector.NewStatLogger("DOWNLOAD", s.StatLogger, s.StatReportFrequency, s.grp.Child())
	if s.StatLogger != nil && s.StatReportFrequency > 0 {
		s.stats.Start()
	}

	return nil
}

func (s *Server) listenForShutdown(listener net.Listener) {
	<-s.grp.Ch()
	s.closed = true
	err := listener.Close()
	if err != nil {
		log.Error("error closing listener for peer server - ", err)
	}
}

func (s *Server) listenAndServe(listener net.Listener) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			if s.closed {
				return
			}
			log.Error(errors.Prefix("accepting conn", err))
		} else {
			s.grp.Add(1)
			go func() {
				s.handleConnection(conn)
				s.grp.Done()
			}()
		}
	}
}

func (s *Server) handleConnection(conn net.Conn) {
	defer func() {
		if err := conn.Close(); err != nil {
			log.Error(errors.Prefix("closing peer conn", err))
		}
	}()

	timeoutDuration := 1 * time.Minute
	buf := bufio.NewReader(conn)

	for {
		var request []byte
		var response []byte

		err := conn.SetReadDeadline(time.Now().Add(timeoutDuration))
		if err != nil {
			log.Error(errors.FullTrace(err))
		}

		request, err = readNextMessage(buf)
		if err != nil {
			if err != io.EOF {
				s.logError(err)
			}
			return
		}

		err = conn.SetReadDeadline(time.Time{})
		if err != nil {
			log.Error(errors.FullTrace(err))
		}

		response, err = s.handleCompositeRequest(request)
		if err != nil {
			log.Error(err)
			return
		}

		err = conn.SetWriteDeadline(time.Now().Add(timeoutDuration))
		if err != nil {
			log.Error(errors.FullTrace(err))
		}

		n, err := conn.Write(response)
		if err != nil {
			if !strings.Contains(err.Error(), "connection reset by peer") { // means the other side closed the connection using TCP reset
				s.logError(err)
			}
			return
		} else if n != len(response) {
			log.Errorln(io.ErrShortWrite)
			return
		}

		err = conn.SetWriteDeadline(time.Time{})
		if err != nil {
			log.Error(errors.FullTrace(err))
		}
	}
}

func (s *Server) handleAvailabilityRequest(data []byte) ([]byte, error) {
	var request availabilityRequest
	err := json.Unmarshal(data, &request)
	if err != nil {
		return []byte{}, err
	}

	availableBlobs := []string{}
	for _, blobHash := range request.RequestedBlobs {
		exists, err := s.store.Has(blobHash)
		if err != nil {
			return []byte{}, err
		}
		if exists {
			availableBlobs = append(availableBlobs, blobHash)
		}
	}

	return json.Marshal(availabilityResponse{LbrycrdAddress: LbrycrdAddress, AvailableBlobs: availableBlobs})
}

func (s *Server) handlePaymentRateNegotiation(data []byte) ([]byte, error) {
	var request paymentRateRequest
	err := json.Unmarshal(data, &request)
	if err != nil {
		return []byte{}, err
	}

	offerReply := paymentRateAccepted
	if request.BlobDataPaymentRate < 0 {
		offerReply = paymentRateTooLow
	}

	return json.Marshal(paymentRateResponse{BlobDataPaymentRate: offerReply})
}

func (s *Server) handleBlobRequest(data []byte) ([]byte, error) {
	var request blobRequest
	err := json.Unmarshal(data, &request)
	if err != nil {
		return []byte{}, err
	}

	log.Debugln("Sending blob " + request.RequestedBlob[:8])

	blob, err := s.store.Get(request.RequestedBlob)
	if err != nil {
		return []byte{}, err
	}

	response, err := json.Marshal(blobResponse{IncomingBlob: incomingBlob{
		BlobHash: reflector.BlobHash(blob),
		Length:   len(blob),
	}})
	if err != nil {
		return []byte{}, err
	}

	return append(response, blob...), nil
}

func (s *Server) handleCompositeRequest(data []byte) ([]byte, error) {
	var request compositeRequest
	err := json.Unmarshal(data, &request)
	if err != nil {
		return []byte{}, err
	}

	response := compositeResponse{
		LbrycrdAddress: LbrycrdAddress,
	}

	if len(request.RequestedBlobs) > 0 {
		var availableBlobs []string
		for _, blobHash := range request.RequestedBlobs {
			exists, err := s.store.Has(blobHash)
			if err != nil {
				return []byte{}, err
			}
			if exists {
				availableBlobs = append(availableBlobs, blobHash)
			}
		}
		response.AvailableBlobs = availableBlobs
	}

	response.BlobDataPaymentRate = paymentRateAccepted
	if request.BlobDataPaymentRate < 0 {
		response.BlobDataPaymentRate = paymentRateTooLow
	}

	var blob []byte
	if request.RequestedBlob != "" {
		if len(request.RequestedBlob) != stream.BlobHashHexLength {
			return nil, errors.Err("Invalid blob hash length")
		}

		log.Debugln("Sending blob " + request.RequestedBlob[:8])

		blob, err = s.store.Get(request.RequestedBlob)
		if errors.Is(err, store.ErrBlobNotFound) {
			response.IncomingBlob = incomingBlob{
				Error: err.Error(),
			}
		} else if err != nil {
			return []byte{}, err
		} else {
			response.IncomingBlob = incomingBlob{
				BlobHash: reflector.BlobHash(blob),
				Length:   len(blob),
			}
			s.stats.AddBlob()
		}
	}

	respData, err := json.Marshal(response)
	if err != nil {
		return []byte{}, err
	}

	return append(respData, blob...), nil
}
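
// readCompositeResponse is a hypothetical client-side sketch, not part of the
// original file: it shows how the bytes produced above are meant to be
// consumed, i.e. a JSON-encoded compositeResponse followed immediately by the
// raw blob bytes, whose size is reported in IncomingBlob.Length.
func readCompositeResponse(r io.Reader) (compositeResponse, []byte, error) {
	var resp compositeResponse
	dec := json.NewDecoder(r)
	// Decode stops after the first complete JSON value; any blob bytes it
	// read ahead remain available via dec.Buffered().
	if err := dec.Decode(&resp); err != nil {
		return resp, nil, err
	}
	blob := make([]byte, resp.IncomingBlob.Length)
	if _, err := io.ReadFull(io.MultiReader(dec.Buffered(), r), blob); err != nil {
		return resp, nil, err
	}
	return resp, blob, nil
}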

func (s *Server) logError(e error) {
	if e == nil {
		return
	}
	shouldLog := s.stats.AddError(e)
	if shouldLog {
		log.Errorln(errors.FullTrace(e))
	}

	return

	// old stuff below. it's here for posterity, because we're gonna have to deal with these errors someday for real

	//err := errors.Wrap(e, 0)

	// these happen because the peer protocol does not have a way to cancel blob downloads,
	// so the client will just close the connection if it's in the middle of downloading a blob
	// but receives the blob from a different peer first or simply goes offline (timeout)
	//if strings.Contains(err.Error(), "connection reset by peer") ||
	//	strings.Contains(err.Error(), "i/o timeout") ||
	//	strings.Contains(err.Error(), "broken pipe") {
	//	return
	//}
	//
	//log.Error(errors.FullTrace(e))
}

func readNextMessage(buf *bufio.Reader) ([]byte, error) {
	request := make([]byte, 0)
	eof := false

	for {
		chunk, err := buf.ReadBytes('}')
		if err != nil {
			if err != io.EOF {
				//log.Errorln("readBytes error:", err) // logged by caller
				return request, err
			}
			eof = true
		}

		//log.Debugln("got", len(chunk), "bytes.")
		//spew.Dump(chunk)

		if len(chunk) > 0 {
			request = append(request, chunk...)

			if len(request) > maxRequestSize {
				return request, errRequestTooLarge
			}

			// yes, this is how the peer protocol knows when the request finishes
			if reflector.IsValidJSON(request) {
				break
			}
		}

		if eof {
			break
		}
	}

	//log.Debugln("total size:", len(request))
	//if len(request) > 0 {
	//	spew.Dump(request)
	//}

	if len(request) == 0 && eof {
		return []byte{}, io.EOF
	}

	return request, nil
}
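
// For reference: a request on the wire is a bare JSON object with no length
// prefix or delimiter, so readNextMessage accumulates bytes up to each '}'
// until the whole buffer parses as valid JSON. A hypothetical composite
// request (hashes shortened for readability) looks like:
//
//	{"lbrycrd_address": true, "requested_blobs": ["abc123..."], "blob_data_payment_rate": 0, "requested_blob": "abc123..."}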

const (
	maxRequestSize = 64 * (1 << 10) // 64kb

	paymentRateAccepted = "RATE_ACCEPTED"
	paymentRateTooLow   = "RATE_TOO_LOW"
	//ToDo: paymentRateUnset is not used but exists in the protocol.
	//paymentRateUnset = "RATE_UNSET"
)

var errRequestTooLarge = errors.Base("request is too large")

type availabilityRequest struct {
	LbrycrdAddress bool     `json:"lbrycrd_address"`
	RequestedBlobs []string `json:"requested_blobs"`
}

type availabilityResponse struct {
	LbrycrdAddress string   `json:"lbrycrd_address"`
	AvailableBlobs []string `json:"available_blobs"`
}

type paymentRateRequest struct {
	BlobDataPaymentRate float64 `json:"blob_data_payment_rate"`
}

type paymentRateResponse struct {
	BlobDataPaymentRate string `json:"blob_data_payment_rate"`
}

type blobRequest struct {
	RequestedBlob string `json:"requested_blob"`
}

type incomingBlob struct {
	Error    string `json:"error,omitempty"`
	BlobHash string `json:"blob_hash"`
	Length   int    `json:"length"`
}

type blobResponse struct {
	IncomingBlob incomingBlob `json:"incoming_blob"`
}

type compositeRequest struct {
	LbrycrdAddress      bool     `json:"lbrycrd_address"`
	RequestedBlobs      []string `json:"requested_blobs"`
	BlobDataPaymentRate float64  `json:"blob_data_payment_rate"`
	RequestedBlob       string   `json:"requested_blob"`
}

type compositeResponse struct {
	LbrycrdAddress      string       `json:"lbrycrd_address,omitempty"`
	AvailableBlobs      []string     `json:"available_blobs,omitempty"`
	BlobDataPaymentRate string       `json:"blob_data_payment_rate,omitempty"`
	IncomingBlob        incomingBlob `json:"incoming_blob,omitempty"`
}
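
// Hedged illustration (values are made up for readability): serialized with
// the tags above, a successful composite response looks like
//
//	{"lbrycrd_address":"bJxKvpD96kaJLriqVajZ7SaQTsWWyrGQct","available_blobs":["abc123..."],"blob_data_payment_rate":"RATE_ACCEPTED","incoming_blob":{"blob_hash":"abc123...","length":123456}}
//
// and handleCompositeRequest appends the raw blob bytes directly after the
// closing brace, so the client must read exactly incoming_blob.length bytes
// once the JSON has been parsed.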