Remove excessive INFO-level logging

Andrey Beletsky 2019-12-16 21:52:51 +07:00
parent 834733b675
commit 95eb94f5a7
3 changed files with 18 additions and 21 deletions


@@ -140,13 +140,13 @@ func (c *Client) GetBlob(hash string) (stream.Blob, error) {
 		return nil, errors.Prefix(hash[:8], resp.IncomingBlob.Error)
 	}
 	if resp.IncomingBlob.BlobHash != hash {
-		return nil, errors.Prefix(hash[:8], "Blob hash in response does not match requested hash")
+		return nil, errors.Prefix(hash[:8], "blob hash in response does not match requested hash")
 	}
 	if resp.IncomingBlob.Length <= 0 {
-		return nil, errors.Prefix(hash[:8], "Length reported as <= 0")
+		return nil, errors.Prefix(hash[:8], "length reported as <= 0")
 	}
-	log.Printf("Receiving blob %s from %s", hash[:8], c.conn.RemoteAddr())
+	log.Debugf("receiving blob %s from %s", hash[:8], c.conn.RemoteAddr())
 	blob, err := c.readRawBlob(resp.IncomingBlob.Length)
 	if err != nil {
@@ -167,7 +167,7 @@ func (c *Client) read(v interface{}) error {
 		return err
 	}
-	log.Debugf("Read %d bytes from %s", len(m), c.conn.RemoteAddr())
+	log.Debugf("read %d bytes from %s", len(m), c.conn.RemoteAddr())
 	err = json.Unmarshal(m, v)
 	return errors.Err(err)
@@ -181,7 +181,7 @@ func (c *Client) readRawBlob(blobSize int) ([]byte, error) {
 	blob := make([]byte, blobSize)
 	n, err := io.ReadFull(c.buf, blob)
-	log.Debugf("Read %d bytes from %s", n, c.conn.RemoteAddr())
+	log.Debugf("read %d bytes from %s", n, c.conn.RemoteAddr())
 	return blob, errors.Err(err)
 }
@@ -191,7 +191,7 @@ func (c *Client) write(b []byte) error {
 		return errors.Err(err)
 	}
-	log.Debugf("Writing %d bytes to %s", len(b), c.conn.RemoteAddr())
+	log.Debugf("writing %d bytes to %s", len(b), c.conn.RemoteAddr())
 	n, err := c.conn.Write(b)
 	if err == nil && n != len(b) {
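
These peer-client messages, demoted from Printf/Infof to Debugf, are now silent under the default log level. A minimal sketch of opting back in, assuming the log import here is the logrus package (its Debugf/Infof signatures match the calls above):

    package main

    import (
        log "github.com/sirupsen/logrus"
    )

    func main() {
        // logrus defaults to InfoLevel, so Debugf output is suppressed
        // until the consumer raises the level explicitly.
        log.SetLevel(log.DebugLevel)

        // The demoted messages then show up again, e.g.:
        log.Debugf("read %d bytes from %s", 1024, "203.0.113.5:5567")
    }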


@@ -118,5 +118,5 @@ func (s *Stats) log() {
 		errStr = errStr[:len(errStr)-2] // trim last comma and space
 	}
-	s.logger.Printf("%s stats: %d blobs, %d streams, errors: %s", s.name, blobs, streams, errStr)
+	s.logger.Debugf("%s stats: %d blobs, %d streams, errors: %s", s.name, blobs, streams, errStr)
 }
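
Note that Stats logs through an injected s.logger instance, so its verbosity is controlled per instance rather than through the package-level logger. A sketch of such wiring, assuming s.logger is a *logrus.Logger (which the Debugf call above is compatible with); the actual Stats constructor is not part of this diff:

    package main

    import log "github.com/sirupsen/logrus"

    func main() {
        // Each component can carry its own logger with its own level.
        statsLogger := log.New()
        statsLogger.SetLevel(log.DebugLevel) // opt in to the periodic stats line
        statsLogger.Debugf("%s stats: %d blobs, %d streams, errors: %s",
            "upload", 12, 3, "none")
    }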


@@ -68,7 +68,7 @@ func (u *Uploader) Upload(dirOrFilePath string) error {
 		hashes[i] = path.Base(p)
 	}
-	log.Infoln("checking for existing blobs")
+	log.Debug("checking for existing blobs")
 	var exists map[string]bool
 	if !u.skipExistsCheck {
@@ -79,7 +79,7 @@ func (u *Uploader) Upload(dirOrFilePath string) error {
 		u.count.AlreadyStored = len(exists)
 	}
-	log.Infof("%d new blobs to upload", u.count.Total-u.count.AlreadyStored)
+	log.Debugf("%d new blobs to upload", u.count.Total-u.count.AlreadyStored)
 	workerWG := sync.WaitGroup{}
 	pathChan := make(chan string)
@@ -119,13 +119,10 @@ Upload:
 	countWG.Wait()
 	u.stopper.Stop()
-	log.Infoln("SUMMARY")
-	log.Infof("%d blobs total", u.count.Total)
-	log.Infof("%d blobs already stored", u.count.AlreadyStored)
-	log.Infof("%d SD blobs uploaded", u.count.Sd)
-	log.Infof("%d content blobs uploaded", u.count.Blob)
-	log.Infof("%d errors encountered", u.count.Err)
+	log.Debugf(
+		"upload stats: %d blobs total, %d already stored, %d SD blobs uploaded, %d content blobs uploaded, %d errors",
+		u.count.Total, u.count.AlreadyStored, u.count.Sd, u.count.Blob, u.count.Err,
+	)
 	return nil
 }
@@ -167,17 +164,17 @@ func (u *Uploader) uploadBlob(filepath string) (err error) {
 	}
 	if IsValidJSON(blob) {
-		log.Debugf("Uploading SD blob %s", hash)
+		log.Debugf("uploading SD blob %s", hash)
 		err := u.store.PutSD(hash, blob)
 		if err != nil {
-			return errors.Prefix("Uploading SD blob "+hash, err)
+			return errors.Prefix("uploading SD blob "+hash, err)
 		}
 		u.inc(sdInc)
 	} else {
-		log.Debugf("Uploading blob %s", hash)
+		log.Debugf("uploading blob %s", hash)
 		err = u.store.Put(hash, blob)
 		if err != nil {
-			return errors.Prefix("Uploading blob "+hash, err)
+			return errors.Prefix("uploading blob "+hash, err)
 		}
 		u.inc(blobInc)
 	}
@@ -208,7 +205,7 @@ func (u *Uploader) counter() {
 			}
 		}
 		if (u.count.Sd+u.count.Blob)%50 == 0 {
-			log.Infof("%d of %d done (%s elapsed, %.3fs per blob)", u.count.Sd+u.count.Blob, u.count.Total-u.count.AlreadyStored, time.Since(start).String(), time.Since(start).Seconds()/float64(u.count.Sd+u.count.Blob))
+			log.Debugf("%d of %d done (%s elapsed, %.3fs per blob)", u.count.Sd+u.count.Blob, u.count.Total-u.count.AlreadyStored, time.Since(start).String(), time.Since(start).Seconds()/float64(u.count.Sd+u.count.Blob))
 		}
 	}
 }
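
With the upload progress and summary lines also at DEBUG, an operator who still wants them needs a way to raise the level at startup. A hedged sketch of one common pattern; the -verbose flag is hypothetical and not something this commit adds:

    package main

    import (
        "flag"

        log "github.com/sirupsen/logrus"
    )

    func main() {
        // Hypothetical flag: restores the old INFO-style verbosity on demand.
        verbose := flag.Bool("verbose", false, "enable debug logging")
        flag.Parse()
        if *verbose {
            log.SetLevel(log.DebugLevel)
        }
        log.Debugf("%d new blobs to upload", 42) // visible only with -verbose
    }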