Merge pull request #40 from lbryio/delete-after-upload

add flag to delete blobs after upload
Commit bc95ca61d5
Andrey Beletsky, 2020-05-05 13:36:41 +07:00 (committed via GitHub)
2 changed files with 25 additions and 16 deletions


@@ -14,6 +14,7 @@ import (
 
 var uploadWorkers int
 var uploadSkipExistsCheck bool
+var uploadDeleteBlobsAfterUpload bool
 
 func init() {
 	var cmd = &cobra.Command{
@@ -24,6 +25,7 @@ func init() {
 	}
 	cmd.PersistentFlags().IntVar(&uploadWorkers, "workers", 1, "How many worker threads to run at once")
 	cmd.PersistentFlags().BoolVar(&uploadSkipExistsCheck, "skipExistsCheck", false, "Dont check if blobs exist before uploading")
+	cmd.PersistentFlags().BoolVar(&uploadDeleteBlobsAfterUpload, "deleteBlobsAfterUpload", false, "Delete blobs after uploading them")
 	rootCmd.AddCommand(cmd)
 }
 
@@ -36,7 +38,7 @@ func uploadCmd(cmd *cobra.Command, args []string) {
 		store.NewS3BlobStore(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName),
 		db)
-	uploader := reflector.NewUploader(db, st, uploadWorkers, uploadSkipExistsCheck)
+	uploader := reflector.NewUploader(db, st, uploadWorkers, uploadSkipExistsCheck, uploadDeleteBlobsAfterUpload)
 
 	interruptChan := make(chan os.Signal, 1)
 	signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)


@@ -29,24 +29,26 @@ type Summary struct {
 }
 
 type Uploader struct {
-	db              *db.SQL
-	store           *store.DBBackedStore // could just be store.BlobStore interface
-	workers         int
-	skipExistsCheck bool
-	stopper         *stop.Group
-	countChan       chan increment
+	db                     *db.SQL
+	store                  *store.DBBackedStore // could just be store.BlobStore interface
+	workers                int
+	skipExistsCheck        bool
+	deleteBlobsAfterUpload bool
+	stopper                *stop.Group
+	countChan              chan increment
 
 	count Summary
 }
 
-func NewUploader(db *db.SQL, store *store.DBBackedStore, workers int, skipExistsCheck bool) *Uploader {
+func NewUploader(db *db.SQL, store *store.DBBackedStore, workers int, skipExistsCheck, deleteBlobsAfterUpload bool) *Uploader {
 	return &Uploader{
-		db:              db,
-		store:           store,
-		workers:         workers,
-		skipExistsCheck: skipExistsCheck,
-		stopper:         stop.New(),
-		countChan:       make(chan increment),
+		db:                     db,
+		store:                  store,
+		workers:                workers,
+		skipExistsCheck:        skipExistsCheck,
+		deleteBlobsAfterUpload: deleteBlobsAfterUpload,
+		stopper:                stop.New(),
+		countChan:              make(chan increment),
 	}
 }
 
@@ -126,7 +128,7 @@ Upload:
 	return nil
 }
 
-// worker reads paths from a channel and uploads them
+// worker reads paths from a channel, uploads them, and optionally deletes them
 func (u *Uploader) worker(pathChan chan string) {
 	for {
 		select {
@@ -140,6 +142,11 @@ func (u *Uploader) worker(pathChan chan string) {
 			err := u.uploadBlob(filepath)
 			if err != nil {
 				log.Errorln(err)
+			} else if u.deleteBlobsAfterUpload {
+				err = os.Remove(filepath)
+				if err != nil {
+					log.Errorln(errors.Prefix("deleting blob", err))
+				}
 			}
 		}
 	}
@@ -155,7 +162,7 @@ func (u *Uploader) uploadBlob(filepath string) (err error) {
 	blob, err := ioutil.ReadFile(filepath)
 	if err != nil {
-		return err
+		return errors.Err(err)
 	}
 
 	hash := BlobHash(blob)
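
For readers outside the codebase, below is a minimal, self-contained Go sketch of the behavior this PR adds: upload a blob, then remove the local file only when the upload succeeded and deletion is enabled. The blobStore interface, memStore, uploadAndMaybeDelete, and the SHA-384 hashing are illustrative assumptions for this sketch, not the repo's actual types or hashing.

package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"log"
	"os"
)

// blobStore stands in for the real store interface (assumed for illustration).
type blobStore interface {
	Put(hash string, blob []byte) error
}

// memStore keeps blobs in memory so the sketch runs without S3 or a database.
type memStore struct {
	blobs map[string][]byte
}

func (m *memStore) Put(hash string, blob []byte) error {
	m.blobs[hash] = blob
	return nil
}

// uploadAndMaybeDelete mirrors the worker logic in this PR: upload the file,
// then delete it from disk only if the upload succeeded and deletion is enabled.
func uploadAndMaybeDelete(s blobStore, path string, deleteAfterUpload bool) error {
	blob, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	sum := sha512.Sum384(blob) // illustrative hash; not necessarily the repo's BlobHash
	if err := s.Put(hex.EncodeToString(sum[:]), blob); err != nil {
		return err
	}
	if deleteAfterUpload {
		// A failed delete is logged but does not fail the upload, as in the PR.
		if err := os.Remove(path); err != nil {
			log.Printf("deleting blob: %v", err)
		}
	}
	return nil
}

func main() {
	// Write a temp file, "upload" it, and let the flag control deletion.
	f, err := os.CreateTemp("", "blob-*")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.WriteString("example blob data"); err != nil {
		log.Fatal(err)
	}
	f.Close()

	st := &memStore{blobs: map[string][]byte{}}
	if err := uploadAndMaybeDelete(st, f.Name(), true); err != nil {
		log.Fatal(err)
	}
	_, statErr := os.Stat(f.Name())
	fmt.Println("file removed after upload:", os.IsNotExist(statErr))
}

In the actual upload command this behavior is opt-in via the new deleteBlobsAfterUpload flag (default false), alongside the existing workers and skipExistsCheck flags.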