TUS: handle URL removal on 4xx errors
## Issue The TUS client automatically removes the upload fingerprint whenever there is a 4xx error. When we tried to resume later, we couldn't find the fingerprint and ended up creating a new upload ID. ## Changes Since we are also storing the uploadUrl ourselves, we now provide it to override the tus client's default behavior of restarting a new session on 4xx errors.
This commit is contained in:
parent
352ee7bfaa
commit
b6e9c7aabf
1 changed file with 11 additions and 1 deletion
|
@ -48,8 +48,18 @@ export function makeResumableUploadRequest(
|
||||||
id: new Date().getTime(),
|
id: new Date().getTime(),
|
||||||
});
|
});
|
||||||
|
|
||||||
|
const urlOptions = {};
|
||||||
|
if (params.uploadUrl) {
|
||||||
|
// Resuming from previous upload. TUS clears the resume fingerprint on any
|
||||||
|
// 4xx error, so we need to use the fixed URL mode instead.
|
||||||
|
urlOptions.uploadUrl = params.uploadUrl;
|
||||||
|
} else {
|
||||||
|
// New upload, so use `endpoint`.
|
||||||
|
urlOptions.endpoint = RESUMABLE_ENDPOINT;
|
||||||
|
}
|
||||||
|
|
||||||
const uploader = new tus.Upload(file, {
|
const uploader = new tus.Upload(file, {
|
||||||
endpoint: RESUMABLE_ENDPOINT,
|
...urlOptions,
|
||||||
chunkSize: UPLOAD_CHUNK_SIZE_BYTE,
|
chunkSize: UPLOAD_CHUNK_SIZE_BYTE,
|
||||||
retryDelays: [0, 5000, 10000, 15000],
|
retryDelays: [0, 5000, 10000, 15000],
|
||||||
parallelUploads: 1,
|
parallelUploads: 1,
|
||||||
|
|
Loading…
Reference in a new issue