
@mux/upchunk npm package version comparison

Comparing version 2.0.0 to 2.1.0


package.json
{
"name": "@mux/upchunk",
"version": "2.0.0",
"version": "2.1.0",
"description": "Dead simple chunked file uploads using Fetch",

@@ -12,3 +12,3 @@ "main": "dist/upchunk.js",

"chunked upload",
"file uplooad",
"file upload",
"gcs upload",

@@ -34,3 +34,4 @@ "google cloud storage upload",

"@types/jest": "^25.2.3",
"jest": "^26.0.1",
"jest": "^26.6.3",
"nock": "^13.0.5",
"ts-jest": "^26.1.0",

@@ -49,3 +50,3 @@ "ts-loader": "^7.0.5",

"event-target-shim": "^4.0.3",
"xhr": "^2.5.0"
"xhr": "^2.6.0"
},

@@ -52,0 +53,0 @@ "volta": {

![UpChunk](banner.png)
# UpChunk <img src="https://travis-ci.org/muxinc/upchunk.svg?branch=master" alt="Build Status">
# UpChunk <img src="https://github.com/muxinc/upchunk/workflows/CI/badge.svg" alt="Build Status">

@@ -32,3 +32,3 @@ UpChunk uploads chunks of files! It's a JavaScript module for handling large file uploads via chunking and making a `put` request for each chunk with the correct range request headers. Uploads can be paused and resumed, they're fault tolerant,

```
<script src="https://unpkg.com/@mux/upchunk@1"></script>
<script src="https://unpkg.com/@mux/upchunk@2"></script>
```
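For orientation, here is a minimal usage sketch pieced together from the exports, options, and events that appear later in this diff (`createUpload`, `endpoint`, `file`, `chunkSize`, and the `error`/`success` events), shown with the npm import rather than the script tag. The endpoint URL and the file-input wiring are placeholders, not part of the package:

```
import { createUpload } from '@mux/upchunk';

const picker = document.querySelector('input[type="file"]') as HTMLInputElement;

picker.addEventListener('change', () => {
  const file = picker.files?.[0];
  if (!file) return;

  const upload = createUpload({
    // Placeholder: a URL (or async function returning one) that accepts chunked uploads.
    endpoint: 'https://example.com/upload/endpoint',
    file,
    chunkSize: 5120, // in KB; 5120 is the default applied in the constructor further down
  });

  upload.on('error', (err) => console.error(err.detail));
  upload.on('success', () => console.log('Upload complete'));
});
```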

@@ -127,2 +127,6 @@

- `method` <small>type: `"PUT" | "PATCH" | "POST"`, default: `PUT`</small>
The HTTP method to use when uploading each chunk.
### UpChunk Instance Methods

@@ -164,3 +168,3 @@

Fired after successful chunk uploads and returns the current percentage of the file that's been uploaded (in terms of chunks).
Fired continuously with incremental upload progress. This returns the current percentage of the file that's been uploaded.
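Given an `upload` instance from `createUpload`, a listener might look like this sketch, assuming the percentage rides on the event's `detail` property the same way the other events in this diff carry their payloads:

```
upload.on('progress', (progress) => {
  console.log(`${progress.detail}% of the file uploaded so far`);
});
```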

@@ -167,0 +171,0 @@ - `success`

/**
 * This is more of an integration test. We can't test these in TS, because
 * our handy dandy typechecks will make it so we can't compile with invalid
 * parameters. We don't have that luxury in normal JS, however, so make sure
@@ -5,0 +5,0 @@ * we still yell when we're supposed to.

@@ -1,5 +0,260 @@

// import { createUpload } from './upchunk';
test('basic', () => {
  expect(2 + 2).toBe(4);
});
import * as nock from 'nock';
import { UpChunk, createUpload, UpChunkOptions } from './upchunk';
beforeEach(() => {
  if (!nock.isActive()) {
    nock.activate();
  }
});

afterEach(() => {
  nock.restore();
  nock.cleanAll();
});
const createUploadFixture = (
  options?: Partial<UpChunkOptions>,
  specifiedFile?: File
) => {
  const file = specifiedFile || new File([new ArrayBuffer(524288)], 'test.mp4');

  return createUpload({
    file,
    endpoint: `https://example.com/upload/endpoint`,
    chunkSize: 256,
    ...options,
  });
};
test('files can be uploaded using POST', (done) => {
  nock('https://example.com')
    .post('/upload/endpoint')
    .twice()
    .reply(200);

  const upload = createUploadFixture({
    method: 'POST',
  });

  upload.on('success', () => {
    done();
  });
});
test('files can be uploaded using PATCH', (done) => {
  nock('https://example.com')
    .patch('/upload/endpoint')
    .twice()
    .reply(200);

  const upload = createUploadFixture({
    method: 'PATCH',
  });

  upload.on('success', () => {
    done();
  });
});
test('a file is uploaded using the correct content-range headers', (done) => {
  const fileBytes = 524288;
  const upload = createUploadFixture(
    {},
    new File([new ArrayBuffer(fileBytes)], 'test.mp4')
  );

  const scopes = [
    nock('https://example.com')
      .matchHeader('content-range', `bytes 0-${fileBytes / 2 - 1}/${fileBytes}`)
      .put('/upload/endpoint')
      .reply(200),
    nock('https://example.com')
      .matchHeader(
        'content-range',
        `bytes ${fileBytes / 2}-${fileBytes - 1}/${fileBytes}`
      )
      .put('/upload/endpoint')
      .reply(200),
  ];

  upload.on('error', (err) => {
    done(err);
  });

  upload.on('success', () => {
    scopes.forEach((scope) => {
      if (!scope.isDone()) {
        done('All scopes not completed');
      }
    });

    done();
  });
});
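// Worked example of the ranges asserted above: with the fixture's chunkSize of 256 (KB)
// and a 524288-byte file, UpChunk issues two PUTs carrying
// `Content-Range: bytes 0-262143/524288` and `Content-Range: bytes 262144-524287/524288`
// (inclusive start-end byte offsets, followed by the total file size).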
test('an error is thrown if a request does not complete', (done) => {
  nock('https://example.com').put('/upload/endpoint').reply(500);

  const upload = createUploadFixture();

  upload.on('error', (err) => {
    done();
  });

  upload.on('success', () => {
    done('Ironic failure, should not have been successful');
  });
});
test('fires an attempt event before each attempt', (done) => {
  let ATTEMPT_COUNT = 0;
  const MAX_ATTEMPTS = 2; // because we set the chunk size to 256kb, half of our file size in bytes.

  nock('https://example.com')
    .put('/upload/endpoint')
    .reply(200)
    .put('/upload/endpoint')
    .reply(200);

  const upload = createUploadFixture();

  upload.on('attempt', (err) => {
    ATTEMPT_COUNT += 1;
  });

  upload.on('success', () => {
    if (ATTEMPT_COUNT === MAX_ATTEMPTS) {
      done();
    } else {
      done(
        `Attempted ${ATTEMPT_COUNT} times and it should have been ${MAX_ATTEMPTS}`
      );
    }
  });
});
test('a chunk failing to upload fires an attemptFailure event', (done) => {
  nock('https://example.com').put('/upload/endpoint').reply(502);

  const upload = createUploadFixture();

  upload.on('attemptFailure', (err) => {
    upload.pause();
    done();
  });
});
test('a single chunk failing is retried multiple times until successful', (done) => {
  let ATTEMPT_FAILURE_COUNT = 0;
  const FAILURES = 2;

  nock('https://example.com')
    .put('/upload/endpoint')
    .times(FAILURES)
    .reply(502)
    .put('/upload/endpoint')
    .twice()
    .reply(200);

  const upload = createUploadFixture({ delayBeforeAttempt: 0.1 });

  upload.on('attemptFailure', (err) => {
    ATTEMPT_FAILURE_COUNT += 1;
  });

  upload.on('error', done);

  upload.on('success', () => {
    if (ATTEMPT_FAILURE_COUNT === FAILURES) {
      return done();
    }

    done(
      `Expected ${FAILURES} attempt failures, received ${ATTEMPT_FAILURE_COUNT}`
    );
  });
});
test('a single chunk failing the max number of times fails the upload', (done) => {
  nock('https://example.com')
    .put('/upload/endpoint')
    .times(5)
    .reply(502)
    .put('/upload/endpoint')
    .twice()
    .reply(200);

  const upload = createUploadFixture({ delayBeforeAttempt: 0.1 });

  upload.on('error', (err) => {
    try {
      expect(err.detail.chunk).toBe(0);
      expect(err.detail.attempts).toBe(5);
      done();
    } catch (err) {
      done(err);
    }
  });

  upload.on('success', () => {
    done(`Expected upload to fail due to failed attempts`);
  });
});
test('chunkSuccess event is fired after each successful upload', (done) => {
  nock('https://example.com')
    .put('/upload/endpoint')
    .reply(200)
    .put('/upload/endpoint')
    .reply(200);

  const upload = createUploadFixture();
  const successCallback = jest.fn();

  upload.on('chunkSuccess', successCallback);

  upload.on('success', () => {
    expect(successCallback).toBeCalledTimes(2);
    done();
  });
});
test('abort pauses the upload and cancels the current XHR request', (done) => {
  /*
    This is hacky and I don't love it, but the gist is:
    - Set up a chunkSuccess callback listener
    - We abort the upload during the first request stub before responding
    - In the attempt callback, we'll set a short timeout, where we check if the scope is done, meaning all the stubs have been called. If that's the case, make sure that chunkSuccess was never called.
  */
  let upload: UpChunk;

  const scope = nock('https://example.com')
    .put('/upload/endpoint')
    .reply(() => {
      upload.abort();
      return [200, 'success'];
    });

  upload = createUploadFixture();
  const chunkSuccessCallback = jest.fn();

  upload.on('attempt', (e) => {
    setTimeout(() => {
      expect(scope.isDone()).toBeTruthy();
      expect(chunkSuccessCallback).toHaveBeenCalledTimes(0);
      done();
    }, 10);
  });

  // upload.on('chunkSuccess', chunkSuccessCallback);
  upload.on('chunkSuccess', (e) => console.log(e.detail));

  upload.on('success', () => {
    done('Upload should not have successfully completed');
  });
});

@@ -10,2 +10,3 @@ import { EventTarget } from 'event-target-shim';

| 'attemptFailure'
| 'chunkSuccess'
| 'error'

@@ -17,5 +18,11 @@ | 'offline'

interface IOptions {
type AllowedMethods =
  | 'PUT'
  | 'POST'
  | 'PATCH';
export interface UpChunkOptions {
  endpoint: string | ((file?: File) => Promise<string>);
  file: File;
  method?: AllowedMethods;
  headers?: XhrHeaders;

@@ -31,2 +38,3 @@ chunkSize?: number;

public headers: XhrHeaders;
public method: AllowedMethods;
public chunkSize: number;

@@ -44,2 +52,3 @@ public attempts: number;

private paused: boolean;
private currentXhr?: XMLHttpRequest;

@@ -49,6 +58,7 @@ private reader: FileReader;

constructor(options: IOptions) {
constructor(options: UpChunkOptions) {
  this.endpoint = options.endpoint;
  this.file = options.file;
  this.headers = options.headers || ({} as XhrHeaders);
  this.method = options.method || 'PUT';
  this.chunkSize = options.chunkSize || 5120;

@@ -98,2 +108,7 @@ this.attempts = options.attempts || 5;

public abort() {
  this.pause();
  this.currentXhr?.abort();
}

public pause() {

@@ -211,3 +226,4 @@ this.paused = true;

return new Promise((resolve, reject) => {
  xhr({ ...options, beforeSend }, (err, resp) => {
  this.currentXhr = xhr({ ...options, beforeSend }, (err, resp) => {
    this.currentXhr = undefined;
    if (err) {

@@ -242,3 +258,3 @@ return reject(err);

url: this.endpointValue,
method: 'PUT',
method: this.method,
body: this.chunk,

@@ -253,3 +269,2 @@ });

if (this.attemptCount < this.attempts) {
  this.attemptCount = this.attemptCount + 1;
  setTimeout(() => this.sendChunks(), this.delayBeforeAttempt * 1000);

@@ -285,4 +300,14 @@ this.dispatch('attemptFailure', {

.then((res) => {
  this.attemptCount = this.attemptCount + 1;

  if (SUCCESSFUL_CHUNK_UPLOAD_CODES.includes(res.statusCode)) {
    this.dispatch('chunkSuccess', {
      chunk: this.chunkCount,
      attempts: this.attemptCount,
      response: res,
    });

    this.attemptCount = 0;
    this.chunkCount = this.chunkCount + 1;

    if (this.chunkCount < this.totalChunks) {

@@ -327,2 +352,2 @@ this.sendChunks();

export const createUpload = (options: IOptions) => new UpChunk(options);
export const createUpload = (options: UpChunkOptions) => new UpChunk(options);
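
Taken together, the user-facing additions in 2.1.0 visible above are the `method` option, the `chunkSuccess` event, and the `abort()` method. A rough sketch of how they combine, with the endpoint URL and button wiring as placeholders:

```
import { createUpload } from '@mux/upchunk';

function startUpload(file: File, cancelButton: HTMLButtonElement) {
  const upload = createUpload({
    endpoint: 'https://example.com/upload/endpoint', // placeholder URL
    file,
    method: 'PATCH', // new in 2.1.0; 'PUT' remains the default
  });

  // chunkSuccess (new in 2.1.0) fires once per successfully uploaded chunk.
  upload.on('chunkSuccess', (e) => {
    console.log(`Chunk ${e.detail.chunk} uploaded after ${e.detail.attempts} attempt(s)`);
  });

  // abort() (new in 2.1.0) pauses the upload and cancels the in-flight request.
  cancelButton.addEventListener('click', () => upload.abort());

  return upload;
}
```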