unzipit - npm Package Compare versions

Comparing version 0.1.5 to 0.1.6


dist/unzipit-worker.js

@@ -1,2 +0,2 @@

-/* unzipit@0.1.5, license MIT */
+/* unzipit@0.1.6, license MIT */
 (function (factory) {

@@ -590,2 +590,7 @@ typeof define === 'function' && define.amd ? define(factory) :

+async function readBlobAsUint8Array(blob) {
+  const arrayBuffer = await readBlobAsArrayBuffer(blob);
+  return new Uint8Array(arrayBuffer);
+}
 function isBlob(v) {

@@ -644,4 +649,4 @@ return typeof Blob !== 'undefined' && v instanceof Blob;

 let srcData;
-if (isBlob(srcData)) {
-  srcData = await readBlobAsArrayBuffer(src);
+if (isBlob(src)) {
+  srcData = await readBlobAsUint8Array(src);
 } else {

@@ -648,0 +653,0 @@ srcData = new Uint8Array(src);
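The substantive fix in this file: 0.1.5 tested isBlob(srcData) when srcData had only just been declared and was still undefined, so a Blob source never took the Blob branch and fell through to new Uint8Array(src), which yields an empty, useless view when src is a Blob. A standalone sketch of the corrected flow (isBlob mirrors the diff; Blob.prototype.arrayBuffer stands in for the library's readBlobAsArrayBuffer helper, and toUint8Array is a hypothetical name):

function isBlob(v) {
  return typeof Blob !== 'undefined' && v instanceof Blob;
}

// Normalizes a Blob or ArrayBuffer source to bytes.
async function toUint8Array(src) {
  let srcData;
  if (isBlob(src)) {                // 0.1.5 checked isBlob(srcData) here -- always false
    srcData = new Uint8Array(await src.arrayBuffer());
  } else {
    srcData = new Uint8Array(src);  // ArrayBuffer (or array-like) input
  }
  return srcData;
}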

@@ -1,2 +0,2 @@

-/* unzipit@0.1.5, license MIT */
+/* unzipit@0.1.6, license MIT */
 function deflateRaw(data, out, opos, lvl) {

@@ -585,2 +585,7 @@ var opts = [

+async function readBlobAsUint8Array(blob) {
+  const arrayBuffer = await readBlobAsArrayBuffer(blob);
+  return new Uint8Array(arrayBuffer);
+}
 function isBlob(v) {

@@ -639,4 +644,4 @@ return typeof Blob !== 'undefined' && v instanceof Blob;

 let srcData;
-if (isBlob(srcData)) {
-  srcData = await readBlobAsArrayBuffer(src);
+if (isBlob(src)) {
+  srcData = await readBlobAsUint8Array(src);
 } else {

@@ -643,0 +648,0 @@ srcData = new Uint8Array(src);

@@ -1,2 +0,2 @@

-/* unzipit@0.1.5, license MIT */
+/* unzipit@0.1.6, license MIT */
 (function (global, factory) {

@@ -24,2 +24,11 @@ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :

+async function readBlobAsUint8Array(blob) {
+  const arrayBuffer = await readBlobAsArrayBuffer(blob);
+  return new Uint8Array(arrayBuffer);
+}
+function isBlob(v) {
+  return typeof Blob !== 'undefined' && v instanceof Blob;
+}
 function isSharedArrayBuffer(b) {
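readBlobAsArrayBuffer is referenced throughout this diff but its body isn't shown. For context, a plausible implementation under 2019-era browser constraints (a sketch, not necessarily the library's exact code; Blob.prototype.arrayBuffer was not yet universal, so a FileReader fallback is typical):

async function readBlobAsArrayBuffer(blob) {
  if (blob.arrayBuffer) {
    return blob.arrayBuffer();                 // modern path
  }
  return new Promise((resolve, reject) => {    // FileReader fallback
    const reader = new FileReader();
    reader.addEventListener('loadend', () => resolve(reader.result));
    reader.addEventListener('error', reject);
    reader.readAsArrayBuffer(blob);
  });
}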

@@ -35,2 +44,6 @@ return typeof SharedArrayBuffer !== 'undefined' && b instanceof SharedArrayBuffer;

+function isTypedArraySameAsArrayBuffer(typedArray) {
+  return typedArray.byteOffset === 0 && typedArray.byteLength === typedArray.buffer.byteLength;
+}
 class ArrayBufferReader {
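isTypedArraySameAsArrayBuffer exists because a typed array is only a view: the buffer behind it can be larger than the view itself. A quick illustration of the two cases the check separates:

const buffer = new ArrayBuffer(16);

const whole = new Uint8Array(buffer);        // view covers the entire buffer
console.log(whole.byteOffset === 0 &&
            whole.byteLength === whole.buffer.byteLength);  // true -> whole.buffer is safe to return

const part = new Uint8Array(buffer, 4, 8);   // 8-byte window at offset 4
console.log(part.buffer.byteLength);         // 16 -- returning part.buffer would expose
                                             // 8 bytes that aren't the data, so a copy
                                             // (part.slice().buffer) is required

readEntryDataAsArrayBuffer further down uses exactly this test to decide between returning the buffer directly and copying.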

@@ -57,3 +70,3 @@ constructor(arrayBufferOrView) {

   }
-  async read(offset, length, ) {
+  async read(offset, length) {
     const blob = this.blob.slice(offset, offset + length);

@@ -63,2 +76,5 @@ const arrayBuffer = await readBlobAsArrayBuffer(blob);

   }
+  async sliceAsBlob(offset, length, type = '') {
+    return this.blob.slice(offset, offset + length, type);
+  }
 }
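The new sliceAsBlob method builds on Blob.prototype.slice(start, end, contentType), which returns a new Blob referring to the same underlying bytes; nothing is read or copied at slice time. For example:

const blob = new Blob([new Uint8Array([0x89, 0x50, 0x4e, 0x47])]);
const png = blob.slice(0, 4, 'image/png');   // cheap: re-labels a byte range
console.log(png.size, png.type);             // 4 'image/png'

That cheapness is what makes handing a sub-Blob to a worker attractive later in this diff.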

@@ -751,2 +767,6 @@

+// @param {Uint8Array} src
+// @param {number} uncompressedSize
+// @param {string} [type] mime-type
+// @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
 function inflateRawLocal(src, uncompressedSize, type, resolve) {

@@ -788,6 +808,2 @@ const dst = new Uint8Array(uncompressedSize);

-//
-// We could hack in sending a Blob but it feels like a hack (too many ifs)
-// We'd only send a blob if the source is a BlobReader and I actually have
-// no idea if slicing a Blob is efficient.
 //
 //if (!isBlob(src) && !isSharedArrayBuffer(src)) {

@@ -812,3 +828,7 @@ // transferables.push(src);

   const {src, uncompressedSize, type, resolve} = waitingForWorkerQueue.shift();
-  inflateRawLocal(src, uncompressedSize, type, resolve);
+  let data = src;
+  if (isBlob(src)) {
+    data = await readBlobAsUint8Array(src);
+  }
+  inflateRawLocal(data, uncompressedSize, type, resolve);
 }

@@ -826,7 +846,2 @@

-// type: undefined or mimeType string (eg: 'image/png')
-//
-// if `type` is falsy then an ArrayBuffer is returned
-//
-//
 // It has to take non-zero time to put a large typed array in a Blob since the very

@@ -839,2 +854,7 @@ // next instruction you could change the contents of the array. So, if you're reading

 // since the worker can transfer its ArrayBuffer zero copy.
+//
+// @param {Uint8Array|Blob} src
+// @param {number} uncompressedSize
+// @param {string} [type] falsy or mimeType string (eg: 'image/png')
+// @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
 function inflateRawAsync(src, uncompressedSize, type) {
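The "transfer its ArrayBuffer zero copy" remark refers to the transfer-list argument of postMessage, which moves ownership of a buffer to the worker instead of structured-cloning it. A generic sketch (worker script name hypothetical):

const worker = new Worker('inflate-worker.js');
const data = new Uint8Array(1024 * 1024);
// The second argument lists transferables: data.buffer is moved, not copied.
worker.postMessage({data, uncompressedSize: 4 * 1024 * 1024}, [data.buffer]);
console.log(data.byteLength);   // 0 -- the buffer is detached on this side now

SharedArrayBuffers cannot be transferred (only shared), which is presumably why the surrounding code special-cases isSharedArrayBuffer.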

@@ -917,7 +937,7 @@ return new Promise((resolve, reject) => {

   async blob(type = 'application/octet-stream') {
-    return await readEntryData(this._reader, this._entry, type);
+    return await readEntryDataAsBlob(this._reader, this._entry, type);
   }
   // returns a promise that returns an ArrayBuffer for this entry
   async arrayBuffer() {
-    return await readEntryData(this._reader, this._entry);
+    return await readEntryDataAsArrayBuffer(this._reader, this._entry);
   }

@@ -944,2 +964,29 @@ // returns text, assumes the text is valid utf8. If you want more options decode arrayBuffer yourself

+// The point of this function is we want to be able to pass the data
+// to a worker as fast as possible so when decompressing if the data
+// is already a blob and we can get a blob then get a blob.
+//
+// I'm not sure what a better way to refactor this is. We've got examples
+// of multiple readers. Ideally, for every type of reader we could ask
+// it, "give me a type that is zero copy both locally and when sent to a worker".
+//
+// The problem is the worker would also have to know the how to handle this
+// opaque type. I suppose the correct solution is to register different
+// reader handlers in the worker so BlobReader would register some
+// `handleZeroCopyType<BlobReader>`. At the moment I don't feel like
+// refactoring. As it is you just pass in an instance of the reader
+// but instead you'd have to register the reader and some how get the
+// source for the `handleZeroCopyType` handler function into the worker.
+// That sounds like a huge PITA, requiring you to put the implementation
+// in a separate file so the worker can load it or some other workaround
+// hack.
+//
+// For now this hack works even if it's not generic.
+async function readAsBlobOrTypedArray(reader, offset, length, type) {
+  if (reader.sliceAsBlob) {
+    return await reader.sliceAsBlob(offset, length, type);
+  }
+  return await reader.read(offset, length);
+}
 const crc$1 = {
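readAsBlobOrTypedArray duck-types the reader: anything exposing sliceAsBlob gets the zero-copy Blob path, everything else falls back to read(). A minimal sketch of two conforming readers (simplified stand-ins, not the library's actual classes; assumes Blob.prototype.arrayBuffer):

// Backed by a Blob: can hand out sub-Blobs without touching the bytes.
class TinyBlobReader {
  constructor(blob) { this.blob = blob; }
  async read(offset, length) {
    const ab = await this.blob.slice(offset, offset + length).arrayBuffer();
    return new Uint8Array(ab);
  }
  async sliceAsBlob(offset, length, type = '') {
    return this.blob.slice(offset, offset + length, type);
  }
}

// Backed by memory: no sliceAsBlob, so callers receive a typed-array view.
class TinyArrayReader {
  constructor(arrayBuffer) { this.view = new Uint8Array(arrayBuffer); }
  async read(offset, length) {
    return this.view.subarray(offset, offset + length);
  }
}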

@@ -1236,4 +1283,3 @@ unsigned() {

-// returns an ArrayBuffer if type is falsy or a Blob of mime-type 'type'
-async function readEntryData(reader, entry, type) {
+async function readEntryDataHeader(reader, entry) {
   const buffer = await readAs(reader, entry.relativeOffsetOfLocalHeader, 30);
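The constant 30 here is the fixed size of a ZIP local file header, which sits immediately before each entry's data. For reference (per the ZIP APPNOTE, section 4.3.7):

// ZIP local file header -- fixed 30-byte prefix
// offset  size  field
//   0      4    signature 0x04034b50 ('PK\x03\x04')
//   4      2    version needed to extract
//   6      2    general purpose bit flag
//   8      2    compression method (0 = stored, 8 = deflate)
//  10      2    last mod file time
//  12      2    last mod file date
//  14      4    crc-32
//  18      4    compressed size
//  22      4    uncompressed size
//  26      2    file name length (n)
//  28      2    extra field length (m)
//  30           file name (n bytes), extra field (m bytes), then the data

fileDataStart is therefore the header offset plus 30 plus the two variable-length fields.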

@@ -1286,11 +1332,44 @@ // note: maybe this should be passed in or cached on entry

   }
-  const data = await readAs(reader, fileDataStart, entry.compressedSize);
+  return {
+    decompress,
+    fileDataStart,
+  };
+}
+async function readEntryDataAsArrayBuffer(reader, entry) {
+  const {decompress, fileDataStart} = await readEntryDataHeader(reader, entry);
   if (!decompress) {
-    if (type) {
-      return new Blob([isSharedArrayBuffer(data.buffer) ? new Uint8Array(data) : data], {type});
+    const dataView = await readAs(reader, fileDataStart, entry.compressedSize);
+    // make copy?
+    //
+    // 1. The source is a Blob/file. In this case we'll get back TypedArray we can just hand to the user
+    // 2. The source is a TypedArray. In this case we'll get back TypedArray that is a view into a larger buffer
+    //    but because ultimately this is used to return an ArrayBuffer to `someEntry.arrayBuffer()`
+    //    we need to return copy since we need the `ArrayBuffer`, not the TypedArray to exactly match the data.
+    //    Note: We could add another API function `bytes()` or something that returned a `Uint8Array`
+    //    instead of an `ArrayBuffer`. This would let us skip a copy here. But this case only happens for uncompressed
+    //    data. That seems like a rare enough case that adding a new API is not worth it? Or is it? A zip of jpegs or mp3s
+    //    might not be compressed. For now that's a TBD.
+    return isTypedArraySameAsArrayBuffer(dataView) ? dataView.buffer : dataView.slice().buffer;
   }
+  // see comment in readEntryDateAsBlob
+  const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, entry.compressedSize);
+  const result = await inflateRawAsync(typedArrayOrBlob, entry.uncompressedSize);
+  return result;
+}
+async function readEntryDataAsBlob(reader, entry, type) {
+  const {decompress, fileDataStart} = await readEntryDataHeader(reader, entry);
+  if (!decompress) {
+    const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, entry.compressedSize, type);
+    if (isBlob(typedArrayOrBlob)) {
+      return typedArrayOrBlob;
+    }
-    return data.slice().buffer;
+    return new Blob([isSharedArrayBuffer(typedArrayOrBlob.buffer) ? new Uint8Array(typedArrayOrBlob) : typedArrayOrBlob], {type});
   }
-  const result = await inflateRawAsync(data, entry.uncompressedSize, type);
+  // Here's the issue with this mess (should refactor?)
+  // if the source is a blob then we really want to pass a blob to inflateRawAsync to avoid a large
+  // copy if we're going to a worker.
+  const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, entry.compressedSize);
+  const result = await inflateRawAsync(typedArrayOrBlob, entry.uncompressedSize, type);
   return result;

@@ -1297,0 +1376,0 @@ }
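For callers, the readEntryData split is invisible: ZipEntry.blob(type) and ZipEntry.arrayBuffer() keep their signatures and simply route to the new functions. Typical usage stays as before (URL and entry names hypothetical):

import {unzip} from 'unzipit';

async function demo() {
  const {entries} = await unzip('https://example.com/stuff.zip');
  const buf = await entries['data.bin'].arrayBuffer();        // ArrayBuffer
  const img = await entries['photo.jpg'].blob('image/jpeg');  // Blob tagged with a mime type
}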

@@ -1,2 +0,2 @@

-/* unzipit@0.1.5, license MIT */
+/* unzipit@0.1.6, license MIT */
 /* global SharedArrayBuffer, process */

@@ -18,2 +18,11 @@

+async function readBlobAsUint8Array(blob) {
+  const arrayBuffer = await readBlobAsArrayBuffer(blob);
+  return new Uint8Array(arrayBuffer);
+}
+function isBlob(v) {
+  return typeof Blob !== 'undefined' && v instanceof Blob;
+}
 function isSharedArrayBuffer(b) {

@@ -29,2 +38,6 @@ return typeof SharedArrayBuffer !== 'undefined' && b instanceof SharedArrayBuffer;

+function isTypedArraySameAsArrayBuffer(typedArray) {
+  return typedArray.byteOffset === 0 && typedArray.byteLength === typedArray.buffer.byteLength;
+}
 class ArrayBufferReader {

@@ -51,3 +64,3 @@ constructor(arrayBufferOrView) {

   }
-  async read(offset, length, ) {
+  async read(offset, length) {
     const blob = this.blob.slice(offset, offset + length);

@@ -57,2 +70,5 @@ const arrayBuffer = await readBlobAsArrayBuffer(blob);

   }
+  async sliceAsBlob(offset, length, type = '') {
+    return this.blob.slice(offset, offset + length, type);
+  }
 }

@@ -745,2 +761,6 @@

+// @param {Uint8Array} src
+// @param {number} uncompressedSize
+// @param {string} [type] mime-type
+// @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
 function inflateRawLocal(src, uncompressedSize, type, resolve) {

@@ -782,6 +802,2 @@ const dst = new Uint8Array(uncompressedSize);

-//
-// We could hack in sending a Blob but it feels like a hack (too many ifs)
-// We'd only send a blob if the source is a BlobReader and I actually have
-// no idea if slicing a Blob is efficient.
 //
 //if (!isBlob(src) && !isSharedArrayBuffer(src)) {

@@ -806,3 +822,7 @@ // transferables.push(src);

   const {src, uncompressedSize, type, resolve} = waitingForWorkerQueue.shift();
-  inflateRawLocal(src, uncompressedSize, type, resolve);
+  let data = src;
+  if (isBlob(src)) {
+    data = await readBlobAsUint8Array(src);
+  }
+  inflateRawLocal(data, uncompressedSize, type, resolve);
 }

@@ -820,7 +840,2 @@

-// type: undefined or mimeType string (eg: 'image/png')
-//
-// if `type` is falsy then an ArrayBuffer is returned
-//
-//
 // It has to take non-zero time to put a large typed array in a Blob since the very

@@ -833,2 +848,7 @@ // next instruction you could change the contents of the array. So, if you're reading

 // since the worker can transfer its ArrayBuffer zero copy.
+//
+// @param {Uint8Array|Blob} src
+// @param {number} uncompressedSize
+// @param {string} [type] falsy or mimeType string (eg: 'image/png')
+// @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
 function inflateRawAsync(src, uncompressedSize, type) {

@@ -911,7 +931,7 @@ return new Promise((resolve, reject) => {

   async blob(type = 'application/octet-stream') {
-    return await readEntryData(this._reader, this._entry, type);
+    return await readEntryDataAsBlob(this._reader, this._entry, type);
   }
   // returns a promise that returns an ArrayBuffer for this entry
   async arrayBuffer() {
-    return await readEntryData(this._reader, this._entry);
+    return await readEntryDataAsArrayBuffer(this._reader, this._entry);
   }

@@ -938,2 +958,29 @@ // returns text, assumes the text is valid utf8. If you want more options decode arrayBuffer yourself

+// The point of this function is we want to be able to pass the data
+// to a worker as fast as possible so when decompressing if the data
+// is already a blob and we can get a blob then get a blob.
+//
+// I'm not sure what a better way to refactor this is. We've got examples
+// of multiple readers. Ideally, for every type of reader we could ask
+// it, "give me a type that is zero copy both locally and when sent to a worker".
+//
+// The problem is the worker would also have to know the how to handle this
+// opaque type. I suppose the correct solution is to register different
+// reader handlers in the worker so BlobReader would register some
+// `handleZeroCopyType<BlobReader>`. At the moment I don't feel like
+// refactoring. As it is you just pass in an instance of the reader
+// but instead you'd have to register the reader and some how get the
+// source for the `handleZeroCopyType` handler function into the worker.
+// That sounds like a huge PITA, requiring you to put the implementation
+// in a separate file so the worker can load it or some other workaround
+// hack.
+//
+// For now this hack works even if it's not generic.
+async function readAsBlobOrTypedArray(reader, offset, length, type) {
+  if (reader.sliceAsBlob) {
+    return await reader.sliceAsBlob(offset, length, type);
+  }
+  return await reader.read(offset, length);
+}
 const crc$1 = {

@@ -1230,4 +1277,3 @@ unsigned() {

-// returns an ArrayBuffer if type is falsy or a Blob of mime-type 'type'
-async function readEntryData(reader, entry, type) {
+async function readEntryDataHeader(reader, entry) {
   const buffer = await readAs(reader, entry.relativeOffsetOfLocalHeader, 30);

@@ -1280,11 +1326,44 @@ // note: maybe this should be passed in or cached on entry

   }
-  const data = await readAs(reader, fileDataStart, entry.compressedSize);
+  return {
+    decompress,
+    fileDataStart,
+  };
+}
+async function readEntryDataAsArrayBuffer(reader, entry) {
+  const {decompress, fileDataStart} = await readEntryDataHeader(reader, entry);
   if (!decompress) {
-    if (type) {
-      return new Blob([isSharedArrayBuffer(data.buffer) ? new Uint8Array(data) : data], {type});
+    const dataView = await readAs(reader, fileDataStart, entry.compressedSize);
+    // make copy?
+    //
+    // 1. The source is a Blob/file. In this case we'll get back TypedArray we can just hand to the user
+    // 2. The source is a TypedArray. In this case we'll get back TypedArray that is a view into a larger buffer
+    //    but because ultimately this is used to return an ArrayBuffer to `someEntry.arrayBuffer()`
+    //    we need to return copy since we need the `ArrayBuffer`, not the TypedArray to exactly match the data.
+    //    Note: We could add another API function `bytes()` or something that returned a `Uint8Array`
+    //    instead of an `ArrayBuffer`. This would let us skip a copy here. But this case only happens for uncompressed
+    //    data. That seems like a rare enough case that adding a new API is not worth it? Or is it? A zip of jpegs or mp3s
+    //    might not be compressed. For now that's a TBD.
+    return isTypedArraySameAsArrayBuffer(dataView) ? dataView.buffer : dataView.slice().buffer;
   }
+  // see comment in readEntryDateAsBlob
+  const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, entry.compressedSize);
+  const result = await inflateRawAsync(typedArrayOrBlob, entry.uncompressedSize);
+  return result;
+}
+async function readEntryDataAsBlob(reader, entry, type) {
+  const {decompress, fileDataStart} = await readEntryDataHeader(reader, entry);
+  if (!decompress) {
+    const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, entry.compressedSize, type);
+    if (isBlob(typedArrayOrBlob)) {
+      return typedArrayOrBlob;
+    }
-    return data.slice().buffer;
+    return new Blob([isSharedArrayBuffer(typedArrayOrBlob.buffer) ? new Uint8Array(typedArrayOrBlob) : typedArrayOrBlob], {type});
   }
-  const result = await inflateRawAsync(data, entry.uncompressedSize, type);
+  // Here's the issue with this mess (should refactor?)
+  // if the source is a blob then we really want to pass a blob to inflateRawAsync to avoid a large
+  // copy if we're going to a worker.
+  const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, entry.compressedSize);
+  const result = await inflateRawAsync(typedArrayOrBlob, entry.uncompressedSize, type);
   return result;

@@ -1291,0 +1370,0 @@ }

 {
   "name": "unzipit",
-  "version": "0.1.5",
+  "version": "0.1.6",
   "description": "random access unzip library for JavaScript",

@@ -32,2 +32,3 @@ "main": "dist/unzipit.js",

"eslint": "^6.0.1",
"eslint-plugin-html": "^6.0.0",
"eslint-plugin-one-variable-per-var": "0.0.3",

@@ -34,0 +35,0 @@ "eslint-plugin-optional-comma-spacing": "0.0.4",
