You're Invited: Meet the Socket Team at BlackHat and DEF CON in Las Vegas, Aug 4-6. RSVP
Socket
Book a Demo · Install · Sign in
Socket

ipfs-unixfs-exporter

Package Overview
Dependencies
Maintainers
3
Versions
146
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

ipfs-unixfs-exporter - npm Package Compare versions

Comparing version 13.3.1

to
13.4.0

33

dist/src/index.d.ts

@@ -79,5 +79,33 @@ /**

export interface ExporterOptions extends ProgressOptions<ExporterProgressEvents> {
/**
* An optional offset to start reading at.
*
* If the CID resolves to a file this will be a byte offset within that file,
* otherwise if it's a directory it will be a directory entry offset within
* the directory listing. (default: undefined)
*/
offset?: number;
/**
* An optional length to read.
*
* If the CID resolves to a file this will be the number of bytes read from
* the file, otherwise if it's a directory it will be the number of directory
* entries read from the directory listing. (default: undefined)
*/
length?: number;
/**
* This signal can be used to abort any long-lived operations such as fetching
* blocks from the network. (default: undefined)
*/
signal?: AbortSignal;
/**
* When a DAG layer is encountered, all child nodes are loaded in parallel but
* processed as they arrive. This allows us to load sibling nodes in advance
* of yielding their bytes. Pass a value here to control the number of blocks
* loaded in parallel. If a strict depth-first traversal is required, this
* value should be set to `1`, otherwise the traversal order will tend to
* resemble a breadth-first fan-out but still yield a stable ordering.
* (default: undefined)
*/
blockReadConcurrency?: number;
}

@@ -122,2 +150,4 @@ export interface Exportable<T> {

/**
* @example File content
*
* When `entry` is a file or a `raw` node, `offset` and/or `length` arguments can be passed to `entry.content()` to return slices of data:

@@ -142,2 +172,4 @@ *

*
* @example Directory content
*
* If `entry` is a directory, passing `offset` and/or `length` to `entry.content()` will limit the number of files returned from the directory.

@@ -157,3 +189,2 @@ *

* ```
*
*/

@@ -160,0 +191,0 @@ content(options?: ExporterOptions): AsyncGenerator<T, void, unknown>;

5

dist/src/resolvers/unixfs-v1/content/directory.js

@@ -21,3 +21,6 @@ import filter from 'it-filter';

};
}), source => parallel(source, { ordered: true }), source => filter(source, entry => entry != null));
}), source => parallel(source, {
ordered: true,
concurrency: options.blockReadConcurrency
}), source => filter(source, entry => entry != null));
}

@@ -24,0 +27,0 @@ return yieldDirectoryContent;

@@ -67,3 +67,4 @@ import * as dagPb from '@ipld/dag-pb';

}), (source) => parallel(source, {
ordered: true
ordered: true,
concurrency: options.blockReadConcurrency
}), async (source) => {

@@ -70,0 +71,0 @@ for await (const { link, block, blockStart } of source) {

@@ -50,3 +50,6 @@ import { decode } from '@ipld/dag-pb';

};
}), source => parallel(source, { ordered: true }));
}), source => parallel(source, {
ordered: true,
concurrency: options.blockReadConcurrency
}));
for await (const { entries } of results) {

@@ -53,0 +56,0 @@ yield* entries;

{
"name": "ipfs-unixfs-exporter",
"version": "13.3.1",
"version": "13.4.0",
"description": "JavaScript implementation of the UnixFs exporter used by IPFS",

@@ -81,2 +81,3 @@ "license": "Apache-2.0 OR MIT",

"it-buffer-stream": "^3.0.0",
"it-drain": "^3.0.5",
"it-first": "^3.0.2",

@@ -83,0 +84,0 @@ "it-to-buffer": "^4.0.2",

@@ -97,5 +97,36 @@ /**

export interface ExporterOptions extends ProgressOptions<ExporterProgressEvents> {
/**
* An optional offset to start reading at.
*
* If the CID resolves to a file this will be a byte offset within that file,
* otherwise if it's a directory it will be a directory entry offset within
* the directory listing. (default: undefined)
*/
offset?: number
/**
* An optional length to read.
*
* If the CID resolves to a file this will be the number of bytes read from
* the file, otherwise if it's a directory it will be the number of directory
* entries read from the directory listing. (default: undefined)
*/
length?: number
/**
* This signal can be used to abort any long-lived operations such as fetching
* blocks from the network. (default: undefined)
*/
signal?: AbortSignal
/**
* When a DAG layer is encountered, all child nodes are loaded in parallel but
* processed as they arrive. This allows us to load sibling nodes in advance
* of yielding their bytes. Pass a value here to control the number of blocks
* loaded in parallel. If a strict depth-first traversal is required, this
* value should be set to `1`, otherwise the traversal order will tend to
* resemble a breadth-first fan-out but still yield a stable ordering.
* (default: undefined)
*/
blockReadConcurrency?: number
}

@@ -147,2 +178,4 @@

/**
* @example File content
*
* When `entry` is a file or a `raw` node, `offset` and/or `length` arguments can be passed to `entry.content()` to return slices of data:

@@ -167,2 +200,4 @@ *

*
* @example Directory content
*
* If `entry` is a directory, passing `offset` and/or `length` to `entry.content()` will limit the number of files returned from the directory.

@@ -182,3 +217,2 @@ *

* ```
*
*/

@@ -185,0 +219,0 @@ content(options?: ExporterOptions): AsyncGenerator<T, void, unknown>

@@ -28,3 +28,6 @@ import filter from 'it-filter'

}),
source => parallel(source, { ordered: true }),
source => parallel(source, {
ordered: true,
concurrency: options.blockReadConcurrency
}),
source => filter(source, entry => entry != null)

@@ -31,0 +34,0 @@ )

@@ -87,3 +87,4 @@ import * as dagPb from '@ipld/dag-pb'

(source) => parallel(source, {
ordered: true
ordered: true,
concurrency: options.blockReadConcurrency
}),

@@ -90,0 +91,0 @@ async (source) => {

@@ -65,3 +65,6 @@ import { decode, type PBNode } from '@ipld/dag-pb'

}),
source => parallel(source, { ordered: true })
source => parallel(source, {
ordered: true,
concurrency: options.blockReadConcurrency
})
)

@@ -68,0 +71,0 @@

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet