@parcel/cache
Comparing version 2.0.0-canary.1608 to 2.0.0-canary.1609
lib/FSCache.js

@@ -48,5 +48,10 @@ "use strict";
 class FSCache {
-  constructor(fs, cacheDir) {
+  /**
+   * Maximum number of bytes per large blob file
+   */
+  #writeLimitChunk;
+  constructor(fs, cacheDir, writeLimitChunk = _constants.WRITE_LIMIT_CHUNK) {
     this.fs = fs;
     this.dir = cacheDir;
+    this.#writeLimitChunk = writeLimitChunk;
   }
@@ -117,3 +122,3 @@ async ensure() {
   async setLargeBlob(key, contents, options) {
-    const chunks = Math.ceil(contents.length / _constants.WRITE_LIMIT_CHUNK);
+    const chunks = Math.ceil(contents.length / this.#writeLimitChunk);
     const writePromises = [];
@@ -127,3 +132,3 @@ if (chunks === 1) {
     for (let i = 0; i < chunks; i += 1) {
-      writePromises.push(this.fs.writeFile(this.#getFilePath(key, i), typeof contents === 'string' ? contents.slice(i * _constants.WRITE_LIMIT_CHUNK, (i + 1) * _constants.WRITE_LIMIT_CHUNK) : contents.subarray(i * _constants.WRITE_LIMIT_CHUNK, (i + 1) * _constants.WRITE_LIMIT_CHUNK), {
+      writePromises.push(this.fs.writeFile(this.#getFilePath(key, i), typeof contents === 'string' ? contents.slice(i * this.#writeLimitChunk, (i + 1) * this.#writeLimitChunk) : contents.subarray(i * this.#writeLimitChunk, (i + 1) * this.#writeLimitChunk), {
         signal: options === null || options === void 0 ? void 0 : options.signal
@@ -130,0 +135,0 @@ }));
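
For a concrete sense of the chunk math above: a minimal standalone sketch, using an illustrative 4-byte limit (Parcel's real WRITE_LIMIT_CHUNK is far larger; it is shrunk here only to make the slices visible).

// Sketch of the chunking arithmetic in setLargeBlob; values are illustrative.
const writeLimitChunk = 4; // bytes per chunk file
const contents = Buffer.from('0123456789'); // 10 bytes
const chunks = Math.ceil(contents.length / writeLimitChunk); // -> 3
for (let i = 0; i < chunks; i += 1) {
  // subarray clamps past the end, so the last chunk is just the remainder
  const slice = contents.subarray(i * writeLimitChunk, (i + 1) * writeLimitChunk);
  console.log(i, slice.toString()); // 0 '0123', 1 '4567', 2 '89'
}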
lib/LMDBCache.js

@@ -7,2 +7,9 @@ "use strict";
 exports.LMDBCache = void 0;
+function _crypto() {
+  const data = _interopRequireDefault(require("crypto"));
+  _crypto = function () {
+    return data;
+  };
+  return data;
+}
 function _stream() {
@@ -54,4 +61,8 @@ const data = _interopRequireDefault(require("stream"));
 // flowlint-next-line untyped-import:off
 // $FlowFixMe
 const pipeline = (0, _util().promisify)(_stream().default.pipeline);
+/**
+ * See `LMDBCache::setLargeBlob`
+ */
 class LMDBCache {
@@ -113,17 +124,52 @@ // $FlowFixMe
   }
-  hasLargeBlob(key) {
-    return this.fs.exists(this.#getFilePath(key, 0));
+  async hasLargeBlob(key) {
+    if (!(await this.has(key))) {
+      return false;
+    }
+    const entry = await this.get(key);
+    if ((entry === null || entry === void 0 ? void 0 : entry.type) !== 'LARGE_BLOB') {
+      return false;
+    }
+    return this.fsCache.hasLargeBlob(entry.largeBlobKey);
   }
-  // eslint-disable-next-line require-await
   async getLargeBlob(key) {
-    return this.fsCache.getLargeBlob(key);
+    if (!(await this.has(key))) {
+      throw new Error(`No large blob entry found for key=${key}`);
+    }
+    const entry = await this.get(key);
+    if ((entry === null || entry === void 0 ? void 0 : entry.type) !== 'LARGE_BLOB') {
+      throw new Error(`Invalid entry at large blob key=${key}`);
+    }
+    return this.fsCache.getLargeBlob(entry.largeBlobKey);
  }
-  // eslint-disable-next-line require-await
+  /**
+   * Set large blob into LMDB.
+   * This stores large blobs as files on a delegate FSCache,
+   * but uses an LMDB entry to provide transactional behaviour.
+   *
+   * On its own the FSCache implementation is not transactional and
+   * may result in corrupted caches. Furthermore, it may result in
+   * partially written or read caches, where we are concatenating bytes
+   * from different cache writes.
+   */
   async setLargeBlob(key, contents, options) {
-    return this.fsCache.setLargeBlob(key, contents, options);
+    // $FlowFixMe flow libs are outdated but we only support node>16 so randomUUID is present
+    const largeBlobKey = `${key}_${_crypto().default.randomUUID()}`;
+    await this.fsCache.setLargeBlob(largeBlobKey, contents, options);
+    await this.set(key, {
+      type: 'LARGE_BLOB',
+      largeBlobKey
+    });
  }
-  deleteLargeBlob(key) {
-    return this.fsCache.deleteLargeBlob(key);
+  async deleteLargeBlob(key) {
+    if (!(await this.has(key))) {
+      return;
+    }
+    const entry = await this.get(key);
+    if ((entry === null || entry === void 0 ? void 0 : entry.type) !== 'LARGE_BLOB') {
+      return;
+    }
+    await this.store.remove(key);
+    return this.fsCache.deleteLargeBlob(entry.largeBlobKey);
  }
@@ -130,0 +176,0 @@ refresh() {
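
Taken together, the rewritten methods give large blobs a lookup-through-LMDB lifecycle: every read, existence check, and delete resolves the LMDB entry first, then delegates to the FSCache under the UUID-suffixed key. A hedged usage sketch, assuming `cache` is an already-constructed LMDBCache instance (construction is outside this diff):

// Sketch only: `cache` is assumed to be an LMDBCache instance from this package.
async function roundTrip(cache) {
  await cache.setLargeBlob('my-key', Buffer.alloc(50 * 1024 * 1024));
  if (await cache.hasLargeBlob('my-key')) {
    const blob = await cache.getLargeBlob('my-key'); // resolves via the LMDB entry
    console.log(blob.length);
  }
  await cache.deleteLargeBlob('my-key'); // removes the LMDB entry, then the blob files
}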
package.json

 {
   "name": "@parcel/cache",
   "description": "Interface for defining caches and file-system, IDB and LMDB implementations.",
-  "version": "2.0.0-canary.1608+e1d3cbe91",
+  "version": "2.0.0-canary.1609+d988abeb1",
   "license": "MIT",
@@ -28,9 +28,9 @@ "publishConfig": {
   "dependencies": {
-    "@parcel/fs": "2.0.0-canary.1608+e1d3cbe91",
-    "@parcel/logger": "2.0.0-canary.1608+e1d3cbe91",
-    "@parcel/utils": "2.0.0-canary.1608+e1d3cbe91",
+    "@parcel/fs": "2.0.0-canary.1609+d988abeb1",
+    "@parcel/logger": "2.0.0-canary.1609+d988abeb1",
+    "@parcel/utils": "2.0.0-canary.1609+d988abeb1",
     "lmdb": "2.8.5"
   },
   "peerDependencies": {
-    "@parcel/core": "^2.0.0-canary.1606+e1d3cbe91"
+    "@parcel/core": "^2.0.0-canary.1607+d988abeb1"
   },
@@ -44,3 +44,3 @@ "devDependencies": {
   },
-  "gitHead": "e1d3cbe910e746380822e9b5a8a18c9574e6925c"
+  "gitHead": "d988abeb1c666ad2649d4c2b43a108eba0d7a226"
 }
src/FSCache.js

@@ -24,6 +24,15 @@ // @flow strict-local
   dir: FilePath;
+  /**
+   * Maximum number of bytes per large blob file
+   */
+  #writeLimitChunk: number;
-  constructor(fs: FileSystem, cacheDir: FilePath) {
+  constructor(
+    fs: FileSystem,
+    cacheDir: FilePath,
+    writeLimitChunk: number = WRITE_LIMIT_CHUNK,
+  ) {
     this.fs = fs;
     this.dir = cacheDir;
+    this.#writeLimitChunk = writeLimitChunk;
   }
@@ -119,3 +128,3 @@
   ): Promise<void> {
-    const chunks = Math.ceil(contents.length / WRITE_LIMIT_CHUNK);
+    const chunks = Math.ceil(contents.length / this.#writeLimitChunk);
@@ -137,8 +146,8 @@ const writePromises: Promise<void>[] = [];
         ? contents.slice(
-            i * WRITE_LIMIT_CHUNK,
-            (i + 1) * WRITE_LIMIT_CHUNK,
+            i * this.#writeLimitChunk,
+            (i + 1) * this.#writeLimitChunk,
           )
         : contents.subarray(
-            i * WRITE_LIMIT_CHUNK,
-            (i + 1) * WRITE_LIMIT_CHUNK,
+            i * this.#writeLimitChunk,
+            (i + 1) * this.#writeLimitChunk,
          ),
@@ -145,0 +154,0 @@ {signal: options?.signal},
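
Since the Flow source now takes the chunk size as a constructor parameter, callers can inject a tiny limit to exercise the multi-chunk path without multi-gigabyte fixtures. A hypothetical test-style sketch, assuming NodeFS from @parcel/fs and that FSCache is re-exported by this package; the path is illustrative:

// Hypothetical usage: a 4-byte writeLimitChunk forces the multi-chunk
// branch of setLargeBlob even with a tiny input.
import {NodeFS} from '@parcel/fs';
import {FSCache} from '@parcel/cache';

const cache = new FSCache(new NodeFS(), '/tmp/parcel-cache-test', 4);
await cache.ensure(); // creates the cache directory
await cache.setLargeBlob('blob', Buffer.from('0123456789')); // writes 3 chunk files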
src/LMDBCache.js

 // @flow strict-local
+import crypto from 'crypto';
 import type {FilePath} from '@parcel/types';
@@ -13,3 +14,2 @@ import type {Cache} from './types';
 import packageJson from '../package.json';
-// $FlowFixMe
 import lmdb from 'lmdb';
@@ -23,2 +23,7 @@
+/**
+ * See `LMDBCache::setLargeBlob`
+ */
+type LargeBlobEntry = {|type: 'LARGE_BLOB', largeBlobKey: string|};
 export class LMDBCache implements Cache {
@@ -104,12 +109,34 @@ fs: NodeFS;
-  hasLargeBlob(key: string): Promise<boolean> {
-    return this.fs.exists(this.#getFilePath(key, 0));
+  async hasLargeBlob(key: string): Promise<boolean> {
+    if (!(await this.has(key))) {
+      return false;
+    }
+    const entry = await this.get<LargeBlobEntry>(key);
+    if (entry?.type !== 'LARGE_BLOB') {
+      return false;
+    }
+    return this.fsCache.hasLargeBlob(entry.largeBlobKey);
   }
-  // eslint-disable-next-line require-await
   async getLargeBlob(key: string): Promise<Buffer> {
-    return this.fsCache.getLargeBlob(key);
+    if (!(await this.has(key))) {
+      throw new Error(`No large blob entry found for key=${key}`);
+    }
+    const entry = await this.get<LargeBlobEntry>(key);
+    if (entry?.type !== 'LARGE_BLOB') {
+      throw new Error(`Invalid entry at large blob key=${key}`);
+    }
+    return this.fsCache.getLargeBlob(entry.largeBlobKey);
   }
-  // eslint-disable-next-line require-await
+  /**
+   * Set large blob into LMDB.
+   * This stores large blobs as files on a delegate FSCache,
+   * but uses an LMDB entry to provide transactional behaviour.
+   *
+   * On its own the FSCache implementation is not transactional and
+   * may result in corrupted caches. Furthermore, it may result in
+   * partially written or read caches, where we are concatenating bytes
+   * from different cache writes.
+   */
   async setLargeBlob(
@@ -120,7 +147,19 @@ key: string,
   ): Promise<void> {
-    return this.fsCache.setLargeBlob(key, contents, options);
+    // $FlowFixMe flow libs are outdated but we only support node>16 so randomUUID is present
+    const largeBlobKey = `${key}_${crypto.randomUUID()}`;
+    await this.fsCache.setLargeBlob(largeBlobKey, contents, options);
+    const entry: LargeBlobEntry = {type: 'LARGE_BLOB', largeBlobKey};
+    await this.set(key, entry);
   }
-  deleteLargeBlob(key: string): Promise<void> {
-    return this.fsCache.deleteLargeBlob(key);
+  async deleteLargeBlob(key: string): Promise<void> {
+    if (!(await this.has(key))) {
+      return;
+    }
+    const entry = await this.get<LargeBlobEntry>(key);
+    if (entry?.type !== 'LARGE_BLOB') {
+      return;
+    }
+    await this.store.remove(key);
+    return this.fsCache.deleteLargeBlob(entry.largeBlobKey);
   }
@@ -127,0 +166,0 @@
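
To make the doc comment's transactional claim concrete: each write now lands under a fresh `${key}_${uuid}` FSCache key, and the LMDB entry is only repointed after the chunk files are fully written, so a reader resolves either the old generation or the new one, never a mix of both. A short inspection sketch, assuming `cache` is an LMDBCache instance; the UUID shown is illustrative:

// Sketch: inspecting the indirection after a write. `cache` is an assumed
// LMDBCache instance; the UUID below is illustrative, not deterministic.
const someBuffer = Buffer.from('serialized graph bytes');
await cache.setLargeBlob('graph', someBuffer);
const entry = await cache.get('graph');
// entry -> {type: 'LARGE_BLOB', largeBlobKey: 'graph_1c9e66aa-...'}
// A repeat setLargeBlob('graph', ...) writes new chunk files under a fresh
// UUID key, then repoints this entry in a single LMDB set, so a concurrent
// reader never concatenates chunks from two different writes.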
Manifest confusion
Supply chain risk: This package has inconsistent metadata. This could be malicious or caused by an error when publishing the package.
Found 1 instance in 1 package

Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package