Socket
Socket
Sign in · Demo · Install

cacache

Package Overview
Dependencies
Maintainers
6
Versions
102
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

cacache - npm Package Compare versions

Comparing version 15.1.0 to 15.2.0

56

get.js

@@ -35,3 +35,3 @@ 'use strict'

integrity: memoized.entry.integrity,
size: memoized.entry.size
size: memoized.entry.size,
}

@@ -42,8 +42,8 @@ )

(entry) => {
if (!entry && !byDigest) {
if (!entry && !byDigest)
throw new index.NotFoundError(cache, key)
}
return read(cache, byDigest ? key : entry.integrity, {
integrity,
size
size,
})

@@ -57,11 +57,11 @@ .then((data) =>

size: entry.size,
integrity: entry.integrity
integrity: entry.integrity,
}
)
.then((res) => {
if (memoize && byDigest) {
if (memoize && byDigest)
memo.put.byDigest(cache, key, res, opts)
} else if (memoize) {
else if (memoize)
memo.put(cache, entry, res.data, opts)
}
return res

@@ -92,12 +92,12 @@ })

integrity: memoized.entry.integrity,
size: memoized.entry.size
size: memoized.entry.size,
}
}
const entry = !byDigest && index.find.sync(cache, key, opts)
if (!entry && !byDigest) {
if (!entry && !byDigest)
throw new index.NotFoundError(cache, key)
}
const data = read.sync(cache, byDigest ? key : entry.integrity, {
integrity: integrity,
size: size
size: size,
})

@@ -110,9 +110,9 @@ const res = byDigest

size: entry.size,
integrity: entry.integrity
integrity: entry.integrity,
}
if (memoize && byDigest) {
if (memoize && byDigest)
memo.put.byDigest(cache, key, res, opts)
} else if (memoize) {
else if (memoize)
memo.put(cache, entry, res.data, opts)
}
return res

@@ -137,5 +137,4 @@ }

const memoized = memo.get(cache, key, opts)
if (memoized && memoize !== false) {
if (memoized && memoize !== false)
return getMemoizedStream(memoized)
}

@@ -146,5 +145,5 @@ const stream = new Pipeline()

.then((entry) => {
if (!entry) {
if (!entry)
throw new index.NotFoundError(cache, key)
}
stream.emit('metadata', entry.metadata)

@@ -188,5 +187,5 @@ stream.emit('integrity', entry.integrity)

const stream = read.readStream(cache, integrity, opts)
if (!memoize) {
if (!memoize)
return stream
}
const memoStream = new Collect.PassThrough()

@@ -208,7 +207,6 @@ memoStream.on('collect', data => memo.put.byDigest(

const memoized = memo.get(cache, key, opts)
if (memoized && memoize !== false) {
if (memoized && memoize !== false)
return Promise.resolve(memoized.entry)
} else {
else
return index.find(cache, key)
}
}

@@ -236,5 +234,5 @@

).then((entry) => {
if (!entry && !byDigest) {
if (!entry && !byDigest)
throw new index.NotFoundError(cache, key)
}
return read

@@ -248,3 +246,3 @@ .copy(cache, byDigest ? key : entry.integrity, dest, opts)

size: entry.size,
integrity: entry.integrity
integrity: entry.integrity,
}

@@ -262,3 +260,3 @@ })

size: res.size,
integrity: res.integrity
integrity: res.integrity,
}

@@ -265,0 +263,0 @@ })

@@ -23,13 +23,12 @@ 'use strict'

}).then(({ stat, cpath, sri }) => {
if (typeof size === 'number' && stat.size !== size) {
if (typeof size === 'number' && stat.size !== size)
throw sizeError(size, stat.size)
}
if (stat.size > MAX_SINGLE_READ_SIZE) {
if (stat.size > MAX_SINGLE_READ_SIZE)
return readPipeline(cpath, stat.size, sri, new Pipeline()).concat()
}
return readFile(cpath, null).then((data) => {
if (!ssri.checkData(data, sri)) {
if (!ssri.checkData(data, sri))
throw integrityError(sri, cpath)
}
return data

@@ -44,7 +43,7 @@ })

size,
readSize: MAX_SINGLE_READ_SIZE
readSize: MAX_SINGLE_READ_SIZE,
}),
ssri.integrityStream({
integrity: sri,
size
size,
})

@@ -61,9 +60,7 @@ )

const data = fs.readFileSync(cpath)
if (typeof size === 'number' && size !== data.length) {
if (typeof size === 'number' && size !== data.length)
throw sizeError(size, data.length)
}
if (ssri.checkData(data, sri)) {
if (ssri.checkData(data, sri))
return data
}

@@ -84,5 +81,5 @@ throw integrityError(sri, cpath)

}).then(({ stat, cpath, sri }) => {
if (typeof size === 'number' && size !== stat.size) {
if (typeof size === 'number' && size !== stat.size)
return stream.emit('error', sizeError(size, stat.size))
}
readPipeline(cpath, stat.size, sri, stream)

@@ -116,18 +113,17 @@ }, er => stream.emit('error', er))

function hasContent (cache, integrity) {
if (!integrity) {
if (!integrity)
return Promise.resolve(false)
}
return withContentSri(cache, integrity, (cpath, sri) => {
return lstat(cpath).then((stat) => ({ size: stat.size, sri, stat }))
}).catch((err) => {
if (err.code === 'ENOENT') {
if (err.code === 'ENOENT')
return false
}
if (err.code === 'EPERM') {
/* istanbul ignore else */
if (process.platform !== 'win32') {
if (process.platform !== 'win32')
throw err
} else {
else
return false
}
}

@@ -140,5 +136,5 @@ })

function hasContentSync (cache, integrity) {
if (!integrity) {
if (!integrity)
return false
}
return withContentSriSync(cache, integrity, (cpath, sri) => {

@@ -149,12 +145,11 @@ try {

} catch (err) {
if (err.code === 'ENOENT') {
if (err.code === 'ENOENT')
return false
}
if (err.code === 'EPERM') {
/* istanbul ignore else */
if (process.platform !== 'win32') {
if (process.platform !== 'win32')
throw err
} else {
else
return false
}
}

@@ -177,3 +172,4 @@ }

} else {
// Can't use race here because a generic error can happen before a ENOENT error, and can happen before a valid result
// Can't use race here because a generic error can happen before
// a ENOENT error, and can happen before a valid result
return Promise

@@ -195,11 +191,9 @@ .all(digests.map((meta) => {

const result = results.find((r) => !(r instanceof Error))
if (result) {
if (result)
return result
}
// Throw the No matching content found error
const enoentError = results.find((r) => r.code === 'ENOENT')
if (enoentError) {
if (enoentError)
throw enoentError
}

@@ -206,0 +200,0 @@ // Throw generic error

@@ -14,8 +14,7 @@ 'use strict'

// ~pretty~ sure we can't end up with a content lacking sri, but be safe
if (content && content.sri) {
if (content && content.sri)
return rimraf(contentPath(cache, content.sri)).then(() => true)
} else {
else
return false
}
})
}

@@ -25,12 +25,11 @@ 'use strict'

const { algorithms, size, integrity } = opts
if (algorithms && algorithms.length > 1) {
if (algorithms && algorithms.length > 1)
throw new Error('opts.algorithms only supports a single algorithm for now')
}
if (typeof size === 'number' && data.length !== size) {
if (typeof size === 'number' && data.length !== size)
return Promise.reject(sizeError(size, data.length))
}
const sri = ssri.fromData(data, algorithms ? { algorithms } : {})
if (integrity && !ssri.checkData(data, integrity, opts)) {
if (integrity && !ssri.checkData(data, integrity, opts))
return Promise.reject(checksumError(integrity, sri))
}

@@ -116,9 +115,13 @@ return disposer(makeTmp(cache, opts), makeTmpDisposer,

algorithms: opts.algorithms,
size: opts.size
size: opts.size,
})
hashStream.on('integrity', i => { integrity = i })
hashStream.on('size', s => { size = s })
hashStream.on('integrity', i => {
integrity = i
})
hashStream.on('size', s => {
size = s
})
const outStream = new fsm.WriteStream(tmpTarget, {
flags: 'wx'
flags: 'wx',
})

@@ -137,3 +140,5 @@

.then(() => ({ integrity, size }))
.catch(er => rimraf(tmpTarget).then(() => { throw er }))
.catch(er => rimraf(tmpTarget).then(() => {
throw er
}))
}

@@ -145,3 +150,3 @@

target: tmpTarget,
moved: false
moved: false,
}))

@@ -151,5 +156,5 @@ }

function makeTmpDisposer (tmp) {
if (tmp.moved) {
if (tmp.moved)
return Promise.resolve()
}
return rimraf(tmp.target)

@@ -156,0 +161,0 @@ }

@@ -17,3 +17,5 @@ 'use strict'

const moveFile = require('@npmcli/move-file')
const rimraf = util.promisify(require('rimraf'))
const _rimraf = require('rimraf')
const rimraf = util.promisify(_rimraf)
rimraf.sync = _rimraf.sync

@@ -39,11 +41,26 @@ const appendFile = util.promisify(fs.appendFile)

const entries = await bucketEntries(bucket)
// reduceRight because the bottom-most result is the newest
const newEntries = []
// we loop backwards because the bottom-most result is the newest
// since we add new entries with appendFile
const newEntries = entries.reduceRight((acc, newEntry) => {
if (!acc.find((oldEntry) => matchFn(oldEntry, newEntry))) {
acc.push(newEntry)
}
for (let i = entries.length - 1; i >= 0; --i) {
const entry = entries[i]
// a null integrity could mean either a delete was appended
// or the user has simply stored an index that does not map
// to any content. we determine if the user wants to keep the
// null integrity based on the validateEntry function passed in options.
// if the integrity is null and no validateEntry is provided, we break
// as we consider the null integrity to be a deletion of everything
// that came before it.
if (entry.integrity === null && !opts.validateEntry)
break
return acc
}, [])
// if this entry is valid, and it is either the first entry or
// the newEntries array doesn't already include an entry that
// matches this one based on the provided matchFn, then we add
// it to the beginning of our list
if ((!opts.validateEntry || opts.validateEntry(entry) === true) &&
(newEntries.length === 0 ||
!newEntries.find((oldEntry) => matchFn(oldEntry, entry))))
newEntries.unshift(entry)
}

@@ -61,3 +78,3 @@ const newIndex = '\n' + newEntries.map((entry) => {

target,
moved: false
moved: false,
}

@@ -67,5 +84,4 @@ }

const teardown = async (tmp) => {
if (!tmp.moved) {
if (!tmp.moved)
return rimraf(tmp.target)
}
}

@@ -83,5 +99,4 @@

} catch (err) {
if (err.code !== 'ENOENT') {
if (err.code !== 'ENOENT')
throw err
}
}

@@ -93,3 +108,8 @@ }

return newEntries.map((entry) => formatEntry(cache, entry, true))
// we reverse the list we generated such that the newest
// entries come first in order to make looping through them easier
// the true passed to formatEntry tells it to keep null
// integrity values, if they made it this far it's because
// validateEntry returned true, and as such we should return it
return newEntries.reverse().map((entry) => formatEntry(cache, entry, true))
}

@@ -107,3 +127,3 @@

size,
metadata
metadata,
}

@@ -120,3 +140,4 @@ return fixOwner

//
// Thanks to @isaacs for the whiteboarding session that ended up with this.
// Thanks to @isaacs for the whiteboarding session that ended up with
// this.
return appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`)

@@ -126,5 +147,5 @@ })

.catch((err) => {
if (err.code === 'ENOENT') {
if (err.code === 'ENOENT')
return undefined
}
throw err

@@ -152,3 +173,3 @@ // There's a class of race conditions that happen when things get deleted

size,
metadata
metadata,
}

@@ -161,5 +182,4 @@ fixOwner.mkdirfix.sync(cache, path.dirname(bucket))

} catch (err) {
if (err.code !== 'ENOENT') {
if (err.code !== 'ENOENT')
throw err
}
}

@@ -176,15 +196,13 @@ return formatEntry(cache, entry)

return entries.reduce((latest, next) => {
if (next && next.key === key) {
if (next && next.key === key)
return formatEntry(cache, next)
} else {
else
return latest
}
}, null)
})
.catch((err) => {
if (err.code === 'ENOENT') {
if (err.code === 'ENOENT')
return null
} else {
else
throw err
}
})

@@ -199,14 +217,12 @@ }

return bucketEntriesSync(bucket).reduce((latest, next) => {
if (next && next.key === key) {
if (next && next.key === key)
return formatEntry(cache, next)
} else {
else
return latest
}
}, null)
} catch (err) {
if (err.code === 'ENOENT') {
if (err.code === 'ENOENT')
return null
} else {
else
throw err
}
}

@@ -217,4 +233,8 @@ }

function del (cache, key, opts) {
return insert(cache, key, null, opts)
function del (cache, key, opts = {}) {
if (!opts.removeFully)
return insert(cache, key, null, opts)
const bucket = bucketPath(cache, key)
return rimraf(bucket)
}

@@ -224,4 +244,8 @@

function delSync (cache, key, opts) {
return insertSync(cache, key, null, opts)
function delSync (cache, key, opts = {}) {
if (!opts.removeFully)
return insertSync(cache, key, null, opts)
const bucket = bucketPath(cache, key)
return rimraf.sync(bucket)
}

@@ -257,8 +281,8 @@

const formatted = formatEntry(cache, entry)
if (formatted) {
if (formatted)
stream.write(formatted)
}
}
}).catch(err => {
if (err.code === 'ENOENT') { return undefined }
if (err.code === 'ENOENT')
return undefined
throw err

@@ -307,5 +331,5 @@ })

data.split('\n').forEach((entry) => {
if (!entry) {
if (!entry)
return
}
const pieces = entry.split('\t')

@@ -324,5 +348,4 @@ if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) {

}
if (obj) {
if (obj)
entries.push(obj)
}
})

@@ -369,5 +392,5 @@ return entries

// Treat null digests as deletions. They'll shadow any previous entries.
if (!entry.integrity && !keepAll) {
if (!entry.integrity && !keepAll)
return null
}
return {

@@ -379,3 +402,3 @@ key: entry.key,

time: entry.time,
metadata: entry.metadata
metadata: entry.metadata,
}

@@ -386,5 +409,4 @@ }

return readdir(dir).catch((err) => {
if (err.code === 'ENOENT' || err.code === 'ENOTDIR') {
if (err.code === 'ENOENT' || err.code === 'ENOTDIR')
return []
}

@@ -391,0 +413,0 @@ throw err

@@ -11,3 +11,3 @@ 'use strict'

maxAge: MAX_AGE,
length: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length
length: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length,
})

@@ -66,11 +66,10 @@

function pickMem (opts) {
if (!opts || !opts.memoize) {
if (!opts || !opts.memoize)
return MEMOIZED
} else if (opts.memoize.get && opts.memoize.set) {
else if (opts.memoize.get && opts.memoize.set)
return opts.memoize
} else if (typeof opts.memoize === 'object') {
else if (typeof opts.memoize === 'object')
return new ObjProxy(opts.memoize)
} else {
else
return MEMOIZED
}
}

@@ -11,5 +11,5 @@ 'use strict'

() => {
if (shouldThrow) {
if (shouldThrow)
throw result
}
return result

@@ -16,0 +16,0 @@ },

@@ -52,5 +52,4 @@ 'use strict'

// No need to override if it's already what we used.
if (self.uid === uid && self.gid === gid) {
if (self.uid === uid && self.gid === gid)
return
}

@@ -63,5 +62,5 @@ return inflight('fixOwner: fixing ownership on ' + filepath, () =>

).catch((err) => {
if (err.code === 'ENOENT') {
if (err.code === 'ENOENT')
return null
}
throw err

@@ -99,5 +98,5 @@ })

// only catch ENOENT, any other error is a problem.
if (err.code === 'ENOENT') {
if (err.code === 'ENOENT')
return null
}
throw err

@@ -117,10 +116,9 @@ }

.then((made) => {
if (made) {
if (made)
return fixOwner(cache, made).then(() => made)
}
})
.catch((err) => {
if (err.code === 'EEXIST') {
if (err.code === 'EEXIST')
return fixOwner(cache, p).then(() => null)
}
throw err

@@ -145,6 +143,5 @@ })

return null
} else {
} else
throw err
}
}
}

@@ -41,8 +41,6 @@ 'use strict'

return resolve()
} else {
} else
return reject(err)
}
} else {
} else
return resolve()
}
})

@@ -54,3 +52,3 @@ })

unlink(src),
!isWindows && chmod(dest, '0444')
!isWindows && chmod(dest, '0444'),
])

@@ -57,0 +55,0 @@ })

@@ -27,3 +27,3 @@ 'use strict'

log: { silly () {} },
...opts
...opts,
})

@@ -44,3 +44,3 @@

writeVerifile,
markEndTime
markEndTime,
]

@@ -59,5 +59,5 @@

const end = new Date()
if (!stats.runTime) {
if (!stats.runTime)
stats.runTime = {}
}
stats.runTime[label] = end - start

@@ -114,5 +114,5 @@ return Promise.resolve(stats)

indexStream.on('data', (entry) => {
if (opts.filter && !opts.filter(entry)) {
if (opts.filter && !opts.filter(entry))
return
}
liveContent.add(entry.integrity.toString())

@@ -127,3 +127,3 @@ })

nodir: true,
nosort: true
nosort: true,
}).then((files) => {

@@ -135,3 +135,3 @@ return Promise.resolve({

badContentCount: 0,
keptSize: 0
keptSize: 0,
}).then((stats) =>

@@ -180,3 +180,3 @@ pMap(

size: s.size,
valid: true
valid: true,
}

@@ -186,5 +186,5 @@ return ssri

.catch((err) => {
if (err.code !== 'EINTEGRITY') {
if (err.code !== 'EINTEGRITY')
throw err
}
return rimraf(filepath).then(() => {

@@ -197,5 +197,5 @@ contentInfo.valid = false

.catch((err) => {
if (err.code === 'ENOENT') {
if (err.code === 'ENOENT')
return { size: 0, valid: false }
}
throw err

@@ -211,3 +211,3 @@ })

rejectedEntries: 0,
totalEntries: 0
totalEntries: 0,
}

@@ -222,5 +222,5 @@ const buckets = {}

excluded && stats.rejectedEntries++
if (buckets[hashed] && !excluded) {
if (buckets[hashed] && !excluded)
buckets[hashed].push(entry)
} else if (buckets[hashed] && excluded) {
else if (buckets[hashed] && excluded) {
// skip

@@ -258,3 +258,3 @@ } else if (excluded) {

metadata: entry.metadata,
size: entry.size
size: entry.size,
})

@@ -261,0 +261,0 @@ .then(() => {

{
"name": "cacache",
"version": "15.1.0",
"version": "15.2.0",
"cache-version": {

@@ -16,11 +16,13 @@ "content": "2",

"benchmarks": "node test/benchmarks",
"lint": "standard",
"postrelease": "npm publish",
"posttest": "npm run lint",
"prepublishOnly": "git push --follow-tags",
"prerelease": "npm t",
"release": "standard-version -s",
"preversion": "npm test",
"postversion": "npm publish",
"prepublishOnly": "git push origin --follow-tags",
"test": "tap",
"snap": "tap",
"coverage": "tap",
"test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test"
"test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test",
"lint": "npm run npmclilint -- \"*.*js\" \"lib/**/*.*js\" \"test/**/*.*js\"",
"npmclilint": "npmcli-lint",
"lintfix": "npm run lint -- --fix",
"postsnap": "npm run lintfix --"
},

@@ -43,19 +45,2 @@ "repository": "https://github.com/npm/cacache",

],
"author": {
"name": "Kat Marchán",
"email": "kzm@sykosomatic.org",
"twitter": "maybekatz"
},
"contributors": [
{
"name": "Charlotte Spencer",
"email": "charlottelaspencer@gmail.com",
"twitter": "charlotteis"
},
{
"name": "Rebecca Turner",
"email": "me@re-becca.org",
"twitter": "ReBeccaOrg"
}
],
"license": "ISC",

@@ -82,7 +67,6 @@ "dependencies": {

"devDependencies": {
"@npmcli/lint": "^1.0.1",
"benchmark": "^2.1.4",
"chalk": "^4.0.0",
"require-inject": "^1.4.4",
"standard": "^14.3.1",
"standard-version": "^7.1.0",
"tacks": "^1.3.0",

@@ -89,0 +73,0 @@ "tap": "^15.0.9"

@@ -12,3 +12,3 @@ 'use strict'

algorithms: ['sha512'],
...opts
...opts,
})

@@ -25,5 +25,5 @@

.then((entry) => {
if (memoize) {
if (memoize)
memo.put(cache, entry, data, opts)
}
return res.integrity

@@ -72,13 +72,12 @@ })

.then((entry) => {
if (memoize && memoData) {
if (memoize && memoData)
memo.put(cache, entry, memoData, opts)
}
if (integrity) {
if (integrity)
pipeline.emit('integrity', integrity)
}
if (size) {
if (size)
pipeline.emit('size', size)
}
})
}
},
}))

@@ -85,0 +84,0 @@

@@ -461,3 +461,3 @@ # cacache [![npm version](https://img.shields.io/npm/v/cacache.svg)](https://npm.im/cacache) [![license](https://img.shields.io/npm/l/cacache.svg)](https://npm.im/cacache) [![Travis](https://img.shields.io/travis/npm/cacache.svg)](https://travis-ci.org/npm/cacache) [![AppVeyor](https://ci.appveyor.com/api/projects/status/github/npm/cacache?svg=true)](https://ci.appveyor.com/project/npm/cacache) [![Coverage Status](https://coveralls.io/repos/github/npm/cacache/badge.svg?branch=latest)](https://coveralls.io/github/npm/cacache?branch=latest)

#### <a name="rm-entry"></a> `> cacache.rm.entry(cache, key) -> Promise`
#### <a name="rm-entry"></a> `> cacache.rm.entry(cache, key, [opts]) -> Promise`

@@ -469,2 +469,6 @@ Alias: `cacache.rm`

By default, this appends a new entry to the index with an integrity of `null`.
If `opts.removeFully` is set to `true` then the index file itself will be
physically deleted rather than appending a `null`.
To remove the content itself (which might still be used by other entries), use

@@ -496,3 +500,3 @@ [`rm.content`](#rm-content). Or, to safely vacuum any unused content, use

#### <a name="index-compact"></a> `> cacache.index.compact(cache, key, matchFn) -> Promise`
#### <a name="index-compact"></a> `> cacache.index.compact(cache, key, matchFn, [opts]) -> Promise`

@@ -503,2 +507,11 @@ Uses `matchFn`, which must be a synchronous function that accepts two entries

If `opts.validateEntry` is provided, it will be called as a function with the
only parameter being a single index entry. The function must return a Boolean,
if it returns `true` the entry is considered valid and will be kept in the index,
if it returns `false` the entry will be removed from the index.
If `opts.validateEntry` is not provided, however, every entry in the index will
be deduplicated and kept until the first `null` integrity is reached, removing
all entries that were written before the `null`.
The deduplicated list of entries is both written to the index, replacing the

@@ -505,0 +518,0 @@ existing content, and returned in the Promise.

@@ -14,5 +14,5 @@ 'use strict'

function entry (cache, key) {
function entry (cache, key, opts) {
memo.clearMemoized()
return index.delete(cache, key)
return index.delete(cache, key, opts)
}

@@ -19,0 +19,0 @@

Socket · SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc