Huge News!Announcing our $40M Series B led by Abstract Ventures.Learn More
Socket
Sign inDemoInstall
Socket

block-cache

Package Overview
Dependencies
Maintainers
1
Versions
2
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

block-cache - npm Package Compare versions

Comparing version 1.0.0 to 2.0.0

169

Cache.js
'use strict'
const err = require('./lib/err')
const megaByte10 = (10 * 1024 * 1024)
const createLRUCache = require('lru-cache')
const CachedFile = require('./CachedFile')
const promisifyAsync = require('./lib/promisifyAsync')
const mem = require('./lib/memorizeAsync')
const DEFAULT_CACHE_SIZE = (10 * 1024 * 1024)
module.exports = class Cache {
constructor (fs, opts) {
// Creates the error every operation reports once the filesystem backing
// this cache has been disconnected.
const disconnected = () =>
  err('ERR_DISCONNECTED', 'The filesystem has been disconnected')

// Delivers a disconnected-error to `cb` asynchronously (via setImmediate).
const sendDisconnected = cb => {
  setImmediate(() => cb(disconnected()))
}
// Stand-in fs implementation that is swapped in after a disconnect():
// every operation fails asynchronously with an ERR_DISCONNECTED error.
const DISCONNECTED_FS = {
  open (path, opts, cb) {
    sendDisconnected(cb)
  },
  stat (path, cb) {
    sendDisconnected(cb)
  },
  close (path, cb) {
    sendDisconnected(cb)
  },
  read (fp, buffer, position, size, start, cb) {
    sendDisconnected(cb)
  }
}
// Keeps track of all open file pointers
// Book-keeps every file pointer opened through the wrapped fs so that a
// disconnect can close all of them in one sweep.
class FpMemory {
  constructor (fs) {
    this.fs = fs
    this.opened = []
    this.allClosed = false
  }

  // Opens `path` and remembers the resulting file pointer. When closeAll()
  // ran while fs.open was still in flight, the freshly opened pointer is
  // closed again right away and the caller gets a disconnected error.
  open (path, opts, cb) {
    this.fs.open(path, opts, (err, fp) => {
      if (err) return cb(err)
      if (!this.allClosed) {
        this.opened.push(fp)
        return cb(null, fp)
      }
      // Undo the racing open and report the disconnect instead.
      this.fs.close(fp, () => cb(disconnected()))
    })
  }

  // Closes a single file pointer and drops it from the book-keeping.
  close (fp, cb) {
    const at = this.opened.indexOf(fp)
    if (at !== -1) this.opened.splice(at, 1)
    this.fs.close(fp, cb)
  }

  // Closes every tracked file pointer. Calls back with null on success or
  // with an array collecting every error the individual closes produced.
  closeAll (cb) {
    this.allClosed = true
    if (this.opened.length === 0) return cb(null)
    let returned = 0
    let errors = null
    for (const fp of this.opened) {
      this.fs.close(fp, err => {
        returned += 1
        if (err) {
          if (errors === null) errors = []
          errors.push(err)
        }
        if (returned === this.opened.length) cb(errors)
      })
    }
  }
}
// Wraps the user-supplied `fs` into the minimal internal API handed to
// CachedFile instances (open/close/stat/read/disconnect). After
// disconnect() both the fp book-keeping and the fs binding are swapped for
// DISCONNECTED_FS, so every later operation fails with ERR_DISCONNECTED.
function wrapFs (fs, cacheOpts) {
  let fpMemory = new FpMemory(fs)
  return {
    open: (path, opts, cb) => fpMemory.open(path, opts, cb),
    close: (fp, cb) => fpMemory.close(fp, cb),
    // Memoized: disconnect runs once, later calls reuse the same result.
    disconnect: mem.promise(cb => {
      const _fpMemory = fpMemory
      // Swap in the stubs first so no new operation can slip through,
      // then close everything that was opened before the disconnect.
      fpMemory = DISCONNECTED_FS
      fs = DISCONNECTED_FS
      _fpMemory.closeAll(cb)
    }),
    stat: (path, cb) => fs.stat(path, cb),
    // Reads the byte range [start, end) of `fp`, caching the resulting
    // buffer under a key namespaced by the cache-wide prefix plus the
    // per-file prefix.
    read (fp, prefix, start, end, cb) {
      const key = `${cacheOpts.prefix}${prefix}${start}:${end}`
      const cached = cacheOpts.cache.get(key)
      if (cached) return cb(null, cached)
      const size = end - start
      const buffer = Buffer.allocUnsafe(size)
      fs.read(fp, buffer, 0, size, start, err => {
        if (err) return cb(err)
        cacheOpts.cache.set(key, buffer)
        cb(null, buffer)
      })
    }
  }
}
class Cache {
constructor (fs, cacheOpts) {
if (!fs) throw err('ERR_INVALID_ARG_TYPE', 'fs option required, this package doesnt assume which fs you want to use, see: hyperdrive')
opts = Object.assign({
cacheSize: megaByte10
}, opts)
cacheOpts = Object.assign({
cacheSize: DEFAULT_CACHE_SIZE,
prefix: ''
}, cacheOpts)
if (!opts.cache) {
opts.cache = createLRUCache({
max: opts.cacheSize,
if (!cacheOpts.cache) {
cacheOpts.cache = createLRUCache({
max: cacheOpts.cacheSize,
length: buf => buf.length

@@ -23,4 +122,25 @@ })

this.fs = fs
this.opts = opts
const internal = wrapFs(fs, cacheOpts)
Object.defineProperties(this, {
_readCached: {
value: internal.read,
enumerable: false
},
disconnect: {
value: internal.disconnect
},
openSync: {
value: (path, fileOpts) => {
if (!path) throw err('ERR_INVALID_ARG_TYPE', 'path required')
fileOpts = Object.assign({
blkSize: cacheOpts.blkSize
}, fileOpts)
const file = new CachedFile(internal, path, fileOpts)
Object.freeze(file)
return file
},
enumerable: false
}
})
}

@@ -35,7 +155,2 @@

openSync (path, opts) {
if (!path) throw err('ERR_INVALID_ARG_TYPE', 'path required')
return new CachedFile(this, path, opts)
}
close (fd, cb) {

@@ -48,5 +163,3 @@ return fd.close(cb)

const stream = fp.createReadStream(opts)
stream.on('end', () => {
fp.close()
})
stream.on('end', () => fp.close())
return stream

@@ -58,17 +171,5 @@ }

}
}
Cache.DEFAULT_CACHE_SIZE = DEFAULT_CACHE_SIZE
_readCached (fd, prefix, start, end, cb) {
const key = `${prefix}${start}:${end}`
const cached = this.opts.cache.get(key)
if (cached) {
return cb(null, cached)
}
const size = end - start
const buffer = Buffer.allocUnsafe(size)
this.fs.read(fd, buffer, 0, size, start, err => {
if (err) return cb(err)
this.opts.cache.set(key, buffer)
cb(null, buffer)
})
}
}
module.exports = Cache

@@ -17,73 +17,91 @@ 'use strict'

function getSafeSize (start, end, size) {
let safeStart = start
if (start === -1 || start === undefined || start === null) {
safeStart = 0
function setterGetter (initial) {
let value = initial
return {
get: () => value,
set: newValue => { value = newValue }
}
let safeEnd = end
if (end === -1 || end === undefined || end === null) {
safeEnd = size
}
// Wraps `value` in a minimal descriptor object ({ value }) suitable for
// use with Object.defineProperties.
function property (value) {
  const descriptor = { value: value }
  return descriptor
}
function trimBlock (data, index, range, block) {
let rightCut = block.size
let leftCut = 0
if (index === range.lastIndex) {
rightCut = block.size - (block.end - range.end)
}
return {start: safeStart, end: safeEnd, size: safeEnd - safeStart}
if (index === range.firstIndex) {
leftCut = range.start - block.start
}
if (leftCut > 0 || rightCut < block.size) {
// TODO: Data.slice creates a new Buffer, which is unnecessary
// for `read` but neccesseary for `readStream` maybe can be split?
data = data.slice(leftCut, rightCut)
}
return data
}
class CachedFile {
constructor (fsCache, path, opts) {
this.fsCache = fsCache
this.path = path
this.position = 0
this._reading = 0
this.blkSize = (opts && opts.blkSize) || fsCache.opts.blkSize || CachedFile.DEFAULT_BLK_SIZE
this.fd = mem.promise(cb => fsCache.fs.open(path, 'r', cb))
this.stat = mem.promise(cb => fsCache.fs.stat(path, cb))
this.prefix = mem.promise(cb => this.stat((err, stat) => {
if (err) return cb(err)
cb(null, `${path}:${stat.mtime.getTime().toString(32)}:`)
}))
this.size = mem.promise(cb => this.stat((err, stat) => {
if (err) return cb(err)
cb(null, sizeForStat(stat))
}))
this._fdSize = mem.props({
fd: this.fd,
size: this.size,
prefix: this.prefix
constructor (internal, path, opts) {
let isClosed = false
let closeCb
let reading = 0
const init = mem.props({
fp: cb => internal.open(path, 'r', cb),
prefix: cb => this.stat((err, stat) => {
if (err) return cb(err)
cb(null, `${path}:${stat.mtime.getTime().toString(32)}:`)
})
})
this.close = mem.promise(cb =>
this.fd((err, fd) => {
Object.defineProperties(this, {
_isClosed: { get: () => isClosed },
position: setterGetter(0),
blkSize: property((opts && opts.blkSize) || CachedFile.DEFAULT_BLK_SIZE),
close: mem.property(cb => {
isClosed = true
init((err, parts) => {
if (err) return cb(err)
closeCb = () => internal.close(parts.fp, cb)
if (reading === 0) {
closeCb()
}
})
}),
stat: mem.property(cb => internal.stat(path, cb)),
size: mem.property(cb => this.stat((err, stat) => {
if (err) return cb(err)
const noReader = () => {
this.fsCache.fs.close(fd, cb)
}
const check = () => {
if (this._reading === 0) {
this._closed = () => {}
noReader()
}
}
this._closed = check
check()
})
)
cb(null, sizeForStat(stat))
})),
_readCached: property((block, cb) => init((error, parts) => {
if (error) return cb(error)
if (isClosed) return cb(err('ERR_CLOSED', `File pointer has been closed.`))
reading++
internal.read(parts.fp, parts.prefix, block.start, block.end, (err, data) => {
reading--
if (closeCb !== undefined && reading === 0) closeCb()
cb(err, data)
})
}))
})
}
getRange (rangeIndex, size) {
let rangeStart = rangeIndex * this.blkSize
let rangeEnd = rangeStart + this.blkSize
let rangeSize = this.blkSize
if (rangeEnd > size) {
rangeEnd = size
rangeSize = size - rangeStart
_getBlock (blkIndex, total) {
let size = this.blkSize
let start = blkIndex * size
let end = start + size
if (end > total) {
end = total
size = total - start
}
return {rangeStart, rangeEnd, rangeSize}
return {start, end, size}
}
_readRange (start, end, process, cb) {
this._fdSize((error, fdSize) => {
_getSafeRange (start, end, cb) {
this.size((error, size) => {
if (error) return cb(error)
const size = fdSize.size
const fd = fdSize.fd
const prefix = fdSize.prefix
if (start < 0 || end > size) {
return cb(err('ERR_RANGE', `Invalid Range: ${start}:${end} of '${this.path}' (size: ${size})`))
return cb(err('ERR_RANGE', `Invalid Range: ${start}:${end} (size: ${size})`))
}

@@ -93,37 +111,34 @@ if (end !== null && end !== undefined && end < start) {

}
const safe = getSafeSize(start, end, size)
if (safe.size === 0) {
return cb(null, Buffer.allocUnsafe(0), 0, safe)
if (start === -1 || start === undefined || start === null) {
start = 0
}
const firstIndex = safe.start / this.blkSize | 0
const lastIndex = (safe.end - 1) / this.blkSize | 0
if (end === -1 || end === undefined || end === null) {
end = size
}
cb(null, {
start,
end,
size: end - start,
total: size,
firstIndex: start / this.blkSize | 0,
lastIndex: (end - 1) / this.blkSize | 0
})
})
}
_readRange (start, end, process, cb) {
this._getSafeRange(start, end, (error, range) => {
if (error) return cb(error)
if (range.total === 0) {
return cb(null, Buffer.allocUnsafe(0), 0)
}
const nextRange = index => {
const range = this.getRange(index, size)
const rangeEnd = range.rangeEnd
const rangeStart = range.rangeStart
let rangeSize = range.rangeSize
if (this._closed !== undefined) return cb(err('ERR_CLOSED', `File pointer has been closed.`))
this._reading++
this.fsCache._readCached(fd, prefix, rangeStart, rangeEnd, (err, data) => {
this._reading--
if (this._closed !== undefined) this._closed()
const block = this._getBlock(index, range.total)
this._readCached(block, (err, data) => {
if (err) return cb(err)
let rightCut = rangeSize
let leftCut = 0
if (index === lastIndex) {
rightCut = rangeSize - (rangeEnd - safe.end)
}
if (index === firstIndex) {
leftCut = safe.start - rangeStart
}
if (leftCut > 0 || rightCut < rangeSize) {
// TODO: Data.slice creates a new Buffer, which is unnecessary
// for `read` but neccesseary for `readStream` maybe can be split?
data = data.slice(leftCut, rightCut)
rangeSize = rightCut - leftCut
}
if (index === lastIndex) {
return cb(null, data, rangeSize, safe)
data = trimBlock(data, index, range, block)
if (index === range.lastIndex) {
return cb(null, data, data.length)
} else {
process(data, rangeSize, safe)
process(data, data.length, range)
nextRange(index + 1)

@@ -133,3 +148,3 @@ }

}
nextRange(firstIndex)
nextRange(range.firstIndex)
})

@@ -139,3 +154,3 @@ }

createReadStream (opts) {
if (this._closed) {
if (this._isClosed) {
throw err('ERR_CLOSED', 'File pointer has been closed.')

@@ -188,5 +203,5 @@ }

end,
(partBuffer, bufferLength, safe) => {
(partBuffer, bufferLength, range) => {
if (buffer === undefined || buffer === null) {
buffer = Buffer.allocUnsafe(safe.size)
buffer = Buffer.allocUnsafe(range.size)
}

@@ -196,3 +211,3 @@ partBuffer.copy(buffer, offset, 0, bufferLength)

},
(err, endBuffer, endBufferLength, safe) => {
(err, endBuffer, endBufferLength) => {
if (err) return cb2(err)

@@ -199,0 +214,0 @@ if (buffer === undefined || buffer === null) {

@@ -5,3 +5,23 @@ # Change Log

<a name="2.0.0"></a>
# [2.0.0](https://github.com/martinheidegger/block-cache/compare/v1.0.0...v2.0.0) (2018-03-06)
### Features
* **api:** Added disconnect api that allows the disconnection of a Cache instance from its `fs` ([f87135e](https://github.com/martinheidegger/block-cache/commit/f87135e))
* **api:** Exposed DEFAULT_CACHE_SIZE (10MB) in a tested fashion ([c2f6dbd](https://github.com/martinheidegger/block-cache/commit/c2f6dbd))
* **api:** New option prefix on Cache allows reuse of underlying lru-cache. ([2862257](https://github.com/martinheidegger/block-cache/commit/2862257))
* **freeze:** file pointers created through Cache.open and Cache.openSync are frozen now. ([e5a00ce](https://github.com/martinheidegger/block-cache/commit/e5a00ce))
* **freeze:** Using defineProperties for CachedFile properties to make sure that the instances are freezable. ([f3222c1](https://github.com/martinheidegger/block-cache/commit/f3222c1))
* **sandbox:** Sandboxing the Cache to make sure users of Cache can not access/modify the filesystem. ([ac90063](https://github.com/martinheidegger/block-cache/commit/ac90063))
### BREAKING CHANGES
* **sandbox:** The documented methods `Cached.fd` and `Cached.prefix` exposed implementation details and have been removed.
<a name="1.0.0"></a>
# 1.0.0 (2018-03-05)

@@ -43,3 +43,8 @@ 'use strict'

}
// Like memorizeAsync.promise, but wraps the memoized operation in a
// property descriptor so it can be handed to Object.defineProperties.
memorizeAsync.property = op => ({
  value: memorizeAsync.promise(op)
})
module.exports = memorizeAsync
{
"name": "block-cache",
"version": "1.0.0",
"version": "2.0.0",
"description": "fs read (incl. stream) operations cached in an lru-cache for consistent memory usage with custom fs support (hyperdrive).",

@@ -5,0 +5,0 @@ "main": "index.js",

@@ -13,2 +13,5 @@ # block-cache

The cache does not expose the passed-in API at any point which makes it
suitable as a Sandbox.
`npm i block-cache --save`

@@ -20,3 +23,3 @@

[`fs`](https://nodejs.org/api/fs.html) API but all callbacks are optional and
if omitted will result in a promise returned.
if omitted will result in a Promise returned.

@@ -77,12 +80,14 @@ Here is a simple example of reading a file into the local cache.

- [`.open`](#cache.open)
- [`.close`](#cache.close)
- [`.disconnect`](#cache.disconnect)
- [`.openSync`](#cache.openSync)
- [`.read`](#cache.read)
- [`.createReadStream`](#cache.createReadStream)
- [`DEFAULT_CACHE_SIZE`](#Cache.DEFAULT_CACHE_SIZE)
- [`CachedFile`](#CachedFile)
- [`.close`](#cachedFile.close)
- [`.read`](#cachedFile.read)
- [`.createReadStream`](#cachedFile.createReadStream)
- [`.fd`](#cachedFile.fd)
- [`.size`](#cachedFile.size)
- [`.stat`](#cachedFile.stat)
- [`.prefix`](#cachedFile.prefix)
- [`DEFAULT_BLK_SIZE`](#CachedFile.DEFAULT_BLK_SIZE)

@@ -103,5 +108,9 @@

- `opts.cacheSize` is the size of the lru-cache to be created in case a
`opts.cache` is missing. 10MB by default (integer).
`opts.cache` is missing. Defaults to
[`Cache.DEFAULT_CACHE_SIZE`](#Cache.DEFAULT_CACHE_SIZE) (integer).
- `opts.blkSize` is the default size in bytes of a cache-block. Defaults to
[`CachedFile.DEFAULT_BLK_SIZE`](#CachedFile.DEFAULT_BLK_SIZE). (integer).
- `opts.prefix` is an optional prefix that can be added to the cached data,
useful if you want to reuse the same `opts.cache` for multiple
`Cache` instances. Defaults to `''`. (string)

@@ -127,2 +136,31 @@ ---

<a name="cache.close"></a>
```javascript
cache.close(fp[, cb])
```
Closes a created file pointer reference. After closing, future requests
on the `CachedFile` will result in an `err.code === 'ERR_CLOSED'` error.
- `fp` is a [`CachedFile`](#CachedFile) instance, created
with [`.open`](#cache.open) or [`.openSync`](#cache.openSync)
- `cb(Error)` is an optional async callback handler method.
The method will return a `Promise` if the callback is not defined.
---
<a name="cache.disconnect"></a>
```javascript
cache.disconnect()
```
Disconnects the cache from the file system instance. Any future operations on
the Cache or CachedFile instances created with the Cache will result in
an `err.code === 'ERR_DISCONNECTED'` error. Disconnect also closes all open
file pointer references on the underlying file system.
---
<a name="cache.openSync"></a>

@@ -173,3 +211,3 @@

- `opts.blkSize` is the block size for each block to be cached. Defaults
to [`CachedFile.DEFAULT_BLK_SIZE`](#CachedFile.DEFAULT_BLK_SIZE). (integer).
to [`cache.opts.blkSize`](#Cache). (integer).
- `opts.start` is the position from which to start reading the file. Defaults to 0. (integer)

@@ -179,2 +217,11 @@ - `opts.end` is the end until which to read the file. Defaults to the end of

<a name="Cache.DEFAULT_CACHE_SIZE"></a>
```javascript
Cache.DEFAULT_CACHE_SIZE
```
The default size of a cache created if `opts.cache` is not passed in: 10485760
(integer, equals 10 MegaByte)
---

@@ -189,12 +236,35 @@

Creates a new instance for reading one file. The blocks will still be stored in
the passed-in `cache` object.
the passed-in `cache` object. While it is possible to instantiate a new
`CachedFile`, you can not pass-in a cache directly, use the
[`.open`](#cache.open), [`.openSync`](#cache.openSync) or
[`.createReadStream`](#cache.createReadStream) to interact with the cache
- `cache` is a [`Cache`](#Cache) instance.
- `path` is the path to read the file from (string).
- `cacheInternal` a subset of the `Cache` API that is not accessible from
outside.
- `cacheInternal.open(path, opts, cb)` opens a file pointer to a given `path`
on the underlying `fs`.
- `cacheInternal.stat(path, cb)` receives the `stat` file from the underlying
`fs`
- `cacheInternal.close(fp, cb)` closes a file pointer on the underlying `fs`.
- `cacheInternal.read(fp, prefix, start, end, cb)` reads bytes from the
underlying `fs` into a buffer.
- `opts.blkSize` specifies the block size for this file pointer (integer).
Defaults to `cache.opts.blkSize` or to
[`CachedFile.DEFAULT_BLK_SIZE`](#CachedFile.DEFAULT_BLK_SIZE).
Defaults to [`CachedFile.DEFAULT_BLK_SIZE`](#CachedFile.DEFAULT_BLK_SIZE).
---
<a name="cachedFile.close"></a>
```javascript
cachedFile.close([cb])
```
Closes the instance. After closing, future requests
on the `CachedFile` will result in an `err.code === 'ERR_CLOSED'` error.
- `cb(Error)` is an optional async callback handler method.
The method will return a `Promise` if the callback is not defined.
---
<a name="cachedFile.read"></a>

@@ -221,12 +291,2 @@

<a name="cachedFile.fd"></a>
```javascript
cachedFile.fd([cb])
```
Retrieves the actual file descriptor for that path on the file system.
---
<a name="cachedFile.size"></a>

@@ -242,12 +302,2 @@

<a name="cachedFile.prefix"></a>
```javascript
cachedFile.prefix([cb])
```
The prefix for ranges of the file stored in cache.
---
<a name="cachedFile.stat"></a>

@@ -271,6 +321,10 @@

The default blk size used for caching.
The default `opts.blkSize` used for caching: 512 (integer, equals 512 Byte).
## Acknowledgement
This project was made for and supported by [dotloom](https://github.com/dotloom).
## License
MIT

@@ -32,2 +32,46 @@ 'use strict'

// DEFAULT_CACHE_SIZE must be 10MB, and the default lru-cache must refuse
// to hold a single entry larger than that size.
test('default cache size', t => {
  t.equals(Cache.DEFAULT_CACHE_SIZE, 10 * 1024 * 1024)
  const c = new Cache({
    read (fp, buffer, position, length, start, cb) {
      cb(null, buffer)
    }
  })
  const fp = {}
  // Reads `size` bytes twice: the same buffer instance twice means the
  // value was served from cache, a different instance means it was
  // re-read from the fake fs (i.e. not cached).
  const testSize = (size, smallEnough) => {
    return new Promise((resolve, reject) => {
      c._readCached(fp, '', 0, size, (err, a) => {
        if (err) return reject(err)
        c._readCached(fp, '', 0, size, (err, b) => {
          if (err) return reject(err)
          if (a !== b) {
            if (smallEnough) {
              return reject(new Error(`${size} is supposed to be small enough to be cached by default but wasnt`))
            }
          } else {
            if (!smallEnough) {
              return reject(new Error(`${size} is supposed to be too big to be cached by default but was`))
            }
          }
          resolve()
        })
      })
    })
  }
  return Promise.all([
    testSize(Cache.DEFAULT_CACHE_SIZE, true),
    testSize(Cache.DEFAULT_CACHE_SIZE + 1, false)
  ])
})
// File pointers handed out by cache.open must be frozen instances.
test('cached files are frozen', t =>
  createDrive([{ name: 'hello', data: 'world' }])
    .then(drive => {
      const c = new Cache(drive)
      return c.open('hello')
        .then(fp => t.equals(Object.isFrozen(fp), true))
    })
)
test('convenience API: createReadStream', t =>

@@ -111,2 +155,129 @@ createDrive([{ name: 'hello', data: 'world' }])

// After a disconnect, every operation on previously created file pointers
// — and on ones created afterwards — must fail with
// err.code === 'ERR_DISCONNECTED'.
test('disconnecting should result in disconnected errors', t =>
  createDrive([{ name: 'hello', data: 'world' }])
    .then(drive => {
      const c = new Cache(drive)
      const fp = c.openSync('hello', {blkSize: 1})
      return fp.read(undefined, undefined, 3, 0)
        .then(() => {
          c.disconnect()
          // Runs `op` and asserts that it rejects with ERR_DISCONNECTED.
          const checkDisconnected = (name, op) =>
            Promise.resolve()
              .then(op)
              .then(() => t.fail(`'${name}' ran through even though disconnected`))
              .catch(err => {
                t.equals(err.code, 'ERR_DISCONNECTED', `'${name}' should be disconnected`)
              })
          // openSync still hands out a CachedFile after disconnect; every
          // operation on it is expected to fail.
          // Fixed: the option key is `blkSize` — `blksize` was a typo and
          // was silently ignored.
          const fp2 = c.openSync('holla', {blkSize: 2})
          return Promise.all([
            checkDisconnected('createReadStream', () => new Promise((resolve, reject) => {
              const stream = fp2.createReadStream()
              stream.on('error', reject)
              stream.on('end', resolve)
            })),
            checkDisconnected('fp.read', () => fp.read(undefined, undefined, 3, 2)),
            checkDisconnected('fp.close', () => fp.close()),
            checkDisconnected('fp2.read', () => fp2.read()),
            checkDisconnected('fp2.stat', () => fp2.stat()),
            checkDisconnected('fp2.close', () => fp2.close())
          ])
        })
    })
)
// Disconnect must close every file pointer that was opened through the
// cache — including one whose open was still in flight when the
// disconnect happened.
test('disconnecting should close open file pointers', t => {
  let closeCalled = 0
  let openCalled = 0
  let statCalled = 0
  const fs = {
    open (path, opts, cb) {
      setImmediate(() => {
        openCalled += 1
        cb(null, openCalled) // the counter doubles as a fake file pointer
      })
    },
    stat (path, cb) {
      statCalled += 1
      cb(null, { mtime: new Date() })
    },
    close (fp, cb) {
      closeCalled += 1
      setImmediate(() => cb(null))
    },
    read (fp, buffer, position, size, start, cb) {
      setImmediate(() => cb(null))
    }
  }
  const c = new Cache(fs)
  return Promise.all([
    // make sure that the request to open is actually triggered
    c.openSync('hello').read(),
    c.openSync('hello2').read()
  ])
    .then(() => {
      return Promise.all([
        // This read races against the disconnect below: its open is rolled
        // back and it is reported as disconnected.
        c.openSync('hello').read()
          .then(() => t.fail('Second read shouldnt work'))
          .catch(err => t.equals(err.code, 'ERR_DISCONNECTED', 'Second read should be assumed disconnected')),
        Promise.resolve()
          .then(() => c.disconnect())
      ])
    })
    .then(() => {
      // 2 opens before + 1 racing open; each must also have been closed.
      t.equals(openCalled, 3, 'Making sure that the file was actually opened')
      t.equals(statCalled, 3, 'Making sure that the stat was called too')
      t.equals(closeCalled, 3, 'The file should have been closed by disconnect')
    })
})
// When disconnect closes open file pointers and the underlying fs errors
// on close, all of those errors must be collected and passed through as
// an array.
test('disconnecting should pass errors when closing files', t => {
  const fs = {
    open (path, opts, cb) {
      setImmediate(() => cb(null))
    },
    stat (path, cb) {
      cb(null, { mtime: new Date() })
    },
    close (fp, cb) {
      setImmediate(() => cb(new Error('test')))
    },
    read (fp, buffer, position, size, start, cb) {
      setImmediate(() => cb(null))
    }
  }
  const c = new Cache(fs)
  return Promise.all([
    // make sure that the request to open is actually triggered
    c.openSync('hello').read(),
    c.openSync('hello2').read()
  ])
    .then(() => c.disconnect())
    .then(() => t.fail('Disconnecting should result in an error'))
    .catch(errors => {
      t.type(errors, Array, 'The errors should be returned as array')
      t.equals(errors.length, 2, '')
      t.equals(errors[0].message, 'test', 'Making sure that the error is passed-through')
      t.equals(errors[1].message, 'test', 'Making sure that the error is passed-through')
    })
})
// Disconnecting a cache that never opened a file pointer must resolve
// cleanly, even when the underlying fs would error on close (the erroring
// fs.close is never reached because nothing was opened).
// Fixed: this test previously duplicated the title of the preceding test
// while checking a different scenario; it has been renamed.
test('disconnecting without open file pointers should resolve', t => {
  const fs = {
    open (path, opts, cb) {
      setImmediate(() => cb(null))
    },
    stat (path, cb) {
      cb(null, { mtime: new Date() })
    },
    close (fp, cb) {
      setImmediate(() => cb(new Error('test')))
    },
    read (fp, buffer, position, size, start, cb) {
      setImmediate(() => cb(null))
    }
  }
  const c = new Cache(fs)
  return c.disconnect()
})
test('reading a file', t => {

@@ -240,1 +411,23 @@ const fs = {

})
// The cache-wide `prefix` option must be prepended to the per-file prefix
// when building cache keys.
test('prefixes used with custom caches', t => {
  const fs = {
    read (fd, buffer, offset, size, start, cb) {
      cb()
    }
  }
  const cacheImp = {
    get: (key) => {
      // 'xxx' (cache prefix) + 'y' (file prefix) + '1:3' (byte range)
      t.same(key, 'xxxy1:3')
      return true // report a cache hit so fs.read is never consulted
    }
  }
  const cache = new Cache(fs, {
    cache: cacheImp,
    prefix: 'xxx'
  })
  cache._readCached(null, 'y', 1, 3, err => {
    t.equals(err, null)
    t.end()
  })
})

@@ -10,3 +10,3 @@ 'use strict'

function cachedFile (drive, path, opts) {
return new CachedFile(new Cache(drive), path, opts)
return Object.freeze(new Cache(drive).openSync(path, opts))
}

@@ -33,4 +33,6 @@

testErr('stat', () => c.stat()),
testErr('fd', () => c.fd()),
testErr('read', () => c.read())
testErr('read', () => c.read()),
testErr('_readCached', () => new Promise((resolve, reject) => {
c._readCached({start: 0, end: 10}, err => err ? reject(err) : resolve())
}))
])

@@ -49,7 +51,5 @@ })

return Promise.all([
testErr('fd', () => c.fd()),
testErr('close', () => c.close()),
c.stat(),
c.size(),
c.prefix()
c.size()
])

@@ -79,4 +79,3 @@ })

c.stat(),
c.size(),
c.fd()
c.size()
])

@@ -88,3 +87,3 @@ })

t.equals(CachedFile.DEFAULT_BLK_SIZE, 512)
const c = cachedFile({open: noop, stat: noop})
const c = cachedFile(require('fs'), './Readme.md', {open: noop, stat: noop})
t.equals(c.blkSize, CachedFile.DEFAULT_BLK_SIZE)

@@ -246,6 +245,6 @@ t.end()

drive.read = (fd, buffer, offset, length, position, cb) => {
fdA.fd(() => {
setImmediate(() => {
fdA.close(closed)
setImmediate(() => {
t.notEqual(fdA._closed, undefined)
t.equal(fdA._isClosed, true)
drive._read(fd, buffer, offset, length, position, cb)

@@ -293,2 +292,12 @@ })

// A read spanning several cache blocks must stitch the trimmed first and
// last blocks together with the full blocks in between.
test('Reading a file over multiple blocks', t =>
  createDrive([{ name: 'hello', data: 'itstheendoftheworldasweknowit' }])
    .then(drive => {
      const fd = new Cache(drive).openSync('hello', {blkSize: 5})
      // reads 21 bytes starting at byte 2 — spans 5 blocks of 5 bytes
      return fd.read(null, undefined, 21, 2)
    }).then(buffer => {
      t.equals(buffer.toString(), 'stheendoftheworldaswe')
    })
)
test('Reading the whole stream', t =>

@@ -295,0 +304,0 @@ createDrive([{ name: 'hello', data: 'world' }])

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc