Comparing version 6.0.2 to 6.0.3-check-chunks-in-disk-f95c55e5552840e1962a66ad5945387360ff06c0
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.BufferDisk = exports.FileDisk = exports.Disk = exports.openFile = exports.DiskStream = exports.DiskChunk = exports.DiscardDiskChunk = exports.BufferDiskChunk = void 0; | ||
const tslib_1 = require("tslib"); | ||
const Bluebird = require("bluebird"); | ||
@@ -24,31 +23,29 @@ const stream_1 = require("stream");
 }
-__read(size) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        if (this.isReading) {
-            // We're already reading, return.
-            return;
-        }
-        this.isReading = true;
-        while (this.isReading) {
-            // Don't read out of bounds
-            const length = Math.min(size, this.capacity - this.position);
-            if (length <= 0) {
-                // Nothing left to read: push null to signal end of stream.
-                this.isReading = this.push(null);
-                return;
-            }
-            let bytesRead;
-            let buffer;
-            try {
-                ({ bytesRead, buffer } = yield this.disk.read(Buffer.allocUnsafe(length), 0, length, this.position));
-            }
-            catch (err) {
-                this.emit('error', err);
-                return;
-            }
-            this.position += bytesRead;
-            // this.push() returns true if we need to continue reading.
-            this.isReading = this.push(buffer);
-        }
-    });
+async __read(size) {
+    if (this.isReading) {
+        // We're already reading, return.
+        return;
+    }
+    this.isReading = true;
+    while (this.isReading) {
+        // Don't read out of bounds
+        const length = Math.min(size, this.capacity - this.position);
+        if (length <= 0) {
+            // Nothing left to read: push null to signal end of stream.
+            this.isReading = this.push(null);
+            return;
+        }
+        let bytesRead;
+        let buffer;
+        try {
+            ({ bytesRead, buffer } = await this.disk.read(Buffer.allocUnsafe(length), 0, length, this.position));
+        }
+        catch (err) {
+            this.emit('error', err);
+            return;
+        }
+        this.position += bytesRead;
+        // this.push() returns true if we need to continue reading.
+        this.isReading = this.push(buffer);
+    }
 }
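Note the error handling in this hunk: a failed disk.read() inside the pull loop is re-emitted as an 'error' event on the stream rather than surfacing as a rejected promise. A minimal consumer sketch in TypeScript (dumpDisk is a hypothetical helper; it assumes only the Disk/DiskStream API visible in this diff):

import { createWriteStream } from 'fs';
import { Disk } from 'file-disk';

// Hypothetical helper: stream a disk's full contents to a file.
async function dumpDisk(disk: Disk, path: string): Promise<void> {
    // getStream() is async: it awaits getCapacity() to compute the end position.
    const stream = await disk.getStream();
    await new Promise<void>((resolve, reject) => {
        // Errors thrown by disk.read() arrive here, via __read's catch block.
        stream.on('error', reject);
        const out = createWriteStream(path);
        out.on('error', reject);
        out.on('finish', resolve);
        stream.pipe(out);
    });
}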
@@ -143,60 +140,48 @@ _read(size) {
 }
-read(buffer, _bufferOffset, length, fileOffset) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        const plan = this.createReadPlan(fileOffset, length);
-        return yield this.readAccordingToPlan(buffer, plan);
-    });
+async read(buffer, _bufferOffset, length, fileOffset) {
+    const plan = this.createReadPlan(fileOffset, length);
+    return await this.readAccordingToPlan(buffer, plan);
 }
-write(buffer, bufferOffset, length, fileOffset) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        if (this.recordWrites) {
-            const chunk = new diskchunk_1.BufferDiskChunk(buffer.slice(bufferOffset, bufferOffset + length), fileOffset);
-            this.insertDiskChunk(chunk);
-        }
-        else {
-            // Special case: we do not record writes but we may have recorded
-            // some discards. We want to remove any discard overlapping this
-            // write.
-            // In order to do this we do as if we were inserting a chunk: this
-            // will slice existing discards in this area if there are any.
-            const chunk = new diskchunk_1.DiscardDiskChunk(fileOffset, length);
-            // The `false` below means "don't insert the chunk into knownChunks"
-            this.insertDiskChunk(chunk, false);
-        }
-        if (this.readOnly) {
-            return { bytesWritten: length, buffer };
-        }
-        else {
-            return yield this._write(buffer, bufferOffset, length, fileOffset);
-        }
-    });
+async write(buffer, bufferOffset, length, fileOffset) {
+    if (this.recordWrites) {
+        const chunk = new diskchunk_1.BufferDiskChunk(buffer.slice(bufferOffset, bufferOffset + length), fileOffset);
+        await this.insertDiskChunk(chunk);
+    }
+    else {
+        // Special case: we do not record writes but we may have recorded
+        // some discards. We want to remove any discard overlapping this
+        // write.
+        // In order to do this we do as if we were inserting a chunk: this
+        // will slice existing discards in this area if there are any.
+        const chunk = new diskchunk_1.DiscardDiskChunk(fileOffset, length);
+        // The `false` below means "don't insert the chunk into knownChunks"
+        await this.insertDiskChunk(chunk, false);
+    }
+    if (this.readOnly) {
+        return { bytesWritten: length, buffer };
+    }
+    else {
+        return await this._write(buffer, bufferOffset, length, fileOffset);
+    }
 }
-flush() {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        if (!this.readOnly) {
-            return yield this._flush();
-        }
-    });
+async flush() {
+    if (!this.readOnly) {
+        return await this._flush();
+    }
 }
-discard(offset, length) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        this.insertDiskChunk(new diskchunk_1.DiscardDiskChunk(offset, length));
-    });
+async discard(offset, length) {
+    await this.insertDiskChunk(new diskchunk_1.DiscardDiskChunk(offset, length));
 }
-getCapacity() {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        if (this.capacity === null) {
-            this.capacity = yield this._getCapacity();
-        }
-        return this.capacity;
-    });
+async getCapacity() {
+    if (this.capacity === null) {
+        this.capacity = await this._getCapacity();
+    }
+    return this.capacity;
 }
-getStream(position = 0, length = null, highWaterMark = DEFAULT_HIGH_WATER_MARK) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        let end = yield this.getCapacity();
-        if (length !== null) {
-            end = Math.min(position + length, end);
-        }
-        return new DiskStream(this, end, highWaterMark, position);
-    });
+async getStream(position = 0, length = null, highWaterMark = DEFAULT_HIGH_WATER_MARK) {
+    let end = await this.getCapacity();
+    if (length !== null) {
+        end = Math.min(position + length, end);
+    }
+    return new DiskStream(this, end, highWaterMark, position);
 }
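Together these methods form the public Disk API, now written as plain async methods. A usage sketch with the exported BufferDisk (the values are made up; BufferDisk is assumed to take the backing buffer as its first constructor argument, as suggested by its _read/_write implementations further down):

import { BufferDisk } from 'file-disk';

async function example(): Promise<void> {
    // A 64 KiB in-memory disk image.
    const disk = new BufferDisk(Buffer.alloc(64 * 1024));
    console.log(await disk.getCapacity()); // 65536, cached after the first call

    // write(buffer, bufferOffset, length, fileOffset)
    const data = Buffer.from('hello');
    await disk.write(data, 0, data.length, 0);

    // read(buffer, _bufferOffset, length, fileOffset)
    const out = Buffer.alloc(5);
    const { bytesRead } = await disk.read(out, 0, out.length, 0);
    console.log(bytesRead, out.toString()); // 5 'hello'

    // Mark a region as discarded; flush() is a no-op for BufferDisk.
    await disk.discard(4096, 4096);
    await disk.flush();
}

example().catch(console.error);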
@@ -208,8 +193,11 @@ getDiscardedChunks() {
 }
-getRanges(blockSize) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        return Array.from(yield mapped_ranges_1.getRanges(this, blockSize));
-    });
+async getRanges(blockSize) {
+    return Array.from(await mapped_ranges_1.getRanges(this, blockSize));
 }
-insertDiskChunk(chunk, insert = true) {
+async insertDiskChunk(chunk, insert = true) {
+    const capacity = await this.getCapacity();
+    if (chunk.start < 0 || chunk.end > capacity) {
+        // Invalid chunk
+        return;
+    }
     let insertAt = 0;
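This insertDiskChunk guard is the "check that chunks are in the disk" fix named in the changelog below: the method is now async because it must await getCapacity(), and any chunk that starts before 0 or ends past the capacity is silently dropped instead of being recorded. A sketch of the observable difference (assuming the BufferDisk constructor used in the example above):

import { BufferDisk } from 'file-disk';

async function demo(): Promise<void> {
    const disk = new BufferDisk(Buffer.alloc(4096));
    // In range: recorded as a discarded chunk.
    await disk.discard(0, 512);
    // Ends past the 4096-byte capacity: with this change it is ignored,
    // where 6.0.2 would have recorded a chunk extending beyond the disk.
    await disk.discard(3840, 1024);
    console.log(disk.getDiscardedChunks().length); // expected: 1 instead of 2
}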
@@ -277,24 +265,22 @@ for (let i = 0; i < this.knownChunks.length; i++) {
 }
-readAccordingToPlan(buffer, plan) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        let offset = 0;
-        yield Bluebird.each(plan, (entry) => tslib_1.__awaiter(this, void 0, void 0, function* () {
-            if (entry instanceof diskchunk_1.DiskChunk) {
-                const data = entry.data();
-                const length = Math.min(data.length, buffer.length - offset);
-                data.copy(buffer, offset, 0, length);
-                offset += length;
-            }
-            else {
-                const length = entry[1] - entry[0] + 1;
-                yield this._read(buffer, offset, length, entry[0]);
-                if (this.recordReads) {
-                    const chunk = new diskchunk_1.BufferDiskChunk(Buffer.from(buffer.slice(offset, offset + length)), entry[0]);
-                    this.insertDiskChunk(chunk);
-                }
-                offset += length;
-            }
-        }));
-        return { bytesRead: offset, buffer };
-    });
+async readAccordingToPlan(buffer, plan) {
+    let offset = 0;
+    await Bluebird.each(plan, async (entry) => {
+        if (entry instanceof diskchunk_1.DiskChunk) {
+            const data = entry.data();
+            const length = Math.min(data.length, buffer.length - offset);
+            data.copy(buffer, offset, 0, length);
+            offset += length;
+        }
+        else {
+            const length = entry[1] - entry[0] + 1;
+            await this._read(buffer, offset, length, entry[0]);
+            if (this.recordReads) {
+                const chunk = new diskchunk_1.BufferDiskChunk(Buffer.from(buffer.slice(offset, offset + length)), entry[0]);
+                await this.insertDiskChunk(chunk);
+            }
+            offset += length;
+        }
+    });
+    return { bytesRead: offset, buffer };
 }
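For reference, the plan consumed above mixes two kinds of entries: DiskChunk instances whose data is already held in memory, and two-element [start, end] arrays (inclusive bounds, hence the entry[1] - entry[0] + 1 length) that must be fetched with _read. A descriptive sketch with hypothetical type names, not the package's actual declarations:

import { DiskChunk } from 'file-disk';

// Conceptual shape of a read plan: chunk data, or an inclusive byte interval.
type ReadPlanEntry = DiskChunk | [number, number];
type ReadPlan = ReadPlanEntry[];

// E.g. reading bytes 0..99 when bytes 40..59 were previously recorded might plan:
//   [ [0, 39], <DiskChunk for 40..59>, [60, 99] ]
// readAccordingToPlan copies the chunk from memory, _read()s the two intervals,
// and advances `offset` by each entry's length so the output buffer is contiguous.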
@@ -308,22 +294,14 @@ }
 }
-_getCapacity() {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        const stats = yield fs.fstat(this.fd);
-        return stats.size;
-    });
+async _getCapacity() {
+    const stats = await fs.fstat(this.fd);
+    return stats.size;
 }
-_read(buffer, bufferOffset, length, fileOffset) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        return yield fs.read(this.fd, buffer, bufferOffset, length, fileOffset);
-    });
+async _read(buffer, bufferOffset, length, fileOffset) {
+    return await fs.read(this.fd, buffer, bufferOffset, length, fileOffset);
 }
-_write(buffer, bufferOffset, length, fileOffset) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        return yield fs.write(this.fd, buffer, bufferOffset, length, fileOffset);
-    });
+async _write(buffer, bufferOffset, length, fileOffset) {
+    return await fs.write(this.fd, buffer, bufferOffset, length, fileOffset);
 }
-_flush() {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        yield fs.fdatasync(this.fd);
-    });
+async _flush() {
+    await fs.fdatasync(this.fd);
 }
@@ -337,23 +315,15 @@ }
 }
-_getCapacity() {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        return this.buffer.length;
-    });
+async _getCapacity() {
+    return this.buffer.length;
 }
-_read(buffer, bufferOffset, length, fileOffset) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        const bytesRead = this.buffer.copy(buffer, bufferOffset, fileOffset, fileOffset + length);
-        return { buffer, bytesRead };
-    });
+async _read(buffer, bufferOffset, length, fileOffset) {
+    const bytesRead = this.buffer.copy(buffer, bufferOffset, fileOffset, fileOffset + length);
+    return { buffer, bytesRead };
 }
-_write(buffer, bufferOffset, length, fileOffset) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        const bytesWritten = buffer.copy(this.buffer, fileOffset, bufferOffset, bufferOffset + length);
-        return { buffer, bytesWritten };
-    });
+async _write(buffer, bufferOffset, length, fileOffset) {
+    const bytesWritten = buffer.copy(this.buffer, fileOffset, bufferOffset, bufferOffset + length);
+    return { buffer, bytesWritten };
 }
-_flush() {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        // Nothing to do to flush a BufferDisk
-    });
+async _flush() {
+    // Nothing to do to flush a BufferDisk
 }
@@ -360,0 +330,0 @@ }
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.getRanges = void 0; | ||
const tslib_1 = require("tslib"); | ||
function* mapIterable(source, transform) { | ||
@@ -10,6 +9,4 @@ for (const value of source) { | ||
 }
-function getNotDiscardedIntervals(disk) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        return notDiscardedIntervalsIterator(disk.getDiscardedChunks(), yield disk.getCapacity());
-    });
+async function getNotDiscardedIntervals(disk) {
+    return notDiscardedIntervalsIterator(disk.getDiscardedChunks(), await disk.getCapacity());
 }
@@ -47,15 +44,13 @@ function* notDiscardedIntervalsIterator(discardedChunks, capacity) {
 }
-function getRanges(disk, blockSize) {
-    return tslib_1.__awaiter(this, void 0, void 0, function* () {
-        const intervals = yield getNotDiscardedIntervals(disk);
-        const blockSizedIntervals = mapIterable(intervals, (chunk) => [
-            Math.floor(chunk[0] / blockSize),
-            Math.floor(chunk[1] / blockSize),
-        ]);
-        const mergedBlockSizedIntervals = mergeIntervals(blockSizedIntervals);
-        return mapIterable(mergedBlockSizedIntervals, (interval) => {
-            const offset = interval[0] * blockSize;
-            const length = (interval[1] - interval[0] + 1) * blockSize;
-            return { offset, length };
-        });
-    });
+async function getRanges(disk, blockSize) {
+    const intervals = await getNotDiscardedIntervals(disk);
+    const blockSizedIntervals = mapIterable(intervals, (chunk) => [
+        Math.floor(chunk[0] / blockSize),
+        Math.floor(chunk[1] / blockSize),
+    ]);
+    const mergedBlockSizedIntervals = mergeIntervals(blockSizedIntervals);
+    return mapIterable(mergedBlockSizedIntervals, (interval) => {
+        const offset = interval[0] * blockSize;
+        const length = (interval[1] - interval[0] + 1) * blockSize;
+        return { offset, length };
+    });
@@ -62,0 +57,0 @@ }
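A worked example of the block math above: kept intervals are divided by blockSize with Math.floor to get inclusive block indices, merged, then scaled back to byte offsets and lengths. Sketch with made-up numbers, via the Disk.getRanges wrapper (same assumed BufferDisk constructor as earlier):

import { BufferDisk } from 'file-disk';

async function rangesDemo(): Promise<void> {
    const disk = new BufferDisk(Buffer.alloc(8192));
    // Discard the first half; bytes 4096..8191 stay mapped.
    await disk.discard(0, 4096);
    // With 512-byte blocks, the kept interval [4096, 8191] spans blocks
    // [8, 15], so (15 - 8 + 1) * 512 = 4096 bytes from offset 8 * 512.
    console.log(await disk.getRanges(512)); // expected: [ { offset: 4096, length: 4096 } ]
}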
@@ -7,2 +7,8 @@ # Change Log
+# v6.0.3
+## (2020-07-24)
+
+* Set typescript target to es2018 [Alexis Svinartchouk]
+* Check that chunks are in the disk [Alexis Svinartchouk]
+
 # v6.0.2
@@ -9,0 +15,0 @@ ## (2020-07-14)
 {
   "name": "file-disk",
-  "version": "6.0.2",
+  "version": "6.0.3-check-chunks-in-disk-f95c55e5552840e1962a66ad5945387360ff06c0",
   "description": "Handles reads / writes on disk image files.",
@@ -5,0 +5,0 @@ "author": "Petros Angelatos <petrosagg@balena.io>",
License Policy Violation (License): This package is not allowed per your license policy. Review the package's license to ensure compliance. Found 1 instance in 1 package.

No v1 (Quality): Package is not semver >=1. This means it is not stable and does not support ^ ranges. Found 1 instance in 1 package.