Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in · Demo · Install
Socket

@gmod/cram

Package Overview
Dependencies
Maintainers
0
Versions
51
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@gmod/cram - npm Package Compare versions

Comparing version 4.0.4 to 4.0.5

2

dist/cramFile/codecs/_base.d.ts

@@ -26,3 +26,3 @@ import { CramFileBlock } from '../file';

constructor(parameters: TParameters, dataType: TResult);
abstract decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): DataTypeMapping[TResult];
abstract decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): DataTypeMapping[TResult] | undefined;
}

@@ -15,7 +15,8 @@ "use strict";

const lengthCodec = this._getLengthCodec();
const arrayLength = lengthCodec.decode(slice, coreDataBlock, blocksByContentId, cursors);
const arrayLength = lengthCodec.decode(slice, coreDataBlock, blocksByContentId, cursors) || 0;
const dataCodec = this._getDataCodec();
const data = new Uint8Array(arrayLength);
for (let i = 0; i < arrayLength; i += 1) {
data[i] = dataCodec.decode(slice, coreDataBlock, blocksByContentId, cursors);
data[i] =
dataCodec.decode(slice, coreDataBlock, blocksByContentId, cursors) || 0;
}

@@ -22,0 +23,0 @@ return data;

@@ -8,5 +8,5 @@ import CramCodec, { Cursor, Cursors } from './_base';

constructor(parameters: ExternalCramEncoding['parameters'], dataType: 'int' | 'byte');
decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): number;
decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): number | undefined;
_decodeInt(contentBlock: CramFileBlock, cursor: Cursor): number;
_decodeByte(contentBlock: CramFileBlock, cursor: Cursor): number;
}

@@ -26,7 +26,4 @@ "use strict";

const contentBlock = blocksByContentId[blockContentId];
if (!contentBlock) {
throw new errors_1.CramMalformedError(`no block found with content ID ${blockContentId}}`);
}
const cursor = cursors.externalBlocks.getCursor(blockContentId);
return this._decodeData(contentBlock, cursor);
return contentBlock ? this._decodeData(contentBlock, cursor) : undefined;
}

@@ -33,0 +30,0 @@ _decodeInt(contentBlock, cursor) {

@@ -8,3 +8,8 @@ import CramSlice from '../slice';

constructor(file: CramFile, filePosition: number);
getHeader(): Promise<({
getHeader(): Promise<{
_size: number;
_endPosition: number;
numLandmarks: number;
landmarks: number[];
crc32?: number | undefined;
length: number;

@@ -15,20 +20,6 @@ refSeqId: number;

numBlocks: number;
numLandmarks: number;
numBases: number | undefined;
recordCounter: number;
numRecords: number;
} & {
_endPosition: number;
_size: number;
} & {
numLandmarks: number;
landmarks: number[];
crc32?: number | undefined;
} & {
_endPosition: number;
_size: number;
} & {
_size: number;
_endPosition: number;
}) | undefined>;
}>;
getCompressionHeaderBlock(): Promise<{

@@ -53,7 +44,12 @@ parsedContent: {

crc32?: number;
} | null | undefined>;
getFirstBlock(): Promise<import("../file").CramFileBlock | undefined>;
} | null>;
getFirstBlock(): Promise<import("../file").CramFileBlock>;
getCompressionScheme(): Promise<CramContainerCompressionScheme | undefined>;
getSlice(slicePosition: number, sliceSize: number): CramSlice;
_readContainerHeader(position: number): Promise<({
_readContainerHeader(position: number): Promise<{
_size: number;
_endPosition: number;
numLandmarks: number;
landmarks: number[];
crc32?: number | undefined;
length: number;

@@ -64,20 +60,6 @@ refSeqId: number;

numBlocks: number;
numLandmarks: number;
numBases: number | undefined;
recordCounter: number;
numRecords: number;
} & {
_endPosition: number;
_size: number;
} & {
numLandmarks: number;
landmarks: number[];
crc32?: number | undefined;
} & {
_endPosition: number;
_size: number;
} & {
_size: number;
_endPosition: number;
}) | undefined>;
}>;
}

@@ -24,3 +24,3 @@ "use strict";

// header
if (!containerHeader?.numRecords) {
if (!containerHeader.numRecords) {
return null;

@@ -31,5 +31,2 @@ }

const block = await this.getFirstBlock();
if (block === undefined) {
return undefined;
}
if (block.contentType !== 'COMPRESSION_HEADER') {

@@ -46,5 +43,2 @@ throw new errors_1.CramMalformedError(`invalid content type ${block.contentType} in compression header block`);

const containerHeader = await this.getHeader();
if (!containerHeader) {
return undefined;
}
return this.file.readBlock(containerHeader._endPosition);

@@ -70,7 +64,2 @@ }

const { cramContainerHeader1, cramContainerHeader2 } = sectionParsers;
const { size: fileSize } = await this.file.stat();
if (position >= fileSize) {
console.warn(`pos:${position}>=fileSize:${fileSize} in cram container`);
return undefined;
}
// parse the container header. do it in 2 pieces because you cannot tell

@@ -81,7 +70,2 @@ // how much to buffer until you read numLandmarks

const numLandmarksSize = (0, util_1.itf8Size)(header1.numLandmarks);
if (position + header1.length >= fileSize) {
// header indicates container goes beyond fileSize
console.warn(`container at ${position} is beyond fileSize:${fileSize}, skipping`);
return undefined;
}
const bytes2 = await this.file.read(cramContainerHeader2.maxLength(header1.numLandmarks), position + header1._size - numLandmarksSize);

@@ -92,7 +76,8 @@ const header2 = (0, util_1.parseItem)(bytes2, cramContainerHeader2.parser);

}
const completeHeader = Object.assign(header1, header2, {
return {
...header1,
...header2,
_size: header1._size + header2._size - numLandmarksSize,
_endPosition: header1._size + header2._size - numLandmarksSize + position,
});
return completeHeader;
};
}

@@ -99,0 +84,0 @@ }

@@ -35,3 +35,2 @@ import QuickLRU from 'quick-lru';

constructor(args: CramFileArgs);
stat(): Promise<import("generic-filehandle2").Stats>;
read(length: number, position: number): Promise<Uint8Array<ArrayBuffer>>;

@@ -56,6 +55,9 @@ getDefinition(): Promise<{

* @returns {Promise[number]} the number of containers in the file
*
* note: this is currently used only in unit tests, and after removing file
* length check, relies on a try catch to read return an error to break
*/
containerCount(): Promise<number | undefined>;
getContainerAtPosition(position: number): CramContainer;
readBlockHeader(position: number): Promise<({
readBlockHeader(position: number): Promise<{
uncompressedSize: number;

@@ -69,3 +71,3 @@ compressedSize: number;

_size: number;
}) | undefined>;
}>;
_parseSection<T>(section: {

@@ -77,8 +79,8 @@ maxLength: number;

};
}, position: number, size?: number, preReadBuffer?: Uint8Array): Promise<(T & {
}, position: number, size?: number, preReadBuffer?: Uint8Array): Promise<T & {
_endPosition: number;
_size: number;
}) | undefined>;
}>;
_uncompress(compressionMethod: CompressionMethod, inputBuffer: Uint8Array, uncompressedSize: number): Promise<any>;
readBlock(position: number): Promise<CramFileBlock | undefined>;
readBlock(position: number): Promise<CramFileBlock>;
}

@@ -94,7 +94,2 @@ "use strict";

}
// can just stat this object like a filehandle
stat() {
return this.file.stat();
}
// can just stat this object like a filehandle
read(length, position) {

@@ -120,15 +115,10 @@ return this.file.read(length, position);

const firstBlock = await firstContainer.getFirstBlock();
if (firstBlock === undefined) {
return (0, sam_1.parseHeaderText)('');
}
else {
const content = firstBlock.content;
const dataView = new DataView(content.buffer);
const headerLength = dataView.getInt32(0, true);
const textStart = 4;
const decoder = new TextDecoder('utf8');
const text = decoder.decode(content.subarray(textStart, textStart + headerLength));
this.header = text;
return (0, sam_1.parseHeaderText)(text);
}
const content = firstBlock.content;
const dataView = new DataView(content.buffer);
const headerLength = dataView.getInt32(0, true);
const textStart = 4;
const decoder = new TextDecoder('utf8');
const text = decoder.decode(content.subarray(textStart, textStart + headerLength));
this.header = text;
return (0, sam_1.parseHeaderText)(text);
}

@@ -143,4 +133,2 @@ async getHeaderText() {

let position = sectionParsers.cramFileDefinition.maxLength;
const { size: fileSize } = await this.file.stat();
const { cramContainerHeader1 } = sectionParsers;
// skip with a series of reads to the proper container

@@ -151,10 +139,7 @@ let currentContainer;

// and have not found that container, it does not exist
if (position + cramContainerHeader1.maxLength + 8 >= fileSize) {
return undefined;
}
// if (position + cramContainerHeader1.maxLength + 8 >= fileSize) {
// return undefined
// }
currentContainer = this.getContainerAtPosition(position);
const currentHeader = await currentContainer.getHeader();
if (!currentHeader) {
throw new errors_1.CramMalformedError(`container ${containerNumber} not found in file`);
}
// if this is the first container, read all the blocks in the container

@@ -167,5 +152,2 @@ // to determine its length, because we cannot trust the container

const block = await this.readBlock(position);
if (block === undefined) {
return undefined;
}
position = block._endPosition;

@@ -194,2 +176,5 @@ }

* @returns {Promise[number]} the number of containers in the file
*
* note: this is currently used only in unit tests, and after removing file
* length check, relies on a try catch to read return an error to break
*/

@@ -199,31 +184,30 @@ async containerCount() {

const sectionParsers = (0, sectionParsers_1.getSectionParsers)(majorVersion);
const { size: fileSize } = await this.file.stat();
const { cramContainerHeader1 } = sectionParsers;
let containerCount = 0;
let position = sectionParsers.cramFileDefinition.maxLength;
while (position + cramContainerHeader1.maxLength + 8 < fileSize) {
const currentHeader = await this.getContainerAtPosition(position).getHeader();
if (!currentHeader) {
break;
}
// if this is the first container, read all the blocks in the container,
// because we cannot trust the container header's given length due to a
// bug somewhere in htslib
if (containerCount === 0) {
position = currentHeader._endPosition;
for (let j = 0; j < currentHeader.numBlocks; j++) {
const block = await this.readBlock(position);
if (block === undefined) {
return undefined;
try {
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
while (true) {
const currentHeader = await this.getContainerAtPosition(position).getHeader();
// if this is the first container, read all the blocks in the container,
// because we cannot trust the container header's given length due to a
// bug somewhere in htslib
if (containerCount === 0) {
position = currentHeader._endPosition;
for (let j = 0; j < currentHeader.numBlocks; j++) {
const block = await this.readBlock(position);
position = block._endPosition;
}
position = block._endPosition;
}
else {
// otherwise, just traverse to the next container using the container's
// length
position += currentHeader._size + currentHeader.length;
}
containerCount += 1;
}
else {
// otherwise, just traverse to the next container using the container's
// length
position += currentHeader._size + currentHeader.length;
}
containerCount += 1;
}
catch (e) {
containerCount--;
/* do nothing */
}
return containerCount;

@@ -238,6 +222,2 @@ }

const { cramBlockHeader } = sectionParsers;
const { size: fileSize } = await this.file.stat();
if (position + cramBlockHeader.maxLength >= fileSize) {
return undefined;
}
const buffer = await this.file.read(cramBlockHeader.maxLength, position);

@@ -247,13 +227,3 @@ return (0, util_2.parseItem)(buffer, cramBlockHeader.parser, 0, position);

async _parseSection(section, position, size = section.maxLength, preReadBuffer) {
let buffer;
if (preReadBuffer) {
buffer = preReadBuffer;
}
else {
const { size: fileSize } = await this.file.stat();
if (position + size >= fileSize) {
return undefined;
}
buffer = await this.file.read(size, position);
}
const buffer = preReadBuffer ?? (await this.file.read(size, position));
const data = (0, util_2.parseItem)(buffer, section.parser, 0, position);

@@ -314,5 +284,2 @@ if (data._size !== size) {

const blockHeader = await this.readBlockHeader(position);
if (blockHeader === undefined) {
return undefined;
}
const blockContentPosition = blockHeader._endPosition;

@@ -332,5 +299,2 @@ const d = await this.file.read(blockHeader.compressedSize, blockContentPosition);

const crc = await this._parseSection(sectionParsers.cramBlockCrc32, blockContentPosition + blockHeader.compressedSize);
if (crc === undefined) {
return undefined;
}
block.crc32 = crc.crc32;

@@ -337,0 +301,0 @@ // check the block data crc32

@@ -566,4 +566,5 @@ "use strict";

offset += 4;
// reference sequence identifier, -1 for unmapped reads, -2 for multiple
// reference sequences
// reference sequence identifier:
// -1 for unmapped reads,
// -2 for multiple reference sequences
const [refSeqId, newOffset1] = (0, util_1.parseItf8)(buffer, offset);

@@ -570,0 +571,0 @@ offset += newOffset1;

@@ -7,6 +7,6 @@ import { Cursors, DataTypeMapping } from '../codecs/_base';

import { CramFileBlock } from '../file';
export type DataSeriesDecoder = <T extends DataSeriesEncodingKey>(dataSeriesName: T) => DataTypeMapping[DataSeriesTypes[T]];
export type DataSeriesDecoder = <T extends DataSeriesEncodingKey>(dataSeriesName: T) => DataTypeMapping[DataSeriesTypes[T]] | undefined;
export default function decodeRecord(slice: CramSlice, decodeDataSeries: DataSeriesDecoder, compressionScheme: CramContainerCompressionScheme, sliceHeader: SliceHeader, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors, majorVersion: number, recordNumber: number): {
readLength: number;
sequenceId: number;
sequenceId: number | undefined;
cramFlags: number;

@@ -13,0 +13,0 @@ flags: number;

@@ -183,5 +183,4 @@ "use strict";

let flags = decodeDataSeries('BF');
// note: the C data type of compressionFlags is byte in cram v1
// and int32 in cram v2+, but that does not matter for us here
// in javascript land.
// note: the C data type of compressionFlags is byte in cram v1 and int32 in
// cram v2+, but that does not matter for us here in javascript land.
const cramFlags = decodeDataSeries('CF');

@@ -262,3 +261,7 @@ if (!(0, sectionParsers_1.isMappedSliceHeader)(sliceHeader.parsedContent)) {

tags[tagName] =
typeof tagData === 'number' ? tagData : parseTagData(tagType, tagData);
tagData === undefined
? undefined
: typeof tagData === 'number'
? tagData
: parseTagData(tagType, tagData);
}

@@ -265,0 +268,0 @@ let readFeatures;

@@ -142,9 +142,3 @@ "use strict";

const containerHeader = await this.container.getHeader();
if (!containerHeader) {
throw new Error('no container header detected');
}
const header = await this.file.readBlock(containerHeader._endPosition + this.containerPosition);
if (header === undefined) {
throw new Error('block header undefined');
}
if (header.contentType === 'MAPPED_SLICE_HEADER') {

@@ -170,5 +164,2 @@ const content = (0, util_1.parseItem)(header.content, sectionParsers.cramMappedSliceHeader.parser, 0, containerHeader._endPosition);

const block = await this.file.readBlock(blockPosition);
if (block === undefined) {
throw new Error('block undefined');
}
blocks[i] = block;

@@ -305,5 +296,3 @@ blockPosition = blocks[i]._endPosition;

}
// console.log(dataSeriesName, Object.getPrototypeOf(codec))
const decoded = codec.decode(this, coreDataBlock, blocksByContentId, cursors);
return decoded;
return codec.decode(this, coreDataBlock, blocksByContentId, cursors);
};

@@ -333,7 +322,16 @@ const records = new Array(sliceHeader.parsedContent.numRecords);

// interpret `recordsToNextFragment` attributes to make standard `mate`
// objects Resolve mate pair cross-references between records in this slice
// objects
//
// Resolve mate pair cross-references between records in this slice
for (let i = 0; i < records.length; i += 1) {
const { mateRecordNumber } = records[i];
if (mateRecordNumber !== undefined && mateRecordNumber >= 0) {
associateIntraSliceMate(records, i, records[i], records[mateRecordNumber]);
const r = records[i];
// check for !!r added after removal of "stat" file size check: found
// some undefined entries
if (r) {
const { mateRecordNumber } = r;
if (mateRecordNumber !== undefined &&
mateRecordNumber >= 0 &&
records[mateRecordNumber]) {
associateIntraSliceMate(records, i, r, records[mateRecordNumber]);
}
}

@@ -340,0 +338,0 @@ }

@@ -0,1 +1,6 @@

export declare const TWO_PWR_16_DBL: number;
export declare const TWO_PWR_32_DBL: number;
export declare const TWO_PWR_64_DBL: number;
export declare const TWO_PWR_24_DBL: number;
export declare const TWO_PWR_56_DBL: number;
export declare function itf8Size(v: number): 1 | 2 | 3 | 4 | 5;

@@ -2,0 +7,0 @@ export declare function parseItf8(buffer: Uint8Array, initialOffset: number): readonly [number, number];

@@ -6,2 +6,3 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
exports.TWO_PWR_56_DBL = exports.TWO_PWR_24_DBL = exports.TWO_PWR_64_DBL = exports.TWO_PWR_32_DBL = exports.TWO_PWR_16_DBL = void 0;
exports.itf8Size = itf8Size;

@@ -13,5 +14,8 @@ exports.parseItf8 = parseItf8;

exports.sequenceMD5 = sequenceMD5;
const longfn_1 = require("longfn");
const md5_1 = __importDefault(require("md5"));
const getBits_1 = require("./codecs/getBits");
exports.TWO_PWR_16_DBL = 1 << 16;
exports.TWO_PWR_32_DBL = exports.TWO_PWR_16_DBL * exports.TWO_PWR_16_DBL;
exports.TWO_PWR_64_DBL = exports.TWO_PWR_32_DBL * exports.TWO_PWR_32_DBL;
exports.TWO_PWR_24_DBL = 1 << 24;
exports.TWO_PWR_56_DBL = exports.TWO_PWR_24_DBL * exports.TWO_PWR_32_DBL;
function itf8Size(v) {

@@ -36,25 +40,30 @@ if (!(v & ~0x7f)) {

let result;
// Single byte value (0xxxxxxx)
if (countFlags < 0x80) {
result = countFlags;
offset = offset + 1;
offset += 1;
}
// Two byte value (10xxxxxx)
else if (countFlags < 0xc0) {
result = ((countFlags << 8) | buffer[offset + 1]) & 0x3fff;
offset = offset + 2;
result = ((countFlags & 0x3f) << 8) | buffer[offset + 1];
offset += 2;
}
// Three byte value (110xxxxx)
else if (countFlags < 0xe0) {
result =
((countFlags << 16) | (buffer[offset + 1] << 8) | buffer[offset + 2]) &
0x1fffff;
offset = offset + 3;
((countFlags & 0x1f) << 16) |
(buffer[offset + 1] << 8) |
buffer[offset + 2];
offset += 3;
}
// Four byte value (1110xxxx)
else if (countFlags < 0xf0) {
result =
((countFlags << 24) |
((countFlags & 0x0f) << 24) |
(buffer[offset + 1] << 16) |
(buffer[offset + 2] << 8) |
buffer[offset + 3]) &
0x0fffffff;
offset = offset + 4;
buffer[offset + 3];
offset += 4;
}
// Five byte value (11110xxx)
else {

@@ -67,56 +76,53 @@ result =

(buffer[offset + 4] & 0x0f);
// x=((0xff & 0x0f)<<28) | (0xff<<20) | (0xff<<12) | (0xff<<4) | (0x0f & 0x0f);
// TODO *val_p = uv < 0x80000000UL ? uv : -((int32_t) (0xffffffffUL - uv)) - 1;
offset = offset + 5;
offset += 5;
}
if (offset > buffer.length) {
throw new getBits_1.CramBufferOverrunError('Attempted to read beyond end of buffer; this file seems truncated.');
}
return [result, offset - initialOffset];
}
function parseLtf8(buffer, initialOffset) {
const dataView = new DataView(buffer.buffer);
let offset = initialOffset;
const countFlags = buffer[offset];
let n;
let value;
// Single byte value < 0x80
if (countFlags < 0x80) {
n = countFlags;
value = countFlags;
offset += 1;
}
// Two byte value < 0xC0
else if (countFlags < 0xc0) {
n = ((buffer[offset] << 8) | buffer[offset + 1]) & 0x3fff;
value = ((countFlags << 8) | buffer[offset + 1]) & 0x3fff;
offset += 2;
}
// Three byte value < 0xE0
else if (countFlags < 0xe0) {
n =
((buffer[offset] << 16) |
value =
((countFlags & 0x3f) << 16) |
(buffer[offset + 1] << 8) |
buffer[offset + 2]) &
0x1fffff;
n = ((countFlags & 63) << 16) | dataView.getUint16(offset + 1, true);
buffer[offset + 2];
offset += 3;
}
// Four byte value < 0xF0
else if (countFlags < 0xf0) {
n =
((buffer[offset] << 24) |
value =
((countFlags & 0x1f) << 24) |
(buffer[offset + 1] << 16) |
(buffer[offset + 2] << 8) |
buffer[offset + 3]) &
0x0fffffff;
buffer[offset + 3];
offset += 4;
}
// Five byte value < 0xF8
else if (countFlags < 0xf8) {
n =
((buffer[offset] & 15) * 2 ** 32 + (buffer[offset + 1] << 24)) |
((buffer[offset + 2] << 16) |
value =
(buffer[offset] & 0x0f) * exports.TWO_PWR_32_DBL +
((buffer[offset + 1] << 24) |
(buffer[offset + 2] << 16) |
(buffer[offset + 3] << 8) |
buffer[offset + 4]);
// TODO *val_p = uv < 0x80000000UL ? uv : -((int32_t) (0xffffffffUL - uv)) - 1;
offset += 5;
}
// Six byte value < 0xFC
else if (countFlags < 0xfc) {
n =
((((buffer[offset] & 7) << 8) | buffer[offset + 1]) * 2 ** 32 +
(buffer[offset + 2] << 24)) |
((buffer[offset + 3] << 16) |
value =
(((buffer[offset] & 0x07) << 8) | buffer[offset + 1]) * exports.TWO_PWR_32_DBL +
((buffer[offset + 2] << 24) |
(buffer[offset + 3] << 16) |
(buffer[offset + 4] << 8) |

@@ -126,10 +132,11 @@ buffer[offset + 5]);

}
// Seven byte value < 0xFE
else if (countFlags < 0xfe) {
n =
((((buffer[offset] & 3) << 16) |
value =
(((buffer[offset] & 0x03) << 16) |
(buffer[offset + 1] << 8) |
buffer[offset + 2]) *
2 ** 32 +
(buffer[offset + 3] << 24)) |
((buffer[offset + 4] << 16) |
exports.TWO_PWR_32_DBL +
((buffer[offset + 3] << 24) |
(buffer[offset + 4] << 16) |
(buffer[offset + 5] << 8) |

@@ -139,11 +146,32 @@ buffer[offset + 6]);

}
// Eight byte value < 0xFF
else if (countFlags < 0xff) {
n = (0, longfn_1.toNumber)((0, longfn_1.fromBytesBE)(buffer.slice(offset + 1, offset + 8), false));
value =
((buffer[offset + 1] << 24) |
(buffer[offset + 2] << 16) |
(buffer[offset + 3] << 8) |
buffer[offset + 4]) *
exports.TWO_PWR_32_DBL +
((buffer[offset + 5] << 24) |
(buffer[offset + 6] << 16) |
(buffer[offset + 7] << 8) |
buffer[offset + 8]);
offset += 8;
}
// Nine byte value
else {
n = (0, longfn_1.toNumber)((0, longfn_1.fromBytesBE)(buffer.subarray(offset + 1, offset + 9), false));
value =
buffer[offset + 1] * exports.TWO_PWR_56_DBL +
((buffer[offset + 2] << 24) |
(buffer[offset + 3] << 16) |
(buffer[offset + 4] << 8) |
buffer[offset + 5]) *
exports.TWO_PWR_32_DBL +
((buffer[offset + 6] << 24) |
(buffer[offset + 7] << 16) |
(buffer[offset + 8] << 8) |
buffer[offset + 9]);
offset += 9;
}
return [n, offset - initialOffset];
return [value, offset - initialOffset];
}

@@ -158,6 +186,2 @@ function parseItem(buffer, parser, startBufferPosition = 0, startFilePosition = 0) {

}
// this would be nice as a decorator, but i'm a little worried about babel
// support for it going away or changing. memoizes a method in the stupidest
// possible way, with no regard for the arguments. actually, this only works
// on methods that take no arguments
function tinyMemoize(_class, methodName) {

@@ -164,0 +188,0 @@ const method = _class.prototype[methodName];

@@ -26,3 +26,3 @@ import { CramFileBlock } from '../file';

constructor(parameters: TParameters, dataType: TResult);
abstract decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): DataTypeMapping[TResult];
abstract decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): DataTypeMapping[TResult] | undefined;
}

@@ -10,7 +10,8 @@ import CramCodec from './_base';

const lengthCodec = this._getLengthCodec();
const arrayLength = lengthCodec.decode(slice, coreDataBlock, blocksByContentId, cursors);
const arrayLength = lengthCodec.decode(slice, coreDataBlock, blocksByContentId, cursors) || 0;
const dataCodec = this._getDataCodec();
const data = new Uint8Array(arrayLength);
for (let i = 0; i < arrayLength; i += 1) {
data[i] = dataCodec.decode(slice, coreDataBlock, blocksByContentId, cursors);
data[i] =
dataCodec.decode(slice, coreDataBlock, blocksByContentId, cursors) || 0;
}

@@ -17,0 +18,0 @@ return data;

@@ -8,5 +8,5 @@ import CramCodec, { Cursor, Cursors } from './_base';

constructor(parameters: ExternalCramEncoding['parameters'], dataType: 'int' | 'byte');
decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): number;
decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): number | undefined;
_decodeInt(contentBlock: CramFileBlock, cursor: Cursor): number;
_decodeByte(contentBlock: CramFileBlock, cursor: Cursor): number;
}
import CramCodec from './_base';
import { CramMalformedError, CramUnimplementedError } from '../../errors';
import { CramUnimplementedError } from '../../errors';
import { parseItf8 } from '../util';

@@ -21,7 +21,4 @@ import { CramBufferOverrunError } from './getBits';

const contentBlock = blocksByContentId[blockContentId];
if (!contentBlock) {
throw new CramMalformedError(`no block found with content ID ${blockContentId}}`);
}
const cursor = cursors.externalBlocks.getCursor(blockContentId);
return this._decodeData(contentBlock, cursor);
return contentBlock ? this._decodeData(contentBlock, cursor) : undefined;
}

@@ -28,0 +25,0 @@ _decodeInt(contentBlock, cursor) {

@@ -8,3 +8,8 @@ import CramSlice from '../slice';

constructor(file: CramFile, filePosition: number);
getHeader(): Promise<({
getHeader(): Promise<{
_size: number;
_endPosition: number;
numLandmarks: number;
landmarks: number[];
crc32?: number | undefined;
length: number;

@@ -15,20 +20,6 @@ refSeqId: number;

numBlocks: number;
numLandmarks: number;
numBases: number | undefined;
recordCounter: number;
numRecords: number;
} & {
_endPosition: number;
_size: number;
} & {
numLandmarks: number;
landmarks: number[];
crc32?: number | undefined;
} & {
_endPosition: number;
_size: number;
} & {
_size: number;
_endPosition: number;
}) | undefined>;
}>;
getCompressionHeaderBlock(): Promise<{

@@ -53,7 +44,12 @@ parsedContent: {

crc32?: number;
} | null | undefined>;
getFirstBlock(): Promise<import("../file").CramFileBlock | undefined>;
} | null>;
getFirstBlock(): Promise<import("../file").CramFileBlock>;
getCompressionScheme(): Promise<CramContainerCompressionScheme | undefined>;
getSlice(slicePosition: number, sliceSize: number): CramSlice;
_readContainerHeader(position: number): Promise<({
_readContainerHeader(position: number): Promise<{
_size: number;
_endPosition: number;
numLandmarks: number;
landmarks: number[];
crc32?: number | undefined;
length: number;

@@ -64,20 +60,6 @@ refSeqId: number;

numBlocks: number;
numLandmarks: number;
numBases: number | undefined;
recordCounter: number;
numRecords: number;
} & {
_endPosition: number;
_size: number;
} & {
numLandmarks: number;
landmarks: number[];
crc32?: number | undefined;
} & {
_endPosition: number;
_size: number;
} & {
_size: number;
_endPosition: number;
}) | undefined>;
}>;
}

@@ -19,3 +19,3 @@ import { CramMalformedError } from '../../errors';

// header
if (!containerHeader?.numRecords) {
if (!containerHeader.numRecords) {
return null;

@@ -26,5 +26,2 @@ }

const block = await this.getFirstBlock();
if (block === undefined) {
return undefined;
}
if (block.contentType !== 'COMPRESSION_HEADER') {

@@ -41,5 +38,2 @@ throw new CramMalformedError(`invalid content type ${block.contentType} in compression header block`);

const containerHeader = await this.getHeader();
if (!containerHeader) {
return undefined;
}
return this.file.readBlock(containerHeader._endPosition);

@@ -65,7 +59,2 @@ }

const { cramContainerHeader1, cramContainerHeader2 } = sectionParsers;
const { size: fileSize } = await this.file.stat();
if (position >= fileSize) {
console.warn(`pos:${position}>=fileSize:${fileSize} in cram container`);
return undefined;
}
// parse the container header. do it in 2 pieces because you cannot tell

@@ -76,7 +65,2 @@ // how much to buffer until you read numLandmarks

const numLandmarksSize = itf8Size(header1.numLandmarks);
if (position + header1.length >= fileSize) {
// header indicates container goes beyond fileSize
console.warn(`container at ${position} is beyond fileSize:${fileSize}, skipping`);
return undefined;
}
const bytes2 = await this.file.read(cramContainerHeader2.maxLength(header1.numLandmarks), position + header1._size - numLandmarksSize);

@@ -87,7 +71,8 @@ const header2 = parseItem(bytes2, cramContainerHeader2.parser);

}
const completeHeader = Object.assign(header1, header2, {
return {
...header1,
...header2,
_size: header1._size + header2._size - numLandmarksSize,
_endPosition: header1._size + header2._size - numLandmarksSize + position,
});
return completeHeader;
};
}

@@ -94,0 +79,0 @@ }

@@ -35,3 +35,2 @@ import QuickLRU from 'quick-lru';

constructor(args: CramFileArgs);
stat(): Promise<import("generic-filehandle2").Stats>;
read(length: number, position: number): Promise<Uint8Array<ArrayBuffer>>;

@@ -56,6 +55,9 @@ getDefinition(): Promise<{

* @returns {Promise[number]} the number of containers in the file
*
* note: this is currently used only in unit tests, and after removing file
* length check, relies on a try catch to read return an error to break
*/
containerCount(): Promise<number | undefined>;
getContainerAtPosition(position: number): CramContainer;
readBlockHeader(position: number): Promise<({
readBlockHeader(position: number): Promise<{
uncompressedSize: number;

@@ -69,3 +71,3 @@ compressedSize: number;

_size: number;
}) | undefined>;
}>;
_parseSection<T>(section: {

@@ -77,8 +79,8 @@ maxLength: number;

};
}, position: number, size?: number, preReadBuffer?: Uint8Array): Promise<(T & {
}, position: number, size?: number, preReadBuffer?: Uint8Array): Promise<T & {
_endPosition: number;
_size: number;
}) | undefined>;
}>;
_uncompress(compressionMethod: CompressionMethod, inputBuffer: Uint8Array, uncompressedSize: number): Promise<any>;
readBlock(position: number): Promise<CramFileBlock | undefined>;
readBlock(position: number): Promise<CramFileBlock>;
}

@@ -56,7 +56,2 @@ import bzip2 from 'bzip2';

}
// can just stat this object like a filehandle
stat() {
return this.file.stat();
}
// can just stat this object like a filehandle
read(length, position) {

@@ -82,15 +77,10 @@ return this.file.read(length, position);

const firstBlock = await firstContainer.getFirstBlock();
if (firstBlock === undefined) {
return parseHeaderText('');
}
else {
const content = firstBlock.content;
const dataView = new DataView(content.buffer);
const headerLength = dataView.getInt32(0, true);
const textStart = 4;
const decoder = new TextDecoder('utf8');
const text = decoder.decode(content.subarray(textStart, textStart + headerLength));
this.header = text;
return parseHeaderText(text);
}
const content = firstBlock.content;
const dataView = new DataView(content.buffer);
const headerLength = dataView.getInt32(0, true);
const textStart = 4;
const decoder = new TextDecoder('utf8');
const text = decoder.decode(content.subarray(textStart, textStart + headerLength));
this.header = text;
return parseHeaderText(text);
}

@@ -105,4 +95,2 @@ async getHeaderText() {

let position = sectionParsers.cramFileDefinition.maxLength;
const { size: fileSize } = await this.file.stat();
const { cramContainerHeader1 } = sectionParsers;
// skip with a series of reads to the proper container

@@ -113,10 +101,7 @@ let currentContainer;

// and have not found that container, it does not exist
if (position + cramContainerHeader1.maxLength + 8 >= fileSize) {
return undefined;
}
// if (position + cramContainerHeader1.maxLength + 8 >= fileSize) {
// return undefined
// }
currentContainer = this.getContainerAtPosition(position);
const currentHeader = await currentContainer.getHeader();
if (!currentHeader) {
throw new CramMalformedError(`container ${containerNumber} not found in file`);
}
// if this is the first container, read all the blocks in the container

@@ -129,5 +114,2 @@ // to determine its length, because we cannot trust the container

const block = await this.readBlock(position);
if (block === undefined) {
return undefined;
}
position = block._endPosition;

@@ -156,2 +138,5 @@ }

* @returns {Promise[number]} the number of containers in the file
*
* note: this is currently used only in unit tests, and after removing file
* length check, relies on a try catch to read return an error to break
*/

@@ -161,31 +146,30 @@ async containerCount() {

const sectionParsers = getSectionParsers(majorVersion);
const { size: fileSize } = await this.file.stat();
const { cramContainerHeader1 } = sectionParsers;
let containerCount = 0;
let position = sectionParsers.cramFileDefinition.maxLength;
while (position + cramContainerHeader1.maxLength + 8 < fileSize) {
const currentHeader = await this.getContainerAtPosition(position).getHeader();
if (!currentHeader) {
break;
}
// if this is the first container, read all the blocks in the container,
// because we cannot trust the container header's given length due to a
// bug somewhere in htslib
if (containerCount === 0) {
position = currentHeader._endPosition;
for (let j = 0; j < currentHeader.numBlocks; j++) {
const block = await this.readBlock(position);
if (block === undefined) {
return undefined;
try {
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
while (true) {
const currentHeader = await this.getContainerAtPosition(position).getHeader();
// if this is the first container, read all the blocks in the container,
// because we cannot trust the container header's given length due to a
// bug somewhere in htslib
if (containerCount === 0) {
position = currentHeader._endPosition;
for (let j = 0; j < currentHeader.numBlocks; j++) {
const block = await this.readBlock(position);
position = block._endPosition;
}
position = block._endPosition;
}
else {
// otherwise, just traverse to the next container using the container's
// length
position += currentHeader._size + currentHeader.length;
}
containerCount += 1;
}
else {
// otherwise, just traverse to the next container using the container's
// length
position += currentHeader._size + currentHeader.length;
}
containerCount += 1;
}
catch (e) {
containerCount--;
/* do nothing */
}
return containerCount;

@@ -200,6 +184,2 @@ }

const { cramBlockHeader } = sectionParsers;
const { size: fileSize } = await this.file.stat();
if (position + cramBlockHeader.maxLength >= fileSize) {
return undefined;
}
const buffer = await this.file.read(cramBlockHeader.maxLength, position);

@@ -209,13 +189,3 @@ return parseItem(buffer, cramBlockHeader.parser, 0, position);

async _parseSection(section, position, size = section.maxLength, preReadBuffer) {
let buffer;
if (preReadBuffer) {
buffer = preReadBuffer;
}
else {
const { size: fileSize } = await this.file.stat();
if (position + size >= fileSize) {
return undefined;
}
buffer = await this.file.read(size, position);
}
const buffer = preReadBuffer ?? (await this.file.read(size, position));
const data = parseItem(buffer, section.parser, 0, position);

@@ -276,5 +246,2 @@ if (data._size !== size) {

const blockHeader = await this.readBlockHeader(position);
if (blockHeader === undefined) {
return undefined;
}
const blockContentPosition = blockHeader._endPosition;

@@ -294,5 +261,2 @@ const d = await this.file.read(blockHeader.compressedSize, blockContentPosition);

const crc = await this._parseSection(sectionParsers.cramBlockCrc32, blockContentPosition + blockHeader.compressedSize);
if (crc === undefined) {
return undefined;
}
block.crc32 = crc.crc32;

@@ -299,0 +263,0 @@ // check the block data crc32

@@ -557,4 +557,5 @@ import { parseItf8, parseLtf8 } from './util';

offset += 4;
// reference sequence identifier, -1 for unmapped reads, -2 for multiple
// reference sequences
// reference sequence identifier:
// -1 for unmapped reads,
// -2 for multiple reference sequences
const [refSeqId, newOffset1] = parseItf8(buffer, offset);

@@ -561,0 +562,0 @@ offset += newOffset1;

@@ -7,6 +7,6 @@ import { Cursors, DataTypeMapping } from '../codecs/_base';

import { CramFileBlock } from '../file';
export type DataSeriesDecoder = <T extends DataSeriesEncodingKey>(dataSeriesName: T) => DataTypeMapping[DataSeriesTypes[T]];
export type DataSeriesDecoder = <T extends DataSeriesEncodingKey>(dataSeriesName: T) => DataTypeMapping[DataSeriesTypes[T]] | undefined;
export default function decodeRecord(slice: CramSlice, decodeDataSeries: DataSeriesDecoder, compressionScheme: CramContainerCompressionScheme, sliceHeader: SliceHeader, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors, majorVersion: number, recordNumber: number): {
readLength: number;
sequenceId: number;
sequenceId: number | undefined;
cramFlags: number;

@@ -13,0 +13,0 @@ flags: number;

@@ -180,5 +180,4 @@ import { CramMalformedError } from '../../errors';

let flags = decodeDataSeries('BF');
// note: the C data type of compressionFlags is byte in cram v1
// and int32 in cram v2+, but that does not matter for us here
// in javascript land.
// note: the C data type of compressionFlags is byte in cram v1 and int32 in
// cram v2+, but that does not matter for us here in javascript land.
const cramFlags = decodeDataSeries('CF');

@@ -259,3 +258,7 @@ if (!isMappedSliceHeader(sliceHeader.parsedContent)) {

tags[tagName] =
typeof tagData === 'number' ? tagData : parseTagData(tagType, tagData);
tagData === undefined
? undefined
: typeof tagData === 'number'
? tagData
: parseTagData(tagType, tagData);
}

@@ -262,0 +265,0 @@ let readFeatures;

@@ -137,9 +137,3 @@ import { CramArgumentError, CramMalformedError } from '../../errors';

const containerHeader = await this.container.getHeader();
if (!containerHeader) {
throw new Error('no container header detected');
}
const header = await this.file.readBlock(containerHeader._endPosition + this.containerPosition);
if (header === undefined) {
throw new Error('block header undefined');
}
if (header.contentType === 'MAPPED_SLICE_HEADER') {

@@ -165,5 +159,2 @@ const content = parseItem(header.content, sectionParsers.cramMappedSliceHeader.parser, 0, containerHeader._endPosition);

const block = await this.file.readBlock(blockPosition);
if (block === undefined) {
throw new Error('block undefined');
}
blocks[i] = block;

@@ -300,5 +291,3 @@ blockPosition = blocks[i]._endPosition;

}
// console.log(dataSeriesName, Object.getPrototypeOf(codec))
const decoded = codec.decode(this, coreDataBlock, blocksByContentId, cursors);
return decoded;
return codec.decode(this, coreDataBlock, blocksByContentId, cursors);
};

@@ -328,7 +317,16 @@ const records = new Array(sliceHeader.parsedContent.numRecords);

// interpret `recordsToNextFragment` attributes to make standard `mate`
// objects Resolve mate pair cross-references between records in this slice
// objects
//
// Resolve mate pair cross-references between records in this slice
for (let i = 0; i < records.length; i += 1) {
const { mateRecordNumber } = records[i];
if (mateRecordNumber !== undefined && mateRecordNumber >= 0) {
associateIntraSliceMate(records, i, records[i], records[mateRecordNumber]);
const r = records[i];
// check for !!r added after removal of "stat" file size check: found
// some undefined entries
if (r) {
const { mateRecordNumber } = r;
if (mateRecordNumber !== undefined &&
mateRecordNumber >= 0 &&
records[mateRecordNumber]) {
associateIntraSliceMate(records, i, r, records[mateRecordNumber]);
}
}

@@ -335,0 +333,0 @@ }

@@ -0,1 +1,6 @@

export declare const TWO_PWR_16_DBL: number;
export declare const TWO_PWR_32_DBL: number;
export declare const TWO_PWR_64_DBL: number;
export declare const TWO_PWR_24_DBL: number;
export declare const TWO_PWR_56_DBL: number;
export declare function itf8Size(v: number): 1 | 2 | 3 | 4 | 5;

@@ -2,0 +7,0 @@ export declare function parseItf8(buffer: Uint8Array, initialOffset: number): readonly [number, number];

@@ -1,4 +0,7 @@

import { fromBytesBE, toNumber } from 'longfn';
import md5 from 'md5';
import { CramBufferOverrunError } from './codecs/getBits';
export const TWO_PWR_16_DBL = 1 << 16;
export const TWO_PWR_32_DBL = TWO_PWR_16_DBL * TWO_PWR_16_DBL;
export const TWO_PWR_64_DBL = TWO_PWR_32_DBL * TWO_PWR_32_DBL;
export const TWO_PWR_24_DBL = 1 << 24;
export const TWO_PWR_56_DBL = TWO_PWR_24_DBL * TWO_PWR_32_DBL;
export function itf8Size(v) {

@@ -23,25 +26,30 @@ if (!(v & ~0x7f)) {

let result;
// Single byte value (0xxxxxxx)
if (countFlags < 0x80) {
result = countFlags;
offset = offset + 1;
offset += 1;
}
// Two byte value (10xxxxxx)
else if (countFlags < 0xc0) {
result = ((countFlags << 8) | buffer[offset + 1]) & 0x3fff;
offset = offset + 2;
result = ((countFlags & 0x3f) << 8) | buffer[offset + 1];
offset += 2;
}
// Three byte value (110xxxxx)
else if (countFlags < 0xe0) {
result =
((countFlags << 16) | (buffer[offset + 1] << 8) | buffer[offset + 2]) &
0x1fffff;
offset = offset + 3;
((countFlags & 0x1f) << 16) |
(buffer[offset + 1] << 8) |
buffer[offset + 2];
offset += 3;
}
// Four byte value (1110xxxx)
else if (countFlags < 0xf0) {
result =
((countFlags << 24) |
((countFlags & 0x0f) << 24) |
(buffer[offset + 1] << 16) |
(buffer[offset + 2] << 8) |
buffer[offset + 3]) &
0x0fffffff;
offset = offset + 4;
buffer[offset + 3];
offset += 4;
}
// Five byte value (11110xxx)
else {

@@ -54,56 +62,53 @@ result =

(buffer[offset + 4] & 0x0f);
// x=((0xff & 0x0f)<<28) | (0xff<<20) | (0xff<<12) | (0xff<<4) | (0x0f & 0x0f);
// TODO *val_p = uv < 0x80000000UL ? uv : -((int32_t) (0xffffffffUL - uv)) - 1;
offset = offset + 5;
offset += 5;
}
if (offset > buffer.length) {
throw new CramBufferOverrunError('Attempted to read beyond end of buffer; this file seems truncated.');
}
return [result, offset - initialOffset];
}
export function parseLtf8(buffer, initialOffset) {
const dataView = new DataView(buffer.buffer);
let offset = initialOffset;
const countFlags = buffer[offset];
let n;
let value;
// Single byte value < 0x80
if (countFlags < 0x80) {
n = countFlags;
value = countFlags;
offset += 1;
}
// Two byte value < 0xC0
else if (countFlags < 0xc0) {
n = ((buffer[offset] << 8) | buffer[offset + 1]) & 0x3fff;
value = ((countFlags << 8) | buffer[offset + 1]) & 0x3fff;
offset += 2;
}
// Three byte value < 0xE0
else if (countFlags < 0xe0) {
n =
((buffer[offset] << 16) |
value =
((countFlags & 0x3f) << 16) |
(buffer[offset + 1] << 8) |
buffer[offset + 2]) &
0x1fffff;
n = ((countFlags & 63) << 16) | dataView.getUint16(offset + 1, true);
buffer[offset + 2];
offset += 3;
}
// Four byte value < 0xF0
else if (countFlags < 0xf0) {
n =
((buffer[offset] << 24) |
value =
((countFlags & 0x1f) << 24) |
(buffer[offset + 1] << 16) |
(buffer[offset + 2] << 8) |
buffer[offset + 3]) &
0x0fffffff;
buffer[offset + 3];
offset += 4;
}
// Five byte value < 0xF8
else if (countFlags < 0xf8) {
n =
((buffer[offset] & 15) * 2 ** 32 + (buffer[offset + 1] << 24)) |
((buffer[offset + 2] << 16) |
value =
(buffer[offset] & 0x0f) * TWO_PWR_32_DBL +
((buffer[offset + 1] << 24) |
(buffer[offset + 2] << 16) |
(buffer[offset + 3] << 8) |
buffer[offset + 4]);
// TODO *val_p = uv < 0x80000000UL ? uv : -((int32_t) (0xffffffffUL - uv)) - 1;
offset += 5;
}
// Six byte value < 0xFC
else if (countFlags < 0xfc) {
n =
((((buffer[offset] & 7) << 8) | buffer[offset + 1]) * 2 ** 32 +
(buffer[offset + 2] << 24)) |
((buffer[offset + 3] << 16) |
value =
(((buffer[offset] & 0x07) << 8) | buffer[offset + 1]) * TWO_PWR_32_DBL +
((buffer[offset + 2] << 24) |
(buffer[offset + 3] << 16) |
(buffer[offset + 4] << 8) |

@@ -113,10 +118,11 @@ buffer[offset + 5]);

}
// Seven byte value < 0xFE
else if (countFlags < 0xfe) {
n =
((((buffer[offset] & 3) << 16) |
value =
(((buffer[offset] & 0x03) << 16) |
(buffer[offset + 1] << 8) |
buffer[offset + 2]) *
2 ** 32 +
(buffer[offset + 3] << 24)) |
((buffer[offset + 4] << 16) |
TWO_PWR_32_DBL +
((buffer[offset + 3] << 24) |
(buffer[offset + 4] << 16) |
(buffer[offset + 5] << 8) |

@@ -126,11 +132,32 @@ buffer[offset + 6]);

}
// Eight byte value < 0xFF
else if (countFlags < 0xff) {
n = toNumber(fromBytesBE(buffer.slice(offset + 1, offset + 8), false));
value =
((buffer[offset + 1] << 24) |
(buffer[offset + 2] << 16) |
(buffer[offset + 3] << 8) |
buffer[offset + 4]) *
TWO_PWR_32_DBL +
((buffer[offset + 5] << 24) |
(buffer[offset + 6] << 16) |
(buffer[offset + 7] << 8) |
buffer[offset + 8]);
offset += 8;
}
// Nine byte value
else {
n = toNumber(fromBytesBE(buffer.subarray(offset + 1, offset + 9), false));
value =
buffer[offset + 1] * TWO_PWR_56_DBL +
((buffer[offset + 2] << 24) |
(buffer[offset + 3] << 16) |
(buffer[offset + 4] << 8) |
buffer[offset + 5]) *
TWO_PWR_32_DBL +
((buffer[offset + 6] << 24) |
(buffer[offset + 7] << 16) |
(buffer[offset + 8] << 8) |
buffer[offset + 9]);
offset += 9;
}
return [n, offset - initialOffset];
return [value, offset - initialOffset];
}

@@ -145,6 +172,2 @@ export function parseItem(buffer, parser, startBufferPosition = 0, startFilePosition = 0) {

}
// this would be nice as a decorator, but i'm a little worried about babel
// support for it going away or changing. memoizes a method in the stupidest
// possible way, with no regard for the arguments. actually, this only works
// on methods that take no arguments
export function tinyMemoize(_class, methodName) {

@@ -151,0 +174,0 @@ const method = _class.prototype[methodName];

{
"name": "@gmod/cram",
"version": "4.0.4",
"version": "4.0.5",
"description": "read CRAM files with pure Javascript",

@@ -18,3 +18,2 @@ "license": "MIT",

"files": [
"errors.js",
"src",

@@ -28,3 +27,3 @@ "dist",

"format": "prettier --write .",
"docs": "documentation readme --shallow src/indexedCramFile.ts --section=IndexedCramFile; documentation readme --shallow src/cramFile/file.ts --section=CramFile; documentation readme --shallow src/craiIndex.ts --section=CraiIndex; documentation readme --shallow errors.ts '--section=Exception Classes'; documentation readme --shallow src/cramFile/file.ts --section=CramFile; documentation readme --shallow src/cramFile/record.ts --section=CramRecord",
"docs": "documentation readme --shallow src/indexedCramFile.ts --section=IndexedCramFile; documentation readme --shallow src/cramFile/file.ts --section=CramFile; documentation readme --shallow src/craiIndex.ts --section=CraiIndex; documentation readme --shallow src/cramFile/file.ts --section=CramFile; documentation readme --shallow src/cramFile/record.ts --section=CramRecord",
"prebuild": "yarn clean",

@@ -36,3 +35,3 @@ "clean": "rimraf dist esm",

"postbuild": "webpack",
"preversion": "yarn test --run && yarn build && cp dist/errors.js errors.js",
"preversion": "yarn test --run && yarn build",
"postpublish": "rm errors.js",

@@ -51,3 +50,2 @@ "postversion": "git push --follow-tags"

"generic-filehandle2": "^1.0.0",
"longfn": "^1.3.1",
"md5": "^2.2.1",

@@ -54,0 +52,0 @@ "pako": "^1.0.4",

@@ -145,2 +145,3 @@ # @gmod/cram

- [CramRecord](#cramrecord)
- [Parameters](#parameters)
- [isPaired](#ispaired)

@@ -165,3 +166,3 @@ - [isProperlyPaired](#isproperlypaired)

- [addReferenceSequence](#addreferencesequence)
- [Parameters](#parameters)
- [Parameters](#parameters-1)

@@ -172,2 +173,24 @@ #### CramRecord

##### Parameters
- `$0` **any**&#x20;
- `$0.flags` &#x20;
- `$0.cramFlags` &#x20;
- `$0.readLength` &#x20;
- `$0.mappingQuality` &#x20;
- `$0.lengthOnRef` &#x20;
- `$0.qualityScores` &#x20;
- `$0.mateRecordNumber` &#x20;
- `$0.readBases` &#x20;
- `$0.readFeatures` &#x20;
- `$0.mateToUse` &#x20;
- `$0.readGroupId` &#x20;
- `$0.readName` &#x20;
- `$0.sequenceId` &#x20;
- `$0.uniqueId` &#x20;
- `$0.templateSize` &#x20;
- `$0.alignmentStart` &#x20;
- `$0.tags` &#x20;
##### isPaired

@@ -288,3 +311,3 @@

Annotates this feature with the given reference sequence basepair information.
This will add a `sub` and a `ref` item to base subsitution read features given
This will add a `sub` and a `ref` item to base substitution read features given
the actual substituted and reference base pairs, and will make the

@@ -296,12 +319,12 @@ `getReadSequence()` method work.

- `refRegion`
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)**
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)**&#x20;
- `refRegion.start`
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**&#x20;
- `refRegion.end`
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**&#x20;
- `refRegion.seq`
**[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**
**[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**&#x20;
- `compressionScheme` **CramContainerCompressionScheme**
- `compressionScheme` **CramContainerCompressionScheme**&#x20;

@@ -344,5 +367,5 @@ Returns

- `args`
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)**
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)**&#x20;
- `args.cram` **CramFile**
- `args.cram` **CramFile**&#x20;
- `args.index` **Index-like** object that supports

@@ -372,3 +395,9 @@ getEntriesForRange(seqId,start,end) -> Promise\[Array\[index entries]]

end of the range of interest. 1-based closed coordinates.
- `opts` (optional, default `{}`)
- `opts` **{viewAsPairs:
[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)?,
pairAcrossChr:
[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)?,
maxInsertSize:
[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)?}**
(optional, default `{}`)

@@ -380,3 +409,3 @@ #### hasDataForReferenceSequence

- `seqId`
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**&#x20;

@@ -394,38 +423,11 @@ Returns

- [constructor](#constructor)
- [Parameters](#parameters)
- [containerCount](#containercount)
#### constructor
#### containerCount
##### Parameters
Returns
**[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<([number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
|
[undefined](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/undefined))>**&#x20;
- `args`
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)**
- `args.filehandle`
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)?**
a filehandle that implements the stat() and read() methods of the Node
filehandle API <https://nodejs.org/api/fs.html#fs_class_filehandle>
- `args.path`
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)?**
path to the cram file
- `args.url`
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)?**
url for the cram file. also supports file:// urls for local files
- `args.seqFetch`
**[function](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Statements/function)?**
a function with signature `(seqId, startCoordinate, endCoordinate)` that
returns a promise for a string of sequence bases
- `args.cacheSize`
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)?**
optional maximum number of CRAM records to cache. default 20,000
- `args.checkSequenceMD5`
**[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)?**
default true. if false, disables verifying the MD5 checksum of the reference
sequence underlying a slice. In some applications, this check can cause an
inconvenient amount (many megabases) of sequences to be fetched.
#### containerCount
### CraiIndex

@@ -449,9 +451,9 @@

- `args`
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)**
**[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)**&#x20;
- `args.path`
**[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)?**
**[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)?**&#x20;
- `args.url`
**[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)?**
- `args.filehandle` **FileHandle?**
**[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)?**&#x20;
- `args.filehandle` **FileHandle?**&#x20;

@@ -463,3 +465,3 @@ #### hasDataForReferenceSequence

- `seqId`
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**&#x20;

@@ -478,7 +480,7 @@ Returns

- `seqId`
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**&#x20;
- `queryStart`
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**&#x20;
- `queryEnd`
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**
**[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)**&#x20;

@@ -485,0 +487,0 @@ Returns

@@ -44,3 +44,3 @@ import { CramFileBlock } from '../file'

cursors: Cursors,
): DataTypeMapping[TResult]
): DataTypeMapping[TResult] | undefined
}

@@ -35,8 +35,4 @@ import CramCodec, { Cursors } from './_base'

const lengthCodec = this._getLengthCodec()
const arrayLength = lengthCodec.decode(
slice,
coreDataBlock,
blocksByContentId,
cursors,
)
const arrayLength =
lengthCodec.decode(slice, coreDataBlock, blocksByContentId, cursors) || 0

@@ -46,8 +42,4 @@ const dataCodec = this._getDataCodec()

for (let i = 0; i < arrayLength; i += 1) {
data[i] = dataCodec.decode(
slice,
coreDataBlock,
blocksByContentId,
cursors,
)
data[i] =
dataCodec.decode(slice, coreDataBlock, blocksByContentId, cursors) || 0
}

@@ -54,0 +46,0 @@

import CramCodec, { Cursor, Cursors } from './_base'
import { CramMalformedError, CramUnimplementedError } from '../../errors'
import { CramUnimplementedError } from '../../errors'
import { CramFileBlock } from '../file'

@@ -42,9 +42,5 @@ import CramSlice from '../slice'

const contentBlock = blocksByContentId[blockContentId]
if (!contentBlock) {
throw new CramMalformedError(
`no block found with content ID ${blockContentId}}`,
)
}
const cursor = cursors.externalBlocks.getCursor(blockContentId)
return this._decodeData(contentBlock, cursor)
return contentBlock ? this._decodeData(contentBlock, cursor) : undefined
}

@@ -51,0 +47,0 @@

@@ -24,3 +24,3 @@ import { CramMalformedError } from '../../errors'

// header
if (!containerHeader?.numRecords) {
if (!containerHeader.numRecords) {
return null

@@ -32,5 +32,2 @@ }

const block = await this.getFirstBlock()
if (block === undefined) {
return undefined
}
if (block.contentType !== 'COMPRESSION_HEADER') {

@@ -56,5 +53,2 @@ throw new CramMalformedError(

const containerHeader = await this.getHeader()
if (!containerHeader) {
return undefined
}
return this.file.readBlock(containerHeader._endPosition)

@@ -84,9 +78,3 @@ }

const { cramContainerHeader1, cramContainerHeader2 } = sectionParsers
const { size: fileSize } = await this.file.stat()
if (position >= fileSize) {
console.warn(`pos:${position}>=fileSize:${fileSize} in cram container`)
return undefined
}
// parse the container header. do it in 2 pieces because you cannot tell

@@ -100,9 +88,2 @@ // how much to buffer until you read numLandmarks

const numLandmarksSize = itf8Size(header1.numLandmarks)
if (position + header1.length >= fileSize) {
// header indicates container goes beyond fileSize
console.warn(
`container at ${position} is beyond fileSize:${fileSize}, skipping`,
)
return undefined
}

@@ -124,8 +105,8 @@ const bytes2 = await this.file.read(

const completeHeader = Object.assign(header1, header2, {
return {
...header1,
...header2,
_size: header1._size + header2._size - numLandmarksSize,
_endPosition: header1._size + header2._size - numLandmarksSize + position,
})
return completeHeader
}
}

@@ -132,0 +113,0 @@ }

@@ -105,8 +105,2 @@ import bzip2 from 'bzip2'

// can just stat this object like a filehandle
stat() {
return this.file.stat()
}
// can just stat this object like a filehandle
read(length: number, position: number) {

@@ -137,16 +131,13 @@ return this.file.read(length, position)

const firstBlock = await firstContainer.getFirstBlock()
if (firstBlock === undefined) {
return parseHeaderText('')
} else {
const content = firstBlock.content
const dataView = new DataView(content.buffer)
const headerLength = dataView.getInt32(0, true)
const textStart = 4
const decoder = new TextDecoder('utf8')
const text = decoder.decode(
content.subarray(textStart, textStart + headerLength),
)
this.header = text
return parseHeaderText(text)
}
const content = firstBlock.content
const dataView = new DataView(content.buffer)
const headerLength = dataView.getInt32(0, true)
const textStart = 4
const decoder = new TextDecoder('utf8')
const text = decoder.decode(
content.subarray(textStart, textStart + headerLength),
)
this.header = text
return parseHeaderText(text)
}

@@ -163,4 +154,2 @@

let position = sectionParsers.cramFileDefinition.maxLength
const { size: fileSize } = await this.file.stat()
const { cramContainerHeader1 } = sectionParsers

@@ -172,13 +161,9 @@ // skip with a series of reads to the proper container

// and have not found that container, it does not exist
if (position + cramContainerHeader1.maxLength + 8 >= fileSize) {
return undefined
}
// if (position + cramContainerHeader1.maxLength + 8 >= fileSize) {
// return undefined
// }
currentContainer = this.getContainerAtPosition(position)
const currentHeader = await currentContainer.getHeader()
if (!currentHeader) {
throw new CramMalformedError(
`container ${containerNumber} not found in file`,
)
}
// if this is the first container, read all the blocks in the container

@@ -191,5 +176,2 @@ // to determine its length, because we cannot trust the container

const block = await this.readBlock(position)
if (block === undefined) {
return undefined
}
position = block._endPosition

@@ -227,2 +209,5 @@ }

* @returns {Promise[number]} the number of containers in the file
*
* note: this is currently used only in unit tests, and after removing file
* length check, relies on a try catch to read return an error to break
*/

@@ -232,31 +217,30 @@ async containerCount(): Promise<number | undefined> {

const sectionParsers = getSectionParsers(majorVersion)
const { size: fileSize } = await this.file.stat()
const { cramContainerHeader1 } = sectionParsers
let containerCount = 0
let position = sectionParsers.cramFileDefinition.maxLength
while (position + cramContainerHeader1.maxLength + 8 < fileSize) {
const currentHeader =
await this.getContainerAtPosition(position).getHeader()
if (!currentHeader) {
break
}
// if this is the first container, read all the blocks in the container,
// because we cannot trust the container header's given length due to a
// bug somewhere in htslib
if (containerCount === 0) {
position = currentHeader._endPosition
for (let j = 0; j < currentHeader.numBlocks; j++) {
const block = await this.readBlock(position)
if (block === undefined) {
return undefined
try {
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
while (true) {
const currentHeader =
await this.getContainerAtPosition(position).getHeader()
// if this is the first container, read all the blocks in the container,
// because we cannot trust the container header's given length due to a
// bug somewhere in htslib
if (containerCount === 0) {
position = currentHeader._endPosition
for (let j = 0; j < currentHeader.numBlocks; j++) {
const block = await this.readBlock(position)
position = block._endPosition
}
position = block._endPosition
} else {
// otherwise, just traverse to the next container using the container's
// length
position += currentHeader._size + currentHeader.length
}
} else {
// otherwise, just traverse to the next container using the container's
// length
position += currentHeader._size + currentHeader.length
containerCount += 1
}
containerCount += 1
} catch (e) {
containerCount--
/* do nothing */
}

@@ -275,8 +259,3 @@

const { cramBlockHeader } = sectionParsers
const { size: fileSize } = await this.file.stat()
if (position + cramBlockHeader.maxLength >= fileSize) {
return undefined
}
const buffer = await this.file.read(cramBlockHeader.maxLength, position)

@@ -298,12 +277,3 @@ return parseItem(buffer, cramBlockHeader.parser, 0, position)

) {
let buffer: Uint8Array
if (preReadBuffer) {
buffer = preReadBuffer
} else {
const { size: fileSize } = await this.file.stat()
if (position + size >= fileSize) {
return undefined
}
buffer = await this.file.read(size, position)
}
const buffer = preReadBuffer ?? (await this.file.read(size, position))
const data = parseItem(buffer, section.parser, 0, position)

@@ -368,5 +338,2 @@ if (data._size !== size) {

const blockHeader = await this.readBlockHeader(position)
if (blockHeader === undefined) {
return undefined
}
const blockContentPosition = blockHeader._endPosition

@@ -399,5 +366,2 @@

)
if (crc === undefined) {
return undefined
}
block.crc32 = crc.crc32

@@ -404,0 +368,0 @@

@@ -274,3 +274,3 @@ import Constants from './constants'

this.readName = readName
this.sequenceId = sequenceId
this.sequenceId = sequenceId!
this.uniqueId = uniqueId

@@ -277,0 +277,0 @@ this.templateSize = templateSize

@@ -655,7 +655,10 @@ import { TupleOf } from '../typescript'

const dataView = new DataView(b.buffer, b.byteOffset, b.length)
// byte size of the container data (blocks)
const length = dataView.getInt32(offset, true)
offset += 4
// reference sequence identifier, -1 for unmapped reads, -2 for multiple
// reference sequences
// reference sequence identifier:
// -1 for unmapped reads,
// -2 for multiple reference sequences
const [refSeqId, newOffset1] = parseItf8(buffer, offset)

@@ -662,0 +665,0 @@ offset += newOffset1

@@ -214,3 +214,3 @@ import { CramMalformedError } from '../../errors'

dataSeriesName: T,
) => DataTypeMapping[DataSeriesTypes[T]]
) => DataTypeMapping[DataSeriesTypes[T]] | undefined

@@ -228,8 +228,7 @@ export default function decodeRecord(

) {
let flags = decodeDataSeries('BF')
let flags = decodeDataSeries('BF')!
// note: the C data type of compressionFlags is byte in cram v1
// and int32 in cram v2+, but that does not matter for us here
// in javascript land.
const cramFlags = decodeDataSeries('CF')
// note: the C data type of compressionFlags is byte in cram v1 and int32 in
// cram v2+, but that does not matter for us here in javascript land.
const cramFlags = decodeDataSeries('CF')!

@@ -245,5 +244,5 @@ if (!isMappedSliceHeader(sliceHeader.parsedContent)) {

const readLength = decodeDataSeries('RL')
const readLength = decodeDataSeries('RL')!
// if APDelta, will calculate the true start in a second pass
let alignmentStart = decodeDataSeries('AP')
let alignmentStart = decodeDataSeries('AP')!
if (compressionScheme.APdelta) {

@@ -253,7 +252,7 @@ alignmentStart = alignmentStart + cursors.lastAlignmentStart

cursors.lastAlignmentStart = alignmentStart
const readGroupId = decodeDataSeries('RG')
const readGroupId = decodeDataSeries('RG')!
let readName: string | undefined
if (compressionScheme.readNamesIncluded) {
readName = readNullTerminatedString(decodeDataSeries('RN'))
readName = readNullTerminatedString(decodeDataSeries('RN')!)
}

@@ -275,10 +274,10 @@

// matter for javascript
const mateFlags = decodeDataSeries('MF')
const mateFlags = decodeDataSeries('MF')!
let mateReadName: string | undefined
if (!compressionScheme.readNamesIncluded) {
mateReadName = readNullTerminatedString(decodeDataSeries('RN'))
mateReadName = readNullTerminatedString(decodeDataSeries('RN')!)
readName = mateReadName
}
const mateSequenceId = decodeDataSeries('NS')
const mateAlignmentStart = decodeDataSeries('NP')
const mateSequenceId = decodeDataSeries('NS')!
const mateAlignmentStart = decodeDataSeries('NP')!
if (mateFlags || mateSequenceId > -1) {

@@ -293,3 +292,3 @@ mateToUse = {

templateSize = decodeDataSeries('TS')
templateSize = decodeDataSeries('TS')!

@@ -307,3 +306,3 @@ // set mate unmapped if needed

} else if (CramFlagsDecoder.isWithMateDownstream(cramFlags)) {
mateRecordNumber = decodeDataSeries('NF') + recordNumber + 1
mateRecordNumber = decodeDataSeries('NF')! + recordNumber + 1
}

@@ -313,3 +312,3 @@

// cram v1
const TLindex = decodeDataSeries('TL')
const TLindex = decodeDataSeries('TL')!
if (TLindex < 0) {

@@ -333,3 +332,7 @@ /* TODO: check nTL: TLindex >= compressionHeader.tagEncoding.size */

tags[tagName] =
typeof tagData === 'number' ? tagData : parseTagData(tagType, tagData)
tagData === undefined
? undefined
: typeof tagData === 'number'
? tagData
: parseTagData(tagType, tagData)
}

@@ -344,3 +347,3 @@

// reading read features
const readFeatureCount = decodeDataSeries('FN')
const readFeatureCount = decodeDataSeries('FN')!
if (readFeatureCount) {

@@ -380,7 +383,7 @@ readFeatures = decodeReadFeatures(

// mapping quality
mappingQuality = decodeDataSeries('MQ')
mappingQuality = decodeDataSeries('MQ')!
if (CramFlagsDecoder.isPreservingQualityScores(cramFlags)) {
qualityScores = new Array(readLength)
for (let i = 0; i < qualityScores.length; i++) {
qualityScores[i] = decodeDataSeries('QS')
qualityScores[i] = decodeDataSeries('QS')!
}

@@ -394,3 +397,3 @@ }

for (let i = 0; i < bases.length; i++) {
bases[i] = decodeDataSeries('BA')
bases[i] = decodeDataSeries('BA')!
}

@@ -402,3 +405,3 @@ readBases = String.fromCharCode(...bases)

for (let i = 0; i < bases.length; i++) {
qualityScores[i] = decodeDataSeries('QS')
qualityScores[i] = decodeDataSeries('QS')!
}

@@ -405,0 +408,0 @@ }

@@ -23,2 +23,9 @@ import { CramArgumentError, CramMalformedError } from '../../errors'

/**
 * A contiguous span of one reference sequence needed by the records of a
 * slice, used below as the value type of the `refRegions` record.
 * NOTE(review): the coordinate convention (0- vs 1-based, end inclusive vs
 * exclusive) is not visible from this declaration — confirm against the code
 * that populates and consumes these regions.
 */
interface RefRegion {
  /** numeric reference sequence identifier */
  id: number
  /** start coordinate of the region on the reference */
  start: number
  /** end coordinate of the region on the reference */
  end: number
  /** reference bases for the region, or null when no sequence is available */
  seq: string | null
}
/**

@@ -195,5 +202,2 @@ * @private

const containerHeader = await this.container.getHeader()
if (!containerHeader) {
throw new Error('no container header detected')
}

@@ -203,5 +207,2 @@ const header = await this.file.readBlock(

)
if (header === undefined) {
throw new Error('block header undefined')
}
if (header.contentType === 'MAPPED_SLICE_HEADER') {

@@ -238,5 +239,2 @@ const content = parseItem(

const block = await this.file.readBlock(blockPosition)
if (block === undefined) {
throw new Error('block undefined')
}
blocks[i] = block

@@ -411,3 +409,3 @@ blockPosition = blocks[i]!._endPosition

dataSeriesName: T,
): DataTypeMapping[DataSeriesTypes[T]] => {
): DataTypeMapping[DataSeriesTypes[T]] | undefined => {
const codec = compressionScheme.getCodecForDataSeries(dataSeriesName)

@@ -419,10 +417,3 @@ if (!codec) {

}
// console.log(dataSeriesName, Object.getPrototypeOf(codec))
const decoded = codec.decode(
this,
coreDataBlock,
blocksByContentId,
cursors,
)
return decoded
return codec.decode(this, coreDataBlock, blocksByContentId, cursors)
}

@@ -466,12 +457,18 @@ const records: CramRecord[] = new Array(

// interpret `recordsToNextFragment` attributes to make standard `mate`
// objects Resolve mate pair cross-references between records in this slice
// objects
//
// Resolve mate pair cross-references between records in this slice
for (let i = 0; i < records.length; i += 1) {
const { mateRecordNumber } = records[i]!
if (mateRecordNumber !== undefined && mateRecordNumber >= 0) {
associateIntraSliceMate(
records,
i,
records[i]!,
records[mateRecordNumber]!,
)
const r = records[i]
// check for !!r added after removal of "stat" file size check: found
// some undefined entries
if (r) {
const { mateRecordNumber } = r
if (
mateRecordNumber !== undefined &&
mateRecordNumber >= 0 &&
records[mateRecordNumber]
) {
associateIntraSliceMate(records, i, r, records[mateRecordNumber])
}
}

@@ -511,6 +508,3 @@ }

}
const refRegions: Record<
string,
{ id: number; start: number; end: number; seq: string | null }
> = {}
const refRegions: Record<string, RefRegion> = {}

@@ -517,0 +511,0 @@ // iterate over the records to find the spans of the reference

@@ -1,5 +0,8 @@

import { fromBytesBE, toNumber } from 'longfn'
import md5 from 'md5'
import { CramBufferOverrunError } from './codecs/getBits'
/**
 * Powers of two as double-precision numbers. JavaScript bitwise operators
 * truncate to 32 bits, so values wider than 32 bits are assembled from byte
 * groups by multiplying/adding with these constants instead of shifting.
 * All of these are exact in IEEE-754 doubles (pure powers of two).
 */
export const TWO_PWR_16_DBL = 2 ** 16
export const TWO_PWR_32_DBL = 2 ** 32
export const TWO_PWR_64_DBL = 2 ** 64
export const TWO_PWR_24_DBL = 2 ** 24
export const TWO_PWR_56_DBL = 2 ** 56

@@ -26,22 +29,32 @@ export function itf8Size(v: number) {

let result: number
// Single byte value (0xxxxxxx)
if (countFlags < 0x80) {
result = countFlags
offset = offset + 1
} else if (countFlags < 0xc0) {
result = ((countFlags << 8) | buffer[offset + 1]!) & 0x3fff
offset = offset + 2
} else if (countFlags < 0xe0) {
offset += 1
}
// Two byte value (10xxxxxx)
else if (countFlags < 0xc0) {
result = ((countFlags & 0x3f) << 8) | buffer[offset + 1]!
offset += 2
}
// Three byte value (110xxxxx)
else if (countFlags < 0xe0) {
result =
((countFlags << 16) | (buffer[offset + 1]! << 8) | buffer[offset + 2]!) &
0x1fffff
offset = offset + 3
} else if (countFlags < 0xf0) {
((countFlags & 0x1f) << 16) |
(buffer[offset + 1]! << 8) |
buffer[offset + 2]!
offset += 3
}
// Four byte value (1110xxxx)
else if (countFlags < 0xf0) {
result =
((countFlags << 24) |
(buffer[offset + 1]! << 16) |
(buffer[offset + 2]! << 8) |
buffer[offset + 3]!) &
0x0fffffff
offset = offset + 4
} else {
((countFlags & 0x0f) << 24) |
(buffer[offset + 1]! << 16) |
(buffer[offset + 2]! << 8) |
buffer[offset + 3]!
offset += 4
}
// Five byte value (11110xxx)
else {
result =

@@ -53,11 +66,5 @@ ((countFlags & 0x0f) << 28) |

(buffer[offset + 4]! & 0x0f)
// x=((0xff & 0x0f)<<28) | (0xff<<20) | (0xff<<12) | (0xff<<4) | (0x0f & 0x0f);
// TODO *val_p = uv < 0x80000000UL ? uv : -((int32_t) (0xffffffffUL - uv)) - 1;
offset = offset + 5
offset += 5
}
if (offset > buffer.length) {
throw new CramBufferOverrunError(
'Attempted to read beyond end of buffer; this file seems truncated.',
)
}
return [result, offset - initialOffset] as const

@@ -67,65 +74,97 @@ }

export function parseLtf8(buffer: Uint8Array, initialOffset: number) {
const dataView = new DataView(buffer.buffer)
let offset = initialOffset
const countFlags = buffer[offset]!
let n: number
let value: number
// Single byte value < 0x80
if (countFlags < 0x80) {
n = countFlags
value = countFlags
offset += 1
} else if (countFlags < 0xc0) {
n = ((buffer[offset]! << 8) | buffer[offset + 1]!) & 0x3fff
}
// Two byte value < 0xC0
else if (countFlags < 0xc0) {
value = ((countFlags << 8) | buffer[offset + 1]!) & 0x3fff
offset += 2
} else if (countFlags < 0xe0) {
n =
((buffer[offset]! << 16) |
(buffer[offset + 1]! << 8) |
buffer[offset + 2]!) &
0x1fffff
n = ((countFlags & 63) << 16) | dataView.getUint16(offset + 1, true)
}
// Three byte value < 0xE0
else if (countFlags < 0xe0) {
value =
((countFlags & 0x3f) << 16) |
(buffer[offset + 1]! << 8) |
buffer[offset + 2]!
offset += 3
} else if (countFlags < 0xf0) {
n =
((buffer[offset]! << 24) |
(buffer[offset + 1]! << 16) |
(buffer[offset + 2]! << 8) |
buffer[offset + 3]!) &
0x0fffffff
}
// Four byte value < 0xF0
else if (countFlags < 0xf0) {
value =
((countFlags & 0x1f) << 24) |
(buffer[offset + 1]! << 16) |
(buffer[offset + 2]! << 8) |
buffer[offset + 3]!
offset += 4
} else if (countFlags < 0xf8) {
n =
((buffer[offset]! & 15) * 2 ** 32 + (buffer[offset + 1]! << 24)) |
((buffer[offset + 2]! << 16) |
}
// Five byte value < 0xF8
else if (countFlags < 0xf8) {
value =
(buffer[offset]! & 0x0f) * TWO_PWR_32_DBL +
((buffer[offset + 1]! << 24) |
(buffer[offset + 2]! << 16) |
(buffer[offset + 3]! << 8) |
buffer[offset + 4]!)
// TODO *val_p = uv < 0x80000000UL ? uv : -((int32_t) (0xffffffffUL - uv)) - 1;
offset += 5
} else if (countFlags < 0xfc) {
n =
((((buffer[offset]! & 7) << 8) | buffer[offset + 1]!) * 2 ** 32 +
(buffer[offset + 2]! << 24)) |
((buffer[offset + 3]! << 16) |
}
// Six byte value < 0xFC
else if (countFlags < 0xfc) {
value =
(((buffer[offset]! & 0x07) << 8) | buffer[offset + 1]!) * TWO_PWR_32_DBL +
((buffer[offset + 2]! << 24) |
(buffer[offset + 3]! << 16) |
(buffer[offset + 4]! << 8) |
buffer[offset + 5]!)
offset += 6
} else if (countFlags < 0xfe) {
n =
((((buffer[offset]! & 3) << 16) |
}
// Seven byte value < 0xFE
else if (countFlags < 0xfe) {
value =
(((buffer[offset]! & 0x03) << 16) |
(buffer[offset + 1]! << 8) |
buffer[offset + 2]!) *
2 ** 32 +
(buffer[offset + 3]! << 24)) |
((buffer[offset + 4]! << 16) |
TWO_PWR_32_DBL +
((buffer[offset + 3]! << 24) |
(buffer[offset + 4]! << 16) |
(buffer[offset + 5]! << 8) |
buffer[offset + 6]!)
offset += 7
} else if (countFlags < 0xff) {
n = toNumber(fromBytesBE(buffer.slice(offset + 1, offset + 8), false))
}
// Eight byte value < 0xFF
else if (countFlags < 0xff) {
value =
((buffer[offset + 1]! << 24) |
(buffer[offset + 2]! << 16) |
(buffer[offset + 3]! << 8) |
buffer[offset + 4]!) *
TWO_PWR_32_DBL +
((buffer[offset + 5]! << 24) |
(buffer[offset + 6]! << 16) |
(buffer[offset + 7]! << 8) |
buffer[offset + 8]!)
offset += 8
} else {
n = toNumber(fromBytesBE(buffer.subarray(offset + 1, offset + 9), false))
}
// Nine byte value
else {
value =
buffer[offset + 1]! * TWO_PWR_56_DBL +
((buffer[offset + 2]! << 24) |
(buffer[offset + 3]! << 16) |
(buffer[offset + 4]! << 8) |
buffer[offset + 5]!) *
TWO_PWR_32_DBL +
((buffer[offset + 6]! << 24) |
(buffer[offset + 7]! << 16) |
(buffer[offset + 8]! << 8) |
buffer[offset + 9]!)
offset += 9
}
return [n, offset - initialOffset] as const
return [value, offset - initialOffset] as const
}

@@ -146,7 +185,2 @@

}
// this would be nice as a decorator, but i'm a little worried about babel
// support for it going away or changing. memoizes a method in the stupidest
// possible way, with no regard for the arguments. actually, this only works
// on methods that take no arguments
export function tinyMemoize(_class: any, methodName: any) {

@@ -153,0 +187,0 @@ const method = _class.prototype[methodName]

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc