Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in · Demo · Install
Socket

@gmod/cram

Package Overview
Dependencies
Maintainers
0
Versions
51
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@gmod/cram - npm Package Compare versions

Comparing version 3.0.4 to 3.0.5

8

dist/craiIndex.d.ts

@@ -9,5 +9,5 @@ import { CramFileSource } from './cramFile/file';

}
type ParsedIndex = Record<string, Slice[]>;
type ParsedIndex = Record<string, Slice[] | undefined>;
export default class CraiIndex {
private _parseCache;
private parseIndexP?;
private filehandle;

@@ -23,5 +23,3 @@ /**

parseIndex(): Promise<ParsedIndex>;
getIndex(opts?: {
signal?: AbortSignal;
}): Promise<ParsedIndex>;
getIndex(): Promise<ParsedIndex>;
/**

@@ -28,0 +26,0 @@ * @param {number} seqId

@@ -11,8 +11,3 @@ "use strict";

};
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const abortable_promise_cache_1 = __importDefault(require("@gmod/abortable-promise-cache"));
const quick_lru_1 = __importDefault(require("quick-lru"));
const unzip_1 = require("./unzip");

@@ -24,13 +19,20 @@ const io_1 = require("./io");

const [seqId, start, span, containerStart, sliceStart, sliceBytes] = record;
if (!index[seqId]) {
index[seqId] = [];
const s = seqId;
if (!index[s]) {
index[s] = [];
}
index[seqId].push({
start,
span,
containerStart,
sliceStart,
sliceBytes,
index[s].push({
start: start,
span: span,
containerStart: containerStart,
sliceStart: sliceStart,
sliceBytes: sliceBytes,
});
}
function maybeUnzip(data) {
if (data[0] === 31 && data[1] === 139) {
return (0, unzip_1.unzip)(data);
}
return data;
}
class CraiIndex {

@@ -46,18 +48,7 @@ /**

this.filehandle = (0, io_1.open)(args.url, args.path, args.filehandle);
this._parseCache = new abortable_promise_cache_1.default({
cache: new quick_lru_1.default({ maxSize: 1 }),
fill: (_data, _signal) => this.parseIndex(),
});
}
parseIndex() {
const index = {};
return this.filehandle
.readFile()
.then(data => {
if (data[0] === 31 && data[1] === 139) {
return (0, unzip_1.unzip)(data);
}
return data;
})
.then(uncompressedBuffer => {
return __awaiter(this, void 0, void 0, function* () {
const index = {};
const uncompressedBuffer = maybeUnzip(yield this.filehandle.readFile());
if (uncompressedBuffer.length > 4 &&

@@ -103,3 +94,4 @@ uncompressedBuffer.readUInt32LE(0) === BAI_MAGIC) {

Object.entries(index).forEach(([seqId, ent]) => {
index[seqId] = ent.sort((a, b) => a.start - b.start || a.span - b.span);
const e2 = ent;
index[seqId] = e2.sort((a, b) => a.start - b.start || a.span - b.span);
});

@@ -109,4 +101,10 @@ return index;

}
getIndex(opts = {}) {
return this._parseCache.get('index', null, opts.signal);
getIndex() {
if (!this.parseIndexP) {
this.parseIndexP = this.parseIndex().catch((e) => {
this.parseIndexP = undefined;
throw e;
});
}
return this.parseIndexP;
}

@@ -113,0 +111,0 @@ /**

@@ -12,5 +12,2 @@ "use strict";

this.instantiateCodec = instantiateCodec;
if (dataType !== 'byteArray') {
throw new TypeError(`byteArrayLength does not support data type ${dataType}`);
}
}

@@ -39,5 +36,5 @@ decode(slice, coreDataBlock, blocksByContentId, cursors) {

exports.default = ByteArrayStopCodec;
'_getLengthCodec _getDataCodec'
.split(' ')
.forEach(method => (0, util_1.tinyMemoize)(ByteArrayStopCodec, method));
'_getLengthCodec _getDataCodec'.split(' ').forEach(method => {
(0, util_1.tinyMemoize)(ByteArrayStopCodec, method);
});
//# sourceMappingURL=byteArrayLength.js.map

@@ -6,5 +6,4 @@ import CramCodec, { Cursor, Cursors } from './_base';

export default class ByteArrayStopCodec extends CramCodec<'byteArray', ByteArrayStopCramEncoding['parameters']> {
constructor(parameters: ByteArrayStopCramEncoding['parameters'], dataType: 'byteArray');
decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): Buffer;
_decodeByteArray(contentBlock: CramFileBlock, cursor: Cursor): Buffer;
}

@@ -10,8 +10,2 @@ "use strict";

class ByteArrayStopCodec extends _base_1.default {
constructor(parameters, dataType) {
super(parameters, dataType);
if (dataType !== 'byteArray') {
throw new TypeError(`byteArrayStop codec does not support data type ${dataType}`);
}
}
decode(slice, coreDataBlock, blocksByContentId, cursors) {

@@ -18,0 +12,0 @@ const { blockContentId } = this.parameters;

@@ -6,3 +6,2 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
/* eslint-disable @typescript-eslint/unbound-method */
const errors_1 = require("../../errors");

@@ -9,0 +8,0 @@ const _base_1 = __importDefault(require("./_base"));

@@ -6,3 +6,2 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
/* eslint-disable @typescript-eslint/unbound-method */
const errors_1 = require("../../errors");

@@ -41,3 +40,3 @@ const _base_1 = __importDefault(require("./_base"));

let codes = new Array(this.parameters.numCodes);
for (let i = 0; i < this.parameters.numCodes; i += 1) {
for (let i = 0; i < this.parameters.numCodes; i++) {
codes[i] = {

@@ -44,0 +43,0 @@ symbol: this.parameters.symbols[i],

@@ -62,3 +62,3 @@ import CramCodec from '../codecs/_base';

*/
getTagNames(tagListId: number): string[];
getTagNames(tagListId: number): string[] | undefined;
getCodecForDataSeries<TDataSeries extends DataSeriesEncodingKey>(dataSeriesName: TDataSeries): CramCodec<DataSeriesTypes[TDataSeries]> | undefined;

@@ -65,0 +65,0 @@ toJSON(): any;

@@ -87,9 +87,15 @@ "use strict";

getCodecForTag(tagName) {
if (!this.tagCodecCache[tagName]) {
const test = this.tagCodecCache[tagName];
if (!test) {
const encodingData = this.tagEncoding[tagName];
if (encodingData) {
this.tagCodecCache[tagName] = (0, codecs_1.instantiateCodec)(encodingData, 'byteArray');
if (!encodingData) {
throw new Error('Error, no tag encoding');
}
const ret = (0, codecs_1.instantiateCodec)(encodingData, 'byteArray');
this.tagCodecCache[tagName] = ret;
return ret;
}
return this.tagCodecCache[tagName];
else {
return test;
}
}

@@ -108,4 +114,6 @@ /**

const encodingData = this.dataSeriesEncoding[dataSeriesName];
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (encodingData) {
const dataType = dataSeriesTypes[dataSeriesName];
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (!dataType) {

@@ -112,0 +120,0 @@ throw new errors_1.CramMalformedError(`data series name ${dataSeriesName} not defined in file compression header`);

@@ -114,3 +114,5 @@ "use strict";

.split(' ')
.forEach(method => (0, util_1.tinyMemoize)(CramContainer, method));
.forEach(method => {
(0, util_1.tinyMemoize)(CramContainer, method);
});
//# sourceMappingURL=index.js.map

@@ -78,3 +78,3 @@ import { Buffer } from 'buffer';

};
}, position: number, size?: number, preReadBuffer?: undefined): Promise<(T & {
}, position: number, size?: number, preReadBuffer?: Buffer): Promise<(T & {
_endPosition: number;

@@ -81,0 +81,0 @@ _size: number;

@@ -62,6 +62,5 @@ "use strict";

};
// cache of features in a slice, keyed by the
// slice offset. caches all of the features in a slice, or none.
// the cache is actually used by the slice object, it's just
// kept here at the level of the file
// cache of features in a slice, keyed by the slice offset. caches all of
// the features in a slice, or none. the cache is actually used by the
// slice object, it's just kept here at the level of the file
this.featureCache = new quick_lru_1.default({

@@ -231,3 +230,3 @@ maxSize: this.options.cacheSize,

_parseSection(section_1, position_1) {
return __awaiter(this, arguments, void 0, function* (section, position, size = section.maxLength, preReadBuffer = undefined) {
return __awaiter(this, arguments, void 0, function* (section, position, size = section.maxLength, preReadBuffer) {
let buffer;

@@ -279,4 +278,4 @@ if (preReadBuffer) {

(0, rans_1.default)(inputBuffer, outputBuffer);
//htscodecs r4x8 is slower, but compatible.
//htscodecs.r4x8_uncompress(inputBuffer, outputBuffer);
// htscodecs r4x8 is slower, but compatible.
// htscodecs.r4x8_uncompress(inputBuffer, outputBuffer);
}

@@ -344,5 +343,5 @@ else if (compressionMethod === 'rans4x16') {

exports.default = CramFile;
'getDefinition getSectionParsers getSamHeader'
.split(' ')
.forEach(method => (0, util_1.tinyMemoize)(CramFile, method));
'getDefinition getSectionParsers getSamHeader'.split(' ').forEach(method => {
(0, util_1.tinyMemoize)(CramFile, method);
});
//# sourceMappingURL=file.js.map

@@ -84,6 +84,3 @@ "use strict";

// put down a chunk of sequence up to the next read feature
const chunk = refRegion.seq.slice(regionPos, regionPos +
cramRecord.readFeatures[currentReadFeature].pos -
bases.length -
1);
const chunk = refRegion.seq.slice(regionPos, regionPos + feature.pos - bases.length - 1);
bases += chunk;

@@ -115,5 +112,2 @@ regionPos += chunk.length;

function decodeBaseSubstitution(cramRecord, refRegion, compressionScheme, readFeature) {
if (!refRegion) {
return;
}
// decode base substitution code using the substitution matrix

@@ -120,0 +114,0 @@ const refCoord = readFeature.refPos - refRegion.start;

"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.default = decodeRecord;
const long_1 = __importDefault(require("long"));
const errors_1 = require("../../errors");

@@ -86,3 +82,3 @@ const record_1 = require("../record");

if (tagType === 'I') {
return long_1.default.fromBytesLE(buffer).toNumber();
return new Uint32Array(buffer.buffer)[0];
}

@@ -260,8 +256,7 @@ if (tagType === 'i') {

const tagType = tagId.slice(2, 3);
const tagCodec = compressionScheme.getCodecForTag(tagId);
if (!tagCodec) {
throw new errors_1.CramMalformedError(`no codec defined for auxiliary tag ${tagId}`);
}
const tagData = tagCodec.decode(slice, coreDataBlock, blocksByContentId, cursors);
tags[tagName] = parseTagData(tagType, tagData);
const tagData = compressionScheme
.getCodecForTag(tagId)
.decode(slice, coreDataBlock, blocksByContentId, cursors);
tags[tagName] =
typeof tagData === 'number' ? tagData : parseTagData(tagType, tagData);
}

@@ -268,0 +263,0 @@ let readFeatures;

@@ -11,4 +11,5 @@ import CramRecord from '../record';

containerPosition: number;
sliceSize: number;
private file;
constructor(container: CramContainer, containerPosition: number, _unused: number);
constructor(container: CramContainer, containerPosition: number, sliceSize: number);
getHeader(): Promise<{

@@ -40,3 +41,3 @@ parsedContent: {

_getBlocksContentIdIndex(): Promise<Record<number, CramFileBlock>>;
getBlockByContentId(id: number): Promise<CramFileBlock>;
getBlockByContentId(id: number): Promise<CramFileBlock | undefined>;
getReferenceRegion(): Promise<{

@@ -43,0 +44,0 @@ seq: any;

@@ -72,10 +72,8 @@ "use strict";

/**
* @private establishes a mate-pair relationship between two records in the same slice.
* CRAM compresses mate-pair relationships between records in the same slice down into
* just one record having the index in the slice of its mate
* @private establishes a mate-pair relationship between two records in the
* same slice. CRAM compresses mate-pair relationships between records in the
* same slice down into just one record having the index in the slice of its
* mate
*/
function associateIntraSliceMate(allRecords, currentRecordNumber, thisRecord, mateRecord) {
if (!mateRecord) {
throw new errors_1.CramMalformedError('could not resolve intra-slice mate pairs, file seems truncated or malformed');
}
const complicatedMultiSegment = !!(mateRecord.mate ||

@@ -142,5 +140,6 @@ (mateRecord.mateRecordNumber !== undefined &&

class CramSlice {
constructor(container, containerPosition, _unused) {
constructor(container, containerPosition, sliceSize) {
this.container = container;
this.containerPosition = containerPosition;
this.sliceSize = sliceSize;
this.file = container.file;

@@ -197,3 +196,2 @@ }

const blocks = yield this.getBlocks();
// the core data block is always the first block in the slice
return blocks[0];

@@ -283,5 +281,2 @@ });

const sliceHeader = yield this.getHeader();
if (sliceHeader === undefined) {
throw new Error('slice header undefined');
}
const blocksByContentId = yield this._getBlocksContentIdIndex();

@@ -335,3 +330,3 @@ // check MD5 of reference if available

};
let records = new Array(sliceHeader.parsedContent.numRecords);
const records = new Array(sliceHeader.parsedContent.numRecords);
for (let i = 0; i < records.length; i += 1) {

@@ -348,3 +343,2 @@ try {

console.warn('read attempted beyond end of buffer, file seems truncated.');
records = records.filter(r => !!r);
break;

@@ -443,5 +437,5 @@ }

// memoize several methods in the class for performance
'getHeader getBlocks _getBlocksContentIdIndex'
.split(' ')
.forEach(method => (0, util_1.tinyMemoize)(CramSlice, method));
'getHeader getBlocks _getBlocksContentIdIndex'.split(' ').forEach(method => {
(0, util_1.tinyMemoize)(CramSlice, method);
});
//# sourceMappingURL=index.js.map

@@ -43,5 +43,2 @@ "use strict";

this.index = args.index;
if (!this.index.getEntriesForRange) {
throw new Error('invalid arguments: not an index');
}
}

@@ -48,0 +45,0 @@ /**

@@ -7,3 +7,3 @@ "use strict";

exports.default = uncompress;
//@ts-nocheck
// @ts-nocheck
const errors_1 = require("../errors");

@@ -10,0 +10,0 @@ const constants_1 = require("./constants");

@@ -7,3 +7,3 @@ "use strict";

exports.default = uncompress;
//@ts-nocheck
// @ts-nocheck
const constants_1 = require("./constants");

@@ -10,0 +10,0 @@ const decoding_1 = __importDefault(require("./decoding"));

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
//@ts-nocheck
// @ts-nocheck
const errors_1 = require("../errors");

@@ -5,0 +5,0 @@ const constants_1 = require("./constants");

@@ -8,3 +8,3 @@ "use strict";

exports.readStatsO1 = readStatsO1;
//@ts-nocheck
// @ts-nocheck
const errors_1 = require("../errors");

@@ -11,0 +11,0 @@ const constants_1 = require("./constants");

@@ -7,3 +7,3 @@ "use strict";

exports.default = uncompress;
//@ts-nocheck
// @ts-nocheck
const buffer_1 = require("buffer");

@@ -10,0 +10,0 @@ const errors_1 = require("../errors");

@@ -9,5 +9,5 @@ import { CramFileSource } from './cramFile/file';

}
type ParsedIndex = Record<string, Slice[]>;
type ParsedIndex = Record<string, Slice[] | undefined>;
export default class CraiIndex {
private _parseCache;
private parseIndexP?;
private filehandle;

@@ -23,5 +23,3 @@ /**

parseIndex(): Promise<ParsedIndex>;
getIndex(opts?: {
signal?: AbortSignal;
}): Promise<ParsedIndex>;
getIndex(): Promise<ParsedIndex>;
/**

@@ -28,0 +26,0 @@ * @param {number} seqId

@@ -1,3 +0,1 @@

import AbortablePromiseCache from '@gmod/abortable-promise-cache';
import QuickLRU from 'quick-lru';
import { unzip } from './unzip';

@@ -9,13 +7,20 @@ import { open } from './io';

const [seqId, start, span, containerStart, sliceStart, sliceBytes] = record;
if (!index[seqId]) {
index[seqId] = [];
const s = seqId;
if (!index[s]) {
index[s] = [];
}
index[seqId].push({
start,
span,
containerStart,
sliceStart,
sliceBytes,
index[s].push({
start: start,
span: span,
containerStart: containerStart,
sliceStart: sliceStart,
sliceBytes: sliceBytes,
});
}
function maybeUnzip(data) {
if (data[0] === 31 && data[1] === 139) {
return unzip(data);
}
return data;
}
export default class CraiIndex {

@@ -31,65 +36,59 @@ /**

this.filehandle = open(args.url, args.path, args.filehandle);
this._parseCache = new AbortablePromiseCache({
cache: new QuickLRU({ maxSize: 1 }),
fill: (_data, _signal) => this.parseIndex(),
});
}
parseIndex() {
async parseIndex() {
const index = {};
return this.filehandle
.readFile()
.then(data => {
if (data[0] === 31 && data[1] === 139) {
return unzip(data);
const uncompressedBuffer = maybeUnzip(await this.filehandle.readFile());
if (uncompressedBuffer.length > 4 &&
uncompressedBuffer.readUInt32LE(0) === BAI_MAGIC) {
throw new CramMalformedError('invalid .crai index file. note: file appears to be a .bai index. this is technically legal but please open a github issue if you need support');
}
// interpret the text as regular ascii, since it is
// supposed to be only digits and whitespace characters
// this is written in a deliberately low-level fashion for performance,
// because some .crai files can be pretty large.
let currentRecord = [];
let currentString = '';
for (const charCode of uncompressedBuffer) {
if ((charCode >= 48 && charCode <= 57) /* 0-9 */ ||
(!currentString && charCode === 45) /* leading - */) {
currentString += String.fromCharCode(charCode);
}
return data;
})
.then(uncompressedBuffer => {
if (uncompressedBuffer.length > 4 &&
uncompressedBuffer.readUInt32LE(0) === BAI_MAGIC) {
throw new CramMalformedError('invalid .crai index file. note: file appears to be a .bai index. this is technically legal but please open a github issue if you need support');
else if (charCode === 9 /* \t */) {
currentRecord.push(Number.parseInt(currentString, 10));
currentString = '';
}
// interpret the text as regular ascii, since it is
// supposed to be only digits and whitespace characters
// this is written in a deliberately low-level fashion for performance,
// because some .crai files can be pretty large.
let currentRecord = [];
let currentString = '';
for (const charCode of uncompressedBuffer) {
if ((charCode >= 48 && charCode <= 57) /* 0-9 */ ||
(!currentString && charCode === 45) /* leading - */) {
currentString += String.fromCharCode(charCode);
}
else if (charCode === 9 /* \t */) {
currentRecord.push(Number.parseInt(currentString, 10));
currentString = '';
}
else if (charCode === 10 /* \n */) {
currentRecord.push(Number.parseInt(currentString, 10));
currentString = '';
addRecordToIndex(index, currentRecord);
currentRecord = [];
}
else if (charCode !== 13 /* \r */ && charCode !== 32 /* space */) {
// if there are other characters in the file besides
// space and \r, something is wrong.
throw new CramMalformedError('invalid .crai index file');
}
}
// if the file ends without a \n, we need to flush our buffers
if (currentString) {
else if (charCode === 10 /* \n */) {
currentRecord.push(Number.parseInt(currentString, 10));
}
if (currentRecord.length === 6) {
currentString = '';
addRecordToIndex(index, currentRecord);
currentRecord = [];
}
// sort each of them by start
Object.entries(index).forEach(([seqId, ent]) => {
index[seqId] = ent.sort((a, b) => a.start - b.start || a.span - b.span);
});
return index;
else if (charCode !== 13 /* \r */ && charCode !== 32 /* space */) {
// if there are other characters in the file besides
// space and \r, something is wrong.
throw new CramMalformedError('invalid .crai index file');
}
}
// if the file ends without a \n, we need to flush our buffers
if (currentString) {
currentRecord.push(Number.parseInt(currentString, 10));
}
if (currentRecord.length === 6) {
addRecordToIndex(index, currentRecord);
}
// sort each of them by start
Object.entries(index).forEach(([seqId, ent]) => {
const e2 = ent;
index[seqId] = e2.sort((a, b) => a.start - b.start || a.span - b.span);
});
return index;
}
getIndex(opts = {}) {
return this._parseCache.get('index', null, opts.signal);
getIndex() {
if (!this.parseIndexP) {
this.parseIndexP = this.parseIndex().catch((e) => {
this.parseIndexP = undefined;
throw e;
});
}
return this.parseIndexP;
}

@@ -96,0 +95,0 @@ /**

@@ -7,5 +7,2 @@ import CramCodec from './_base';

this.instantiateCodec = instantiateCodec;
if (dataType !== 'byteArray') {
throw new TypeError(`byteArrayLength does not support data type ${dataType}`);
}
}

@@ -33,5 +30,5 @@ decode(slice, coreDataBlock, blocksByContentId, cursors) {

}
'_getLengthCodec _getDataCodec'
.split(' ')
.forEach(method => tinyMemoize(ByteArrayStopCodec, method));
'_getLengthCodec _getDataCodec'.split(' ').forEach(method => {
tinyMemoize(ByteArrayStopCodec, method);
});
//# sourceMappingURL=byteArrayLength.js.map

@@ -6,5 +6,4 @@ import CramCodec, { Cursor, Cursors } from './_base';

export default class ByteArrayStopCodec extends CramCodec<'byteArray', ByteArrayStopCramEncoding['parameters']> {
constructor(parameters: ByteArrayStopCramEncoding['parameters'], dataType: 'byteArray');
decode(slice: CramSlice, coreDataBlock: CramFileBlock, blocksByContentId: Record<number, CramFileBlock>, cursors: Cursors): Buffer;
_decodeByteArray(contentBlock: CramFileBlock, cursor: Cursor): Buffer;
}

@@ -5,8 +5,2 @@ import { CramMalformedError } from '../../errors';

export default class ByteArrayStopCodec extends CramCodec {
constructor(parameters, dataType) {
super(parameters, dataType);
if (dataType !== 'byteArray') {
throw new TypeError(`byteArrayStop codec does not support data type ${dataType}`);
}
}
decode(slice, coreDataBlock, blocksByContentId, cursors) {

@@ -13,0 +7,0 @@ const { blockContentId } = this.parameters;

@@ -1,2 +0,1 @@

/* eslint-disable @typescript-eslint/unbound-method */
import { CramMalformedError, CramUnimplementedError } from '../../errors';

@@ -3,0 +2,0 @@ import CramCodec from './_base';

@@ -1,2 +0,1 @@

/* eslint-disable @typescript-eslint/unbound-method */
import { CramMalformedError } from '../../errors';

@@ -35,3 +34,3 @@ import CramCodec from './_base';

let codes = new Array(this.parameters.numCodes);
for (let i = 0; i < this.parameters.numCodes; i += 1) {
for (let i = 0; i < this.parameters.numCodes; i++) {
codes[i] = {

@@ -38,0 +37,0 @@ symbol: this.parameters.symbols[i],

@@ -62,3 +62,3 @@ import CramCodec from '../codecs/_base';

*/
getTagNames(tagListId: number): string[];
getTagNames(tagListId: number): string[] | undefined;
getCodecForDataSeries<TDataSeries extends DataSeriesEncodingKey>(dataSeriesName: TDataSeries): CramCodec<DataSeriesTypes[TDataSeries]> | undefined;

@@ -65,0 +65,0 @@ toJSON(): any;

@@ -85,9 +85,15 @@ import { instantiateCodec } from '../codecs';

getCodecForTag(tagName) {
if (!this.tagCodecCache[tagName]) {
const test = this.tagCodecCache[tagName];
if (!test) {
const encodingData = this.tagEncoding[tagName];
if (encodingData) {
this.tagCodecCache[tagName] = instantiateCodec(encodingData, 'byteArray');
if (!encodingData) {
throw new Error('Error, no tag encoding');
}
const ret = instantiateCodec(encodingData, 'byteArray');
this.tagCodecCache[tagName] = ret;
return ret;
}
return this.tagCodecCache[tagName];
else {
return test;
}
}

@@ -106,4 +112,6 @@ /**

const encodingData = this.dataSeriesEncoding[dataSeriesName];
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (encodingData) {
const dataType = dataSeriesTypes[dataSeriesName];
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (!dataType) {

@@ -110,0 +118,0 @@ throw new CramMalformedError(`data series name ${dataSeriesName} not defined in file compression header`);

@@ -94,3 +94,5 @@ import { Buffer } from 'buffer';

.split(' ')
.forEach(method => tinyMemoize(CramContainer, method));
.forEach(method => {
tinyMemoize(CramContainer, method);
});
//# sourceMappingURL=index.js.map

@@ -78,3 +78,3 @@ import { Buffer } from 'buffer';

};
}, position: number, size?: number, preReadBuffer?: undefined): Promise<(T & {
}, position: number, size?: number, preReadBuffer?: Buffer): Promise<(T & {
_endPosition: number;

@@ -81,0 +81,0 @@ _size: number;

@@ -48,6 +48,5 @@ import { Buffer } from 'buffer';

};
// cache of features in a slice, keyed by the
// slice offset. caches all of the features in a slice, or none.
// the cache is actually used by the slice object, it's just
// kept here at the level of the file
// cache of features in a slice, keyed by the slice offset. caches all of
// the features in a slice, or none. the cache is actually used by the
// slice object, it's just kept here at the level of the file
this.featureCache = new QuickLRU({

@@ -202,3 +201,3 @@ maxSize: this.options.cacheSize,

}
async _parseSection(section, position, size = section.maxLength, preReadBuffer = undefined) {
async _parseSection(section, position, size = section.maxLength, preReadBuffer) {
let buffer;

@@ -248,4 +247,4 @@ if (preReadBuffer) {

ransuncompress(inputBuffer, outputBuffer);
//htscodecs r4x8 is slower, but compatible.
//htscodecs.r4x8_uncompress(inputBuffer, outputBuffer);
// htscodecs r4x8 is slower, but compatible.
// htscodecs.r4x8_uncompress(inputBuffer, outputBuffer);
}

@@ -314,5 +313,5 @@ else if (compressionMethod === 'rans4x16') {

}
'getDefinition getSectionParsers getSamHeader'
.split(' ')
.forEach(method => tinyMemoize(CramFile, method));
'getDefinition getSectionParsers getSamHeader'.split(' ').forEach(method => {
tinyMemoize(CramFile, method);
});
//# sourceMappingURL=file.js.map

@@ -78,6 +78,3 @@ import Constants from './constants';

// put down a chunk of sequence up to the next read feature
const chunk = refRegion.seq.slice(regionPos, regionPos +
cramRecord.readFeatures[currentReadFeature].pos -
bases.length -
1);
const chunk = refRegion.seq.slice(regionPos, regionPos + feature.pos - bases.length - 1);
bases += chunk;

@@ -109,5 +106,2 @@ regionPos += chunk.length;

function decodeBaseSubstitution(cramRecord, refRegion, compressionScheme, readFeature) {
if (!refRegion) {
return;
}
// decode base substitution code using the substitution matrix

@@ -114,0 +108,0 @@ const refCoord = readFeature.refPos - refRegion.start;

@@ -1,2 +0,1 @@

import Long from 'long';
import { CramMalformedError } from '../../errors';

@@ -80,3 +79,3 @@ import { BamFlagsDecoder, CramFlagsDecoder, MateFlagsDecoder, } from '../record';

if (tagType === 'I') {
return Long.fromBytesLE(buffer).toNumber();
return new Uint32Array(buffer.buffer)[0];
}

@@ -254,8 +253,7 @@ if (tagType === 'i') {

const tagType = tagId.slice(2, 3);
const tagCodec = compressionScheme.getCodecForTag(tagId);
if (!tagCodec) {
throw new CramMalformedError(`no codec defined for auxiliary tag ${tagId}`);
}
const tagData = tagCodec.decode(slice, coreDataBlock, blocksByContentId, cursors);
tags[tagName] = parseTagData(tagType, tagData);
const tagData = compressionScheme
.getCodecForTag(tagId)
.decode(slice, coreDataBlock, blocksByContentId, cursors);
tags[tagName] =
typeof tagData === 'number' ? tagData : parseTagData(tagType, tagData);
}

@@ -262,0 +260,0 @@ let readFeatures;

@@ -11,4 +11,5 @@ import CramRecord from '../record';

containerPosition: number;
sliceSize: number;
private file;
constructor(container: CramContainer, containerPosition: number, _unused: number);
constructor(container: CramContainer, containerPosition: number, sliceSize: number);
getHeader(): Promise<{

@@ -40,3 +41,3 @@ parsedContent: {

_getBlocksContentIdIndex(): Promise<Record<number, CramFileBlock>>;
getBlockByContentId(id: number): Promise<CramFileBlock>;
getBlockByContentId(id: number): Promise<CramFileBlock | undefined>;
getReferenceRegion(): Promise<{

@@ -43,0 +44,0 @@ seq: any;

@@ -58,10 +58,8 @@ import { CramArgumentError, CramMalformedError } from '../../errors';

/**
* @private establishes a mate-pair relationship between two records in the same slice.
* CRAM compresses mate-pair relationships between records in the same slice down into
* just one record having the index in the slice of its mate
* @private establishes a mate-pair relationship between two records in the
* same slice. CRAM compresses mate-pair relationships between records in the
* same slice down into just one record having the index in the slice of its
* mate
*/
function associateIntraSliceMate(allRecords, currentRecordNumber, thisRecord, mateRecord) {
if (!mateRecord) {
throw new CramMalformedError('could not resolve intra-slice mate pairs, file seems truncated or malformed');
}
const complicatedMultiSegment = !!(mateRecord.mate ||

@@ -128,5 +126,6 @@ (mateRecord.mateRecordNumber !== undefined &&

export default class CramSlice {
constructor(container, containerPosition, _unused) {
constructor(container, containerPosition, sliceSize) {
this.container = container;
this.containerPosition = containerPosition;
this.sliceSize = sliceSize;
this.file = container.file;

@@ -178,3 +177,2 @@ }

const blocks = await this.getBlocks();
// the core data block is always the first block in the slice
return blocks[0];

@@ -256,5 +254,2 @@ }

const sliceHeader = await this.getHeader();
if (sliceHeader === undefined) {
throw new Error('slice header undefined');
}
const blocksByContentId = await this._getBlocksContentIdIndex();

@@ -308,3 +303,3 @@ // check MD5 of reference if available

};
let records = new Array(sliceHeader.parsedContent.numRecords);
const records = new Array(sliceHeader.parsedContent.numRecords);
for (let i = 0; i < records.length; i += 1) {

@@ -324,3 +319,2 @@ try {

console.warn('read attempted beyond end of buffer, file seems truncated.');
records = records.filter(r => !!r);
break;

@@ -415,5 +409,5 @@ }

// memoize several methods in the class for performance
'getHeader getBlocks _getBlocksContentIdIndex'
.split(' ')
.forEach(method => tinyMemoize(CramSlice, method));
'getHeader getBlocks _getBlocksContentIdIndex'.split(' ').forEach(method => {
tinyMemoize(CramSlice, method);
});
//# sourceMappingURL=index.js.map

@@ -29,5 +29,2 @@ import { CramUnimplementedError } from './errors';

this.index = args.index;
if (!this.index.getEntriesForRange) {
throw new Error('invalid arguments: not an index');
}
}

@@ -34,0 +31,0 @@ /**

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { CramMalformedError } from '../errors';

@@ -3,0 +3,0 @@ import { TF_SHIFT } from './constants';

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { TF_SHIFT } from './constants';

@@ -3,0 +3,0 @@ import Decoding from './decoding';

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { CramMalformedError } from '../errors';

@@ -3,0 +3,0 @@ import { RANS_BYTE_L } from './constants';

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { CramMalformedError } from '../errors';

@@ -3,0 +3,0 @@ import { TOTFREQ } from './constants';

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { Buffer } from 'buffer';

@@ -3,0 +3,0 @@ import { CramMalformedError } from '../errors';

{
"name": "@gmod/cram",
"version": "3.0.4",
"version": "3.0.5",
"description": "read CRAM files with pure Javascript",

@@ -24,4 +24,5 @@ "license": "MIT",

"scripts": {
"test": "jest",
"lint": "eslint --report-unused-disable-directives --max-warnings 0 src test",
"test": "vitest",
"lint": "eslint --report-unused-disable-directives --max-warnings 0",
"format": "prettier --write .",
"docs": "documentation readme --shallow src/indexedCramFile.ts --section=IndexedCramFile; documentation readme --shallow src/cramFile/file.ts --section=CramFile; documentation readme --shallow src/craiIndex.ts --section=CraiIndex; documentation readme --shallow errors.ts '--section=Exception Classes'; documentation readme --shallow src/cramFile/file.ts --section=CramFile; documentation readme --shallow src/cramFile/record.ts --section=CramRecord",

@@ -34,3 +35,3 @@ "prebuild": "npm run clean",

"postbuild": "webpack",
"prepack": "npm test && npm run build && cp dist/errors.js errors.js",
"prepack": "npm test run && npm run build && cp dist/errors.js errors.js",
"postpublish": "rm errors.js",

@@ -46,3 +47,2 @@ "postversion": "git push --follow-tags"

"dependencies": {
"@gmod/abortable-promise-cache": "^2.0.0",
"@jkbonfield/htscodecs": "^0.5.1",

@@ -59,3 +59,2 @@ "bzip2": "^0.1.1",

"@gmod/indexedfasta": "^2.1.0",
"@types/jest": "^29.5.12",
"@types/long": "^4.0.0",

@@ -66,14 +65,15 @@ "@types/md5": "^2.3.2",

"@typescript-eslint/parser": "^8.0.0",
"@vitest/coverage-v8": "^2.0.5",
"buffer": "^6.0.3",
"documentation": "^14.0.3",
"eslint": "^9.8.0",
"eslint": "^9.9.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^55.0.0",
"jest": "^29.3.1",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"rimraf": "^6.0.1",
"ts-jest": "^29.1.2",
"typescript": "^5.0.3",
"typescript-eslint": "^8.0.1",
"vitest": "^2.0.5",
"webpack": "^5.90.3",

@@ -80,0 +80,0 @@ "webpack-cli": "^5.0.1"

@@ -1,3 +0,1 @@

import AbortablePromiseCache from '@gmod/abortable-promise-cache'
import QuickLRU from 'quick-lru'
import { unzip } from './unzip'

@@ -9,3 +7,3 @@ import { open } from './io'

const BAI_MAGIC = 21578050 // BAI\1
const BAI_MAGIC = 21_578_050 // BAI\1

@@ -20,3 +18,3 @@ export interface Slice {

type ParsedIndex = Record<string, Slice[]>
type ParsedIndex = Record<string, Slice[] | undefined>

@@ -26,17 +24,27 @@ function addRecordToIndex(index: ParsedIndex, record: number[]) {

if (!index[seqId]) {
index[seqId] = []
const s = seqId!
if (!index[s]) {
index[s] = []
}
index[seqId].push({
start,
span,
containerStart,
sliceStart,
sliceBytes,
index[s].push({
start: start!,
span: span!,
containerStart: containerStart!,
sliceStart: sliceStart!,
sliceBytes: sliceBytes!,
})
}
// Transparently decompress gzip-compressed data. Gzip streams begin with
// the magic bytes 0x1f 0x8b (RFC 1952); any other data is returned as-is.
function maybeUnzip(data: Buffer) {
  const looksGzipped = data[0] === 31 && data[1] === 139
  return looksGzipped ? unzip(data) : data
}
export default class CraiIndex {
// A CRAM index (.crai) is a gzipped tab delimited file containing the following columns:
// A CRAM index (.crai) is a gzipped tab delimited file containing the
// following columns:
//
// 1. Sequence id

@@ -49,3 +57,4 @@ // 2. Alignment start

// Each line represents a slice in the CRAM file. Please note that all slices must be listed in index file.
private _parseCache: AbortablePromiseCache<unknown, ParsedIndex>
private parseIndexP?: Promise<ParsedIndex>
private filehandle: Filehandle

@@ -62,74 +71,66 @@

this.filehandle = open(args.url, args.path, args.filehandle)
this._parseCache = new AbortablePromiseCache<unknown, ParsedIndex>({
cache: new QuickLRU({ maxSize: 1 }),
fill: (_data, _signal) => this.parseIndex(),
})
}
parseIndex() {
async parseIndex() {
const index: ParsedIndex = {}
return this.filehandle
.readFile()
.then(data => {
if (data[0] === 31 && data[1] === 139) {
return unzip(data)
}
return data
})
.then(uncompressedBuffer => {
if (
uncompressedBuffer.length > 4 &&
uncompressedBuffer.readUInt32LE(0) === BAI_MAGIC
) {
throw new CramMalformedError(
'invalid .crai index file. note: file appears to be a .bai index. this is technically legal but please open a github issue if you need support',
)
}
// interpret the text as regular ascii, since it is
// supposed to be only digits and whitespace characters
// this is written in a deliberately low-level fashion for performance,
// because some .crai files can be pretty large.
let currentRecord: number[] = []
let currentString = ''
for (const charCode of uncompressedBuffer) {
if (
(charCode >= 48 && charCode <= 57) /* 0-9 */ ||
(!currentString && charCode === 45) /* leading - */
) {
currentString += String.fromCharCode(charCode)
} else if (charCode === 9 /* \t */) {
currentRecord.push(Number.parseInt(currentString, 10))
currentString = ''
} else if (charCode === 10 /* \n */) {
currentRecord.push(Number.parseInt(currentString, 10))
currentString = ''
addRecordToIndex(index, currentRecord)
currentRecord = []
} else if (charCode !== 13 /* \r */ && charCode !== 32 /* space */) {
// if there are other characters in the file besides
// space and \r, something is wrong.
throw new CramMalformedError('invalid .crai index file')
}
}
const uncompressedBuffer = maybeUnzip(await this.filehandle.readFile())
if (
uncompressedBuffer.length > 4 &&
uncompressedBuffer.readUInt32LE(0) === BAI_MAGIC
) {
throw new CramMalformedError(
'invalid .crai index file. note: file appears to be a .bai index. this is technically legal but please open a github issue if you need support',
)
}
// interpret the text as regular ascii, since it is
// supposed to be only digits and whitespace characters
// this is written in a deliberately low-level fashion for performance,
// because some .crai files can be pretty large.
let currentRecord: number[] = []
let currentString = ''
for (const charCode of uncompressedBuffer) {
if (
(charCode >= 48 && charCode <= 57) /* 0-9 */ ||
(!currentString && charCode === 45) /* leading - */
) {
currentString += String.fromCharCode(charCode)
} else if (charCode === 9 /* \t */) {
currentRecord.push(Number.parseInt(currentString, 10))
currentString = ''
} else if (charCode === 10 /* \n */) {
currentRecord.push(Number.parseInt(currentString, 10))
currentString = ''
addRecordToIndex(index, currentRecord)
currentRecord = []
} else if (charCode !== 13 /* \r */ && charCode !== 32 /* space */) {
// if there are other characters in the file besides
// space and \r, something is wrong.
throw new CramMalformedError('invalid .crai index file')
}
}
// if the file ends without a \n, we need to flush our buffers
if (currentString) {
currentRecord.push(Number.parseInt(currentString, 10))
}
if (currentRecord.length === 6) {
addRecordToIndex(index, currentRecord)
}
// if the file ends without a \n, we need to flush our buffers
if (currentString) {
currentRecord.push(Number.parseInt(currentString, 10))
}
if (currentRecord.length === 6) {
addRecordToIndex(index, currentRecord)
}
// sort each of them by start
Object.entries(index).forEach(([seqId, ent]) => {
index[seqId] = ent.sort(
(a, b) => a.start - b.start || a.span - b.span,
)
})
return index
})
// sort each of them by start
Object.entries(index).forEach(([seqId, ent]) => {
const e2 = ent!
index[seqId] = e2.sort((a, b) => a.start - b.start || a.span - b.span)
})
return index
}
getIndex(opts: { signal?: AbortSignal } = {}) {
return this._parseCache.get('index', null, opts.signal)
// Return a promise for the parsed .crai index. The index is parsed at most
// once: the in-flight promise is memoized on parseIndexP so concurrent
// callers share a single parse.
getIndex() {
if (!this.parseIndexP) {
this.parseIndexP = this.parseIndex().catch((e: unknown) => {
// clear the memoized promise on failure so a later getIndex() can retry
this.parseIndexP = undefined
throw e
})
}
return this.parseIndexP
}

@@ -136,0 +137,0 @@

@@ -26,7 +26,2 @@ import CramCodec, { Cursors } from './_base'

this.instantiateCodec = instantiateCodec
if (dataType !== 'byteArray') {
throw new TypeError(
`byteArrayLength does not support data type ${dataType}`,
)
}
}

@@ -75,4 +70,4 @@

'_getLengthCodec _getDataCodec'
.split(' ')
.forEach(method => tinyMemoize(ByteArrayStopCodec, method))
'_getLengthCodec _getDataCodec'.split(' ').forEach(method => {
tinyMemoize(ByteArrayStopCodec, method)
})

@@ -13,14 +13,2 @@ import { CramMalformedError } from '../../errors'

> {
constructor(
parameters: ByteArrayStopCramEncoding['parameters'],
dataType: 'byteArray',
) {
super(parameters, dataType)
if (dataType !== 'byteArray') {
throw new TypeError(
`byteArrayStop codec does not support data type ${dataType}`,
)
}
}
decode(

@@ -27,0 +15,0 @@ slice: CramSlice,

@@ -1,2 +0,1 @@

/* eslint-disable @typescript-eslint/unbound-method */
import { CramMalformedError, CramUnimplementedError } from '../../errors'

@@ -67,4 +66,4 @@ import CramCodec, { Cursor, Cursors } from './_base'

}
return contentBlock.content[cursor.bytePosition++]
return contentBlock.content[cursor.bytePosition++]!
}
}

@@ -20,3 +20,3 @@ export class CramBufferOverrunError extends Error {}

val <<= 1
val |= (data[cursor.bytePosition] >> cursor.bitPosition) & 1
val |= (data[cursor.bytePosition]! >> cursor.bitPosition) & 1
cursor.bitPosition -= 1

@@ -23,0 +23,0 @@ if (cursor.bitPosition < 0) {

@@ -1,2 +0,1 @@

/* eslint-disable @typescript-eslint/unbound-method */
import { CramMalformedError } from '../../errors'

@@ -51,3 +50,3 @@ import CramCodec, { Cursor, Cursors } from './_base'

// decoding
if (this.sortedCodes[0].bitLength === 0) {
if (this.sortedCodes[0]!.bitLength === 0) {
this._decode = this._decodeZeroLengthCode

@@ -62,6 +61,6 @@ }

)
for (let i = 0; i < this.parameters.numCodes; i += 1) {
for (let i = 0; i < this.parameters.numCodes; i++) {
codes[i] = {
symbol: this.parameters.symbols[i],
bitLength: this.parameters.bitLengths[i],
symbol: this.parameters.symbols[i]!,
bitLength: this.parameters.bitLengths[i]!,
}

@@ -79,3 +78,3 @@ }

}
this.codeBook[code.bitLength].push(code.symbol)
this.codeBook[code.bitLength]!.push(code.symbol)
})

@@ -123,3 +122,3 @@ }

for (let i = 0; i < this.sortedBitCodes.length; i += 1) {
this.bitCodeToValue[this.sortedCodes[i].bitCode] = i
this.bitCodeToValue[this.sortedCodes[i]!.bitCode] = i
}

@@ -143,3 +142,3 @@ }

_decodeZeroLengthCode() {
return this.sortedCodes[0].value
return this.sortedCodes[0]!.value
}

@@ -153,3 +152,3 @@

for (let i = 0; i < this.sortedCodes.length; i += 1) {
const length = this.sortedCodes[i].bitLength
const length = this.sortedCodes[i]!.bitLength
bits <<= length - prevLen

@@ -159,5 +158,5 @@ bits |= getBits(input, coreCursor, length - prevLen)

{
const index = this.bitCodeToValue[bits]
const index = this.bitCodeToValue[bits]!
if (index > -1 && this.sortedBitLengthsByBitCode[index] === length) {
return this.sortedValuesByBitCode[index]
return this.sortedValuesByBitCode[index]!
}

@@ -167,3 +166,3 @@

let j = i;
this.sortedCodes[j + 1].bitLength === length &&
this.sortedCodes[j + 1]!.bitLength === length &&
j < this.sortedCodes.length;

@@ -170,0 +169,0 @@ j += 1

@@ -56,26 +56,26 @@ import { instantiateCodec } from '../codecs'

matrix[0][(byteArray[0] >> 6) & 3] = 'C'
matrix[0][(byteArray[0] >> 4) & 3] = 'G'
matrix[0][(byteArray[0] >> 2) & 3] = 'T'
matrix[0][(byteArray[0] >> 0) & 3] = 'N'
matrix[0]![(byteArray[0]! >> 6) & 3] = 'C'
matrix[0]![(byteArray[0]! >> 4) & 3] = 'G'
matrix[0]![(byteArray[0]! >> 2) & 3] = 'T'
matrix[0]![(byteArray[0]! >> 0) & 3] = 'N'
matrix[1][(byteArray[1] >> 6) & 3] = 'A'
matrix[1][(byteArray[1] >> 4) & 3] = 'G'
matrix[1][(byteArray[1] >> 2) & 3] = 'T'
matrix[1][(byteArray[1] >> 0) & 3] = 'N'
matrix[1]![(byteArray[1]! >> 6) & 3] = 'A'
matrix[1]![(byteArray[1]! >> 4) & 3] = 'G'
matrix[1]![(byteArray[1]! >> 2) & 3] = 'T'
matrix[1]![(byteArray[1]! >> 0) & 3] = 'N'
matrix[2][(byteArray[2] >> 6) & 3] = 'A'
matrix[2][(byteArray[2] >> 4) & 3] = 'C'
matrix[2][(byteArray[2] >> 2) & 3] = 'T'
matrix[2][(byteArray[2] >> 0) & 3] = 'N'
matrix[2]![(byteArray[2]! >> 6) & 3] = 'A'
matrix[2]![(byteArray[2]! >> 4) & 3] = 'C'
matrix[2]![(byteArray[2]! >> 2) & 3] = 'T'
matrix[2]![(byteArray[2]! >> 0) & 3] = 'N'
matrix[3][(byteArray[3] >> 6) & 3] = 'A'
matrix[3][(byteArray[3] >> 4) & 3] = 'C'
matrix[3][(byteArray[3] >> 2) & 3] = 'G'
matrix[3][(byteArray[3] >> 0) & 3] = 'N'
matrix[3]![(byteArray[3]! >> 6) & 3] = 'A'
matrix[3]![(byteArray[3]! >> 4) & 3] = 'C'
matrix[3]![(byteArray[3]! >> 2) & 3] = 'G'
matrix[3]![(byteArray[3]! >> 0) & 3] = 'N'
matrix[4][(byteArray[4] >> 6) & 3] = 'A'
matrix[4][(byteArray[4] >> 4) & 3] = 'C'
matrix[4][(byteArray[4] >> 2) & 3] = 'G'
matrix[4][(byteArray[4] >> 0) & 3] = 'T'
matrix[4]![(byteArray[4]! >> 6) & 3] = 'A'
matrix[4]![(byteArray[4]! >> 4) & 3] = 'C'
matrix[4]![(byteArray[4]! >> 2) & 3] = 'G'
matrix[4]![(byteArray[4]! >> 0) & 3] = 'T'

@@ -116,12 +116,17 @@ return matrix

getCodecForTag(tagName: string): CramCodec {
if (!this.tagCodecCache[tagName]) {
const test = this.tagCodecCache[tagName]
if (!test) {
const encodingData = this.tagEncoding[tagName]
if (encodingData) {
this.tagCodecCache[tagName] = instantiateCodec(
encodingData,
'byteArray', // all tags are byte array data
)
if (!encodingData) {
throw new Error('Error, no tag encoding')
}
const ret = instantiateCodec(
encodingData,
'byteArray', // all tags are byte array data
)
this.tagCodecCache[tagName] = ret
return ret
} else {
return test
}
return this.tagCodecCache[tagName]
}

@@ -145,4 +150,6 @@

const encodingData = this.dataSeriesEncoding[dataSeriesName]
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (encodingData) {
const dataType = dataSeriesTypes[dataSeriesName]
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (!dataType) {

@@ -149,0 +156,0 @@ throw new CramMalformedError(

@@ -133,2 +133,4 @@ import { Buffer } from 'buffer'

.split(' ')
.forEach(method => tinyMemoize(CramContainer, method))
.forEach(method => {
tinyMemoize(CramContainer, method)
})

@@ -93,6 +93,5 @@ import { Buffer } from 'buffer'

// cache of features in a slice, keyed by the
// slice offset. caches all of the features in a slice, or none.
// the cache is actually used by the slice object, it's just
// kept here at the level of the file
// cache of features in a slice, keyed by the slice offset. caches all of
// the features in a slice, or none. the cache is actually used by the
// slice object, it's just kept here at the level of the file
this.featureCache = new QuickLRU({

@@ -283,3 +282,3 @@ maxSize: this.options.cacheSize,

size = section.maxLength,
preReadBuffer = undefined,
preReadBuffer?: Buffer,
) {

@@ -335,4 +334,4 @@ let buffer: Buffer

ransuncompress(inputBuffer, outputBuffer)
//htscodecs r4x8 is slower, but compatible.
//htscodecs.r4x8_uncompress(inputBuffer, outputBuffer);
// htscodecs r4x8 is slower, but compatible.
// htscodecs.r4x8_uncompress(inputBuffer, outputBuffer);
} else if (compressionMethod === 'rans4x16') {

@@ -428,4 +427,4 @@ htscodecs.r4x16_uncompress(inputBuffer, outputBuffer)

'getDefinition getSectionParsers getSamHeader'
.split(' ')
.forEach(method => tinyMemoize(CramFile, method))
'getDefinition getSectionParsers getSamHeader'.split(' ').forEach(method => {
tinyMemoize(CramFile, method)
})

@@ -45,3 +45,3 @@ import Constants from './constants'

if (currentReadFeature < cramRecord.readFeatures.length) {
const feature = cramRecord.readFeatures[currentReadFeature]
const feature = cramRecord.readFeatures[currentReadFeature]!
if (feature.code === 'Q' || feature.code === 'q') {

@@ -94,6 +94,3 @@ currentReadFeature += 1

regionPos,
regionPos +
cramRecord.readFeatures[currentReadFeature].pos -
bases.length -
1,
regionPos + feature.pos - bases.length - 1,
)

@@ -136,6 +133,2 @@ bases += chunk

) {
if (!refRegion) {
return
}
// decode base substitution code using the substitution matrix

@@ -151,3 +144,3 @@ const refCoord = readFeature.refPos - refRegion.start

}
const substitutionScheme = compressionScheme.substitutionMatrix[baseNumber]
const substitutionScheme = compressionScheme.substitutionMatrix[baseNumber]!
const base = substitutionScheme[readFeature.data]

@@ -154,0 +147,0 @@ if (base) {

@@ -182,4 +182,4 @@ import { TupleOf } from '../typescript'

const key =
String.fromCharCode(buffer[offset]) +
String.fromCharCode(buffer[offset + 1])
String.fromCharCode(buffer[offset]!) +
String.fromCharCode(buffer[offset + 1]!)
offset += 2

@@ -554,4 +554,4 @@

const key =
String.fromCharCode(buffer[offset]) +
String.fromCharCode(buffer[offset + 1])
String.fromCharCode(buffer[offset]!) +
String.fromCharCode(buffer[offset + 1]!)
offset += 2

@@ -558,0 +558,0 @@

@@ -1,2 +0,1 @@

import Long from 'long'
import { CramMalformedError } from '../../errors'

@@ -25,3 +24,3 @@ import {

for (let i = 0; i < buffer.length && buffer[i] !== 0; i++) {
r += String.fromCharCode(buffer[i])
r += String.fromCharCode(buffer[i]!)
}

@@ -35,5 +34,5 @@ return r

*/
function parseTagValueArray(buffer: Buffer) {
const arrayType = String.fromCharCode(buffer[0])
const length = Int32Array.from(buffer.slice(1))[0]
function parseTagValueArray(buffer: Uint8Array) {
const arrayType = String.fromCharCode(buffer[0]!)
const length = Int32Array.from(buffer.slice(1))[0]!

@@ -46,3 +45,3 @@ const array: number[] = new Array(length)

for (let i = 0; i < length; i += 1) {
array[i] = arr[i]
array[i] = arr[i]!
}

@@ -52,3 +51,3 @@ } else if (arrayType === 'C') {

for (let i = 0; i < length; i += 1) {
array[i] = arr[i]
array[i] = arr[i]!
}

@@ -58,3 +57,3 @@ } else if (arrayType === 's') {

for (let i = 0; i < length; i += 1) {
array[i] = arr[i]
array[i] = arr[i]!
}

@@ -64,3 +63,3 @@ } else if (arrayType === 'S') {

for (let i = 0; i < length; i += 1) {
array[i] = arr[i]
array[i] = arr[i]!
}

@@ -70,3 +69,3 @@ } else if (arrayType === 'i') {

for (let i = 0; i < length; i += 1) {
array[i] = arr[i]
array[i] = arr[i]!
}

@@ -76,3 +75,3 @@ } else if (arrayType === 'I') {

for (let i = 0; i < length; i += 1) {
array[i] = arr[i]
array[i] = arr[i]!
}

@@ -82,3 +81,3 @@ } else if (arrayType === 'f') {

for (let i = 0; i < length; i += 1) {
array[i] = arr[i]
array[i] = arr[i]!
}

@@ -92,3 +91,3 @@ } else {

function parseTagData(tagType: string, buffer: any) {
function parseTagData(tagType: string, buffer: Uint8Array) {
if (tagType === 'Z') {

@@ -98,6 +97,6 @@ return readNullTerminatedString(buffer)

if (tagType === 'A') {
return String.fromCharCode(buffer[0])
return String.fromCharCode(buffer[0]!)
}
if (tagType === 'I') {
return Long.fromBytesLE(buffer).toNumber()
return new Uint32Array(buffer.buffer)[0]
}

@@ -117,3 +116,3 @@ if (tagType === 'i') {

if (tagType === 'C') {
return buffer[0] as number
return buffer[0]!
}

@@ -326,23 +325,14 @@ if (tagType === 'f') {

// TN = tag names
const TN = compressionScheme.getTagNames(TLindex)
const TN = compressionScheme.getTagNames(TLindex)!
const ntags = TN.length
for (let i = 0; i < ntags; i += 1) {
const tagId = TN[i]
const tagId = TN[i]!
const tagName = tagId.slice(0, 2)
const tagType = tagId.slice(2, 3)
const tagCodec = compressionScheme.getCodecForTag(tagId)
if (!tagCodec) {
throw new CramMalformedError(
`no codec defined for auxiliary tag ${tagId}`,
)
}
const tagData = tagCodec.decode(
slice,
coreDataBlock,
blocksByContentId,
cursors,
)
tags[tagName] = parseTagData(tagType, tagData)
const tagData = compressionScheme
.getCodecForTag(tagId)
.decode(slice, coreDataBlock, blocksByContentId, cursors)
tags[tagName] =
typeof tagData === 'number' ? tagData : parseTagData(tagType, tagData)
}

@@ -349,0 +339,0 @@

@@ -93,5 +93,6 @@ import { CramArgumentError, CramMalformedError } from '../../errors'

/**
* @private establishes a mate-pair relationship between two records in the same slice.
* CRAM compresses mate-pair relationships between records in the same slice down into
* just one record having the index in the slice of its mate
* @private establishes a mate-pair relationship between two records in the
* same slice. CRAM compresses mate-pair relationships between records in the
* same slice down into just one record having the index in the slice of its
* mate
*/

@@ -104,8 +105,2 @@ function associateIntraSliceMate(

) {
if (!mateRecord) {
throw new CramMalformedError(
'could not resolve intra-slice mate pairs, file seems truncated or malformed',
)
}
const complicatedMultiSegment = !!(

@@ -191,3 +186,3 @@ mateRecord.mate ||

public containerPosition: number,
_unused: number,
public sliceSize: number,
) {

@@ -248,3 +243,3 @@ this.file = container.file

blocks[i] = block
blockPosition = blocks[i]._endPosition
blockPosition = blocks[i]!._endPosition
}

@@ -258,4 +253,3 @@

const blocks = await this.getBlocks()
// the core data block is always the first block in the slice
return blocks[0]
return blocks[0]!
}

@@ -365,6 +359,2 @@

const sliceHeader = await this.getHeader()
if (sliceHeader === undefined) {
throw new Error('slice header undefined')
}
const blocksByContentId = await this._getBlocksContentIdIndex()

@@ -438,3 +428,5 @@

}
let records: CramRecord[] = new Array(sliceHeader.parsedContent.numRecords)
const records: CramRecord[] = new Array(
sliceHeader.parsedContent.numRecords,
)
for (let i = 0; i < records.length; i += 1) {

@@ -466,3 +458,2 @@ try {

)
records = records.filter(r => !!r)
break

@@ -478,3 +469,3 @@ } else {

for (let i = 0; i < records.length; i += 1) {
const { mateRecordNumber } = records[i]
const { mateRecordNumber } = records[i]!
if (mateRecordNumber !== undefined && mateRecordNumber >= 0) {

@@ -484,4 +475,4 @@ associateIntraSliceMate(

i,
records[i],
records[mateRecordNumber],
records[i]!,
records[mateRecordNumber]!,
)

@@ -593,4 +584,4 @@ }

// memoize several methods in the class for performance
'getHeader getBlocks _getBlocksContentIdIndex'
.split(' ')
.forEach(method => tinyMemoize(CramSlice, method))
'getHeader getBlocks _getBlocksContentIdIndex'.split(' ').forEach(method => {
tinyMemoize(CramSlice, method)
})

@@ -23,3 +23,3 @@ import md5 from 'md5'

let offset = initialOffset
const countFlags = buffer[offset]
const countFlags = buffer[offset]!
let result: number

@@ -30,7 +30,7 @@ if (countFlags < 0x80) {

} else if (countFlags < 0xc0) {
result = ((countFlags << 8) | buffer[offset + 1]) & 0x3fff
result = ((countFlags << 8) | buffer[offset + 1]!) & 0x3fff
offset = offset + 2
} else if (countFlags < 0xe0) {
result =
((countFlags << 16) | (buffer[offset + 1] << 8) | buffer[offset + 2]) &
((countFlags << 16) | (buffer[offset + 1]! << 8) | buffer[offset + 2]!) &
0x1fffff

@@ -41,5 +41,5 @@ offset = offset + 3

((countFlags << 24) |
(buffer[offset + 1] << 16) |
(buffer[offset + 2] << 8) |
buffer[offset + 3]) &
(buffer[offset + 1]! << 16) |
(buffer[offset + 2]! << 8) |
buffer[offset + 3]!) &
0x0fffffff

@@ -50,6 +50,6 @@ offset = offset + 4

((countFlags & 0x0f) << 28) |
(buffer[offset + 1] << 20) |
(buffer[offset + 2] << 12) |
(buffer[offset + 3] << 4) |
(buffer[offset + 4] & 0x0f)
(buffer[offset + 1]! << 20) |
(buffer[offset + 2]! << 12) |
(buffer[offset + 3]! << 4) |
(buffer[offset + 4]! & 0x0f)
// x=((0xff & 0x0f)<<28) | (0xff<<20) | (0xff<<12) | (0xff<<4) | (0x0f & 0x0f);

@@ -69,3 +69,3 @@ // TODO *val_p = uv < 0x80000000UL ? uv : -((int32_t) (0xffffffffUL - uv)) - 1;

let offset = initialOffset
const countFlags = buffer[offset]
const countFlags = buffer[offset]!
let n: number | Long

@@ -76,9 +76,9 @@ if (countFlags < 0x80) {

} else if (countFlags < 0xc0) {
n = ((buffer[offset] << 8) | buffer[offset + 1]) & 0x3fff
n = ((buffer[offset]! << 8) | buffer[offset + 1]!) & 0x3fff
offset += 2
} else if (countFlags < 0xe0) {
n =
((buffer[offset] << 16) |
(buffer[offset + 1] << 8) |
buffer[offset + 2]) &
((buffer[offset]! << 16) |
(buffer[offset + 1]! << 8) |
buffer[offset + 2]!) &
0x1fffff

@@ -89,6 +89,6 @@ n = ((countFlags & 63) << 16) | buffer.readUInt16LE(offset + 1)

n =
((buffer[offset] << 24) |
(buffer[offset + 1] << 16) |
(buffer[offset + 2] << 8) |
buffer[offset + 3]) &
((buffer[offset]! << 24) |
(buffer[offset + 1]! << 16) |
(buffer[offset + 2]! << 8) |
buffer[offset + 3]!) &
0x0fffffff

@@ -98,6 +98,6 @@ offset += 4

n =
((buffer[offset] & 15) * 2 ** 32 + (buffer[offset + 1] << 24)) |
((buffer[offset + 2] << 16) |
(buffer[offset + 3] << 8) |
buffer[offset + 4])
((buffer[offset]! & 15) * 2 ** 32 + (buffer[offset + 1]! << 24)) |
((buffer[offset + 2]! << 16) |
(buffer[offset + 3]! << 8) |
buffer[offset + 4]!)
// TODO *val_p = uv < 0x80000000UL ? uv : -((int32_t) (0xffffffffUL - uv)) - 1;

@@ -107,18 +107,18 @@ offset += 5

n =
((((buffer[offset] & 7) << 8) | buffer[offset + 1]) * 2 ** 32 +
(buffer[offset + 2] << 24)) |
((buffer[offset + 3] << 16) |
(buffer[offset + 4] << 8) |
buffer[offset + 5])
((((buffer[offset]! & 7) << 8) | buffer[offset + 1]!) * 2 ** 32 +
(buffer[offset + 2]! << 24)) |
((buffer[offset + 3]! << 16) |
(buffer[offset + 4]! << 8) |
buffer[offset + 5]!)
offset += 6
} else if (countFlags < 0xfe) {
n =
((((buffer[offset] & 3) << 16) |
(buffer[offset + 1] << 8) |
buffer[offset + 2]) *
((((buffer[offset]! & 3) << 16) |
(buffer[offset + 1]! << 8) |
buffer[offset + 2]!) *
2 ** 32 +
(buffer[offset + 3] << 24)) |
((buffer[offset + 4] << 16) |
(buffer[offset + 5] << 8) |
buffer[offset + 6])
(buffer[offset + 3]! << 24)) |
((buffer[offset + 4]! << 16) |
(buffer[offset + 5]! << 8) |
buffer[offset + 6]!)
offset += 7

@@ -179,2 +179,3 @@ } else if (countFlags < 0xff) {

Promise.resolve(res).catch(() => {
delete this[memoAttrName]

@@ -181,0 +182,0 @@ })

@@ -66,5 +66,2 @@ import { CramUnimplementedError } from './errors'

this.index = args.index
if (!this.index.getEntriesForRange) {
throw new Error('invalid arguments: not an index')
}
}

@@ -167,3 +164,3 @@

(item, pos, ary) =>
!pos || item.toString() !== ary[pos - 1].toString(),
!pos || item.toString() !== ary[pos - 1]!.toString(),
)

@@ -170,0 +167,0 @@

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { CramMalformedError } from '../errors'

@@ -3,0 +3,0 @@

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { TF_SHIFT } from './constants'

@@ -3,0 +3,0 @@ import Decoding from './decoding'

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { CramMalformedError } from '../errors'

@@ -3,0 +3,0 @@

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { CramMalformedError } from '../errors'

@@ -3,0 +3,0 @@

@@ -1,2 +0,2 @@

//@ts-nocheck
// @ts-nocheck
import { Buffer } from 'buffer'

@@ -3,0 +3,0 @@ import { CramMalformedError } from '../errors'

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc