Comparing version 0.6.0 to 0.6.3
135
decode.js
@@ -13,2 +13,3 @@ "use strict" | ||
const RECORD_STARTING_ID = 40100 | ||
const STOP_CODE = {} | ||
let strings = EMPTY_ARRAY | ||
@@ -139,2 +140,32 @@ let stringPosition = 0 | ||
break | ||
case 0x1f: | ||
// indefinite length | ||
switch(majorType) { | ||
case 2: // byte string | ||
case 3: // text string | ||
case 4: // array | ||
let array = [] | ||
let value, i = 0 | ||
while ((value = read()) != STOP_CODE) { | ||
array[i++] = value | ||
} | ||
return majorType == 4 ? array : majorType == 3 ? array.join('') : Buffer.concat(array) | ||
case 5: // map | ||
let key | ||
if (currentDecoder.mapsAsObjects) { | ||
let object = {} | ||
while ((key = readKey()) != STOP_CODE) | ||
object[key] = read() | ||
return object | ||
} else { | ||
let map = new Map() | ||
while ((key = read()) != STOP_CODE) | ||
map.set(key, read()) | ||
return map | ||
} | ||
case 7: | ||
return STOP_CODE | ||
default: | ||
throw new Error('Invalid major type for indefinite length ' + majorType) | ||
} | ||
default: | ||
@@ -155,5 +186,5 @@ throw new Error('Unknown token ' + token) | ||
} | ||
if (srcStringEnd == 0 && srcEnd < 120 && token < 16) { | ||
if (srcStringEnd == 0 && srcEnd < 140 && token < 32) { | ||
// for small blocks, avoiding the overhead of the extract call is helpful | ||
let string = /*length < 16 ? */shortStringInJS(token)// : longStringInJS(length) | ||
let string = token < 16 ? shortStringInJS(token) : longStringInJS(token) | ||
if (string != null) | ||
@@ -173,3 +204,3 @@ return string | ||
for (let i = 0; i < token; i++) { | ||
object[read()] = read() | ||
object[readKey()] = read() | ||
} | ||
@@ -219,3 +250,3 @@ return object | ||
else | ||
throw new Error('Unknown extension ' + token) | ||
return new Tag(read()) | ||
} | ||
@@ -228,3 +259,3 @@ case 7: // fixed value | ||
case 0x17: return; // undefined | ||
// case 0x19: // half-precision float | ||
case 0x1f: | ||
default: | ||
@@ -295,2 +326,3 @@ throw new Error('Unknown token ' + token) | ||
stringPosition = 0 | ||
srcStringEnd = 1 // even if a utf-8 string was decoded, must indicate we are in the midst of extracted strings and can't skip strings | ||
string = strings[stringPosition++] | ||
@@ -364,20 +396,2 @@ } | ||
} | ||
/*function readShortString(length) { | ||
let start = position | ||
let end = start + length | ||
while (position < end) { | ||
const byte = src[position++]; | ||
if ((byte & 0x80) > 0) { | ||
position = end | ||
console.log('utf8 slice') | ||
return src.utf8Slice(start, end) | ||
} | ||
} | ||
if (srcStringEnd < end) { | ||
srcStringStart = start | ||
srcStringEnd = start + 8192 | ||
srcString = src.toString('latin1', start, srcStringEnd) | ||
} | ||
return srcString.slice(start - srcStringStart, end - srcStringStart) | ||
}*/ | ||
let fromCharCode = String.fromCharCode | ||
@@ -564,2 +578,73 @@ function longStringInJS(length) { | ||
} | ||
// Cache of recently decoded short map keys, indexed by a 12-bit hash of the
// key's length and leading bytes (4096 slots — see the `& 0xfff` mask below).
let keyCache = new Array(4096)
// Reads one map key from `src` at `position`. Short UTF-8 text-string keys
// (CBOR fixstr: initial byte 0x60-0x77, i.e. lengths 0-23) are matched against
// `keyCache` so repeated keys return a cached string instead of being
// re-decoded; anything else falls back to a standard read().
function readKey() {
	let length = src[position++]
	if (length >= 0x60 && length < 0x78) {
		// fixstr, potentially use key cache
		length = length - 0x60
		if (srcStringEnd >= position) // if it has been extracted, must use it (and faster anyway)
			return srcString.slice(position - srcStringStart, (position += length) - srcStringStart)
		else if (!(srcStringEnd == 0 && srcEnd < 180))
			return readFixedString(length)
	} else { // not cacheable, go back and do a standard read
		position--
		return read()
	}
	// 12-bit cache slot: mixes the key length with its first one or two bytes.
	let key = ((length << 5) ^ (length > 1 ? dataView.getUint16(position) : length > 0 ? src[position] : 0)) & 0xfff
	let entry = keyCache[key]
	let checkPosition = position
	// Stop the word-wise loop 3 bytes early so each 4-byte read stays inside the key.
	let end = position + length - 3
	let chunk
	let i = 0
	if (entry && entry.bytes == length) {
		// Compare cached key bytes against the source: 4 bytes at a time first,
		// then byte-by-byte for the trailing (up to 3) bytes.
		while (checkPosition < end) {
			chunk = dataView.getUint32(checkPosition)
			if (chunk != entry[i++]) {
				checkPosition = 0x70000000 // sentinel: guarantees the hit check below fails
				break
			}
			checkPosition += 4
		}
		end += 3
		while (checkPosition < end) {
			chunk = src[checkPosition++]
			if (chunk != entry[i++]) {
				checkPosition = 0x70000000 // sentinel: guarantees the hit check below fails
				break
			}
		}
		if (checkPosition === end) {
			// Cache hit: advance past the key bytes and reuse the cached string.
			position = checkPosition
			return entry.string
		}
		// Slot collision with different bytes: rebuild the entry below.
		end -= 3
		checkPosition = position
	}
	// Record this key's bytes in the cache slot (same word/byte split as above).
	entry = []
	keyCache[key] = entry
	entry.bytes = length
	while (checkPosition < end) {
		chunk = dataView.getUint32(checkPosition)
		entry.push(chunk)
		checkPosition += 4
	}
	end += 3
	while (checkPosition < end) {
		chunk = src[checkPosition++]
		entry.push(chunk)
	}
	// for small blocks, avoiding the overhead of the extract call is helpful
	let string = length < 16 ? shortStringInJS(length) : longStringInJS(length)
	if (string != null)
		return entry.string = string
	return entry.string = readFixedString(length)
}
/**
 * Wrapper for a CBOR tagged value with no registered extension handler.
 * The decoder returns `new Tag(read())` for unknown tags, exposing the
 * tag's content on the `value` property.
 */
class Tag {
	/** @param {any} value - the decoded content of the unknown tag */
	constructor(value) {
		this.value = value
	}
}
let glbl = typeof window == 'object' ? window : global | ||
@@ -596,2 +681,3 @@ | ||
} | ||
recordDefinition.handlesRead = true | ||
@@ -691,1 +777,4 @@ currentExtensions[40006] = recordDefinition | ||
exports.typedArrays = typedArrays | ||
exports.useRecords = false | ||
exports.mapsAsObjects = true | ||
exports.Tag = Tag |
@@ -272,5 +272,2 @@ "use strict" | ||
targetView.setFloat64(position, value) | ||
/*if (!target[position[4] && !target[position[5] && !target[position[6] && !target[position[7] && !(target[0] & 0x78) < ) { | ||
// something like this can be represented as a float with binary rounding | ||
}*/ | ||
position += 8 | ||
@@ -356,2 +353,10 @@ } | ||
} | ||
if (value[Symbol.iterator]) { | ||
target[position++] = 0x9f // indefinite length array | ||
for (let entry of value) { | ||
encode(entry) | ||
} | ||
target[position++] = 0xff // stop-code | ||
return | ||
} | ||
// no extension found, write as object | ||
@@ -379,2 +384,3 @@ writeObject(value, false) | ||
const writeObject = this.useRecords === false ? this.variableMapSize ? (object) => { | ||
// this method is slightly slower, but generates "preferred serialization" (optimally small for smaller objects) | ||
let keys = Object.keys(object) | ||
@@ -381,0 +387,0 @@ let length = keys.length |
@@ -41,2 +41,5 @@ declare module 'cbor-x' { | ||
} | ||
export class Tag { | ||
value: any | ||
} | ||
} |
@@ -13,3 +13,5 @@ exports.Encoder = require('./encode').Encoder | ||
exports.encode = encoder.encode | ||
exports.C1 = decodeModule.C1 | ||
exports.Tag = decodeModule.Tag | ||
exports.useRecords = false | ||
exports.mapsAsObjects = true | ||
Object.assign(exports, { | ||
@@ -28,4 +30,4 @@ ALWAYS:1, | ||
else | ||
console.warn('For browser usage, directly use msgencoder/decode or msgencoder/encode modules. ' + error.message.split('\n')[0]) | ||
console.warn('For browser usage, directly use encode/decode modules. ' + error.message.split('\n')[0]) | ||
} | ||
} |
{ | ||
"name": "cbor-x", | ||
"author": "Kris Zyp", | ||
"version": "0.6.0", | ||
"version": "0.6.3", | ||
"description": "Ultra-fast CBOR implementation with tag extensions for records and structured cloning", | ||
"license": "MIT", | ||
@@ -6,0 +7,0 @@ "types": "./index.d.ts", |
@@ -9,3 +9,3 @@ # cbor-x | ||
The cbor-x package is an extremely fast CBOR NodeJS/JavaScript implementation. Currently, it is significantly faster than any other known implementations, faster than Avro (for JS), and generally faster than native V8 JSON.stringify/parse. It implements the CBOR format as specificed in [RFC-7049](https://tools.ietf.org/html/rfc7049), numerous [registered IANA tag extensions](https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml) (the `x` in cbor-x), [RFC-8746](https://tools.ietf.org/html/rfc8746) and proposed optional record extension, for defining record structures that makes CBOR even faster and more compact, often over twice as fast as even native JSON functions, several times faster than other JS implementations, and 15-50% more compact. See the performance section for more details. Structured cloning (with support for cyclical references) is supported through these tag extensions. | ||
The cbor-x package is an extremely fast CBOR NodeJS/JavaScript implementation. Currently, it is significantly faster than any other known implementations, faster than Avro (for JS), and generally faster than native V8 JSON.stringify/parse. It implements the CBOR format as specified in [RFC-8949](https://www.rfc-editor.org/rfc/rfc8949.html), numerous [registered IANA tag extensions](https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml) (the `x` in cbor-x), [RFC-8746](https://tools.ietf.org/html/rfc8746) and proposed optional record extension, for defining record structures that makes CBOR even faster and more compact, often over twice as fast as even native JSON functions, several times faster than other JS implementations, and 15-50% more compact. See the performance section for more details. Structured cloning (with support for cyclical references) is supported through these tag extensions. | ||
@@ -190,3 +190,3 @@ ## Basic Usage | ||
## Custom Extensions | ||
You can add your own custom extensions, which can be used to encode specific types/classes in certain ways. This is done by using the `addExtension` function, and specifying the class, extension type code (should be a number greater than 256, all others are reserved for CBOR or cbor-x), and your encode and decode functions (or just the one you need). You can use cbor-x encoding and decoding within your extensions: | ||
You can add your own custom extensions, which can be used to encode specific types/classes in certain ways. This is done by using the `addExtension` function, and specifying the class, extension type code (custom extensions should be a number greater than 40500, all others are reserved for CBOR or cbor-x), and your encode and decode functions (or just the one you need). You can use cbor-x encoding and decoding within your extensions: | ||
``` | ||
@@ -200,3 +200,3 @@ import { addExtension, Encoder } from 'cbor-x'; | ||
Class: MyCustomClass, | ||
tag: 311, // register our own extension code (a tag code > 255) | ||
tag: 43311, // register our own extension code (a tag code) | ||
encode(instance, encode) { | ||
@@ -215,2 +215,8 @@ // define how your custom class should be encoded | ||
## Unknown Tags | ||
If no extension is registered for a tag, the decoder will return an instance of the `Tag` class, where the value provided for the tag will be available in the `value` property of the `Tag` instance. The `Tag` class is an export of the package and decode module. | ||
### CBOR Compliance | ||
The cbor-x package is designed to encode and decode to the CBOR extended generic data model, implementing extensions to support the extended model, and will generally attempt to use preferred serializations where feasible. When duplicate keys are encountered in maps, previous entries will be lost, and the final entry is preserved. | ||
### Additional Performance Optimizations | ||
@@ -223,6 +229,6 @@ Cbor-x is already fast, but here are some tips for making it faster. | ||
## Extensions | ||
Cbor-x uses tag ids 40000 to 40500 for its extensions. | ||
Cbor-x currently uses tag ids 40000 to 40500 for its proposed extensions (until accepted). | ||
## Record Structure Extension Definition | ||
The record struction extension uses tag 40006 to declare a new record structure. This is followed by an array where the first byte indicates the tag id of the record structure to declare and the next element is an array of the field names, and the third element is array of the property values. The extension declaration must be immediately follow by the field names of the record structure. | ||
The record structure extension uses tag 40006 to declare a new record structure. This is followed by an array where the first element indicates the tag id of the record structure to declare, the next element is an array of the field names, and the third element is an array of the property values. The extension declaration must be immediately followed by the field names of the record structure. | ||
@@ -236,5 +242,5 @@ ### Dates | ||
## Alternate Encoding/Package | ||
The high-performance serialization and deserialization algorithms in the msgpackr package are also available in the [msgpackr](https://github.com/kriszyp/msgpackr) for the MessagePack format, with the same API and design. A quick summary of the pros and cons of using MessagePack vs CBOR are: | ||
The high-performance serialization and deserialization algorithms in this package are also available in the [msgpackr](https://github.com/kriszyp/msgpackr) package for the MessagePack format, with the same API and design. A quick summary of the pros and cons of using MessagePack vs CBOR are: | ||
* MessagePack has wider adoption, and, at least with this implementation is slightly more efficient (by roughly 2-4%). | ||
* CBOR has an [official IETF standardization track](https://tools.ietf.org/html/rfc7049), and the record extensions is conceptually/philosophically a better fit for CBOR tags. | ||
* CBOR has an [official IETF standardization track](https://www.rfc-editor.org/rfc/rfc8949.html), and the record extensions is conceptually/philosophically a better fit for CBOR tags. | ||
@@ -241,0 +247,0 @@ ## License |
@@ -36,3 +36,3 @@ //var inspector = require('inspector') | ||
var fs = require('fs') | ||
var sampleData = JSON.parse(fs.readFileSync(__dirname + '/example4.json')) | ||
var sampleData = JSON.parse(fs.readFileSync(__dirname + '/example5.json')) | ||
} else { | ||
@@ -100,2 +100,11 @@ var xhr = new XMLHttpRequest() | ||
var data = sampleData | ||
var serialized = CBOR.encode(data) | ||
var deserialized = CBOR.decode(serialized) | ||
assert.deepEqual(deserialized, data) | ||
var serialized = CBOR.encode(data) | ||
var deserialized = CBOR.decode(serialized) | ||
assert.deepEqual(deserialized, data) | ||
}) | ||
test('encode/decode sample data with records', function(){ | ||
var data = sampleData | ||
let structures = [] | ||
@@ -249,2 +258,21 @@ let encoder = new Encoder({ structures, useRecords: true }) | ||
}) | ||
test('key caching', function() {
	const data = {
		foo: 2,
		bar: 'test',
		four: 4,
		seven: 7,
		foz: 3,
	}
	// Round-trip the same payload three times so the short-key cache is
	// exercised on both the first-seen and cached paths.
	for (let pass = 0; pass < 3; pass++) {
		var serialized = CBOR.encode(data)
		var deserialized = CBOR.decode(serialized)
		assert.deepEqual(deserialized, data)
	}
})
test('decimal float32', function() { | ||
@@ -284,2 +312,12 @@ var data = { | ||
test('iterator/indefinite length array', function(){
	// Placeholder constructor used to disguise the array below.
	class NotArray {
	}
	let data = ['a', 'b', 'c', ['d']] // iterable
	// NOTE(review): presumably the constructor override bypasses the encoder's
	// plain-Array path so the Symbol.iterator fallback (indefinite-length
	// array, 0x9f ... 0xff) is used — confirm against encode.js.
	data.constructor = NotArray
	var serialized = encode(data)
	var deserialized = decode(serialized)
	assert.deepEqual(deserialized, data)
})
test('buffers', function() { | ||
@@ -286,0 +324,0 @@ var data = { |
Sorry, the diff of this file is not supported yet
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
222211
2899
251