Socket
Socket
Sign inDemoInstall

aerospike

Package Overview
Dependencies
139
Maintainers
5
Versions
134
Alerts
File Explorer

Advanced tools

Install Socket

Detect and block malicious and high-risk dependencies

Install

Comparing version 5.8.0 to 5.9.0

lib/binding/node-v108-win32-x64/aerospike.dll

36

lib/config.js

@@ -404,2 +404,38 @@ // *****************************************************************************

/**
 * @name Config#maxErrorRate
 *
 * @summary Maximum number of errors allowed per node per error_rate_window before backoff algorithm returns
 * AEROSPIKE_MAX_ERROR_RATE for database commands to that node. If max_error_rate is zero, there is no error limit.
 * The counted error types are any error that causes the connection to close (socket errors and client timeouts),
 * server device overload and server timeouts.
 *
 * The application should backoff or reduce the transaction load until AEROSPIKE_MAX_ERROR_RATE stops being returned.
 *
 * @description If the backoff algorithm has been activated, transactions will fail with {@link
 * module:aerospike/status.AEROSPIKE_MAX_ERROR_RATE|AEROSPIKE_MAX_ERROR_RATE} until the {@link Config#errorRateWindow} has passed and the
 * error count has been reset.
 *
 * @type {number}
 *
 * @default 100
 */
// Only integer values are accepted; anything else is silently ignored so the
// default (100, see above) applies.
if (Number.isInteger(config.maxErrorRate)) {
  this.maxErrorRate = config.maxErrorRate
}
/**
 * @name Config#errorRateWindow
 *
 * @summary The number of cluster tend iterations that defines the window for {@link Config#maxErrorRate} to be surpassed. One tend iteration is defined
 * as {@link Config#tendInterval} plus the time to tend all nodes. At the end of the window, the error count is reset to zero and backoff state is removed on all nodes.
 *
 * @type {number}
 *
 * @default 1
 */
// Only integer values are accepted; anything else is silently ignored so the
// default (1, see above) applies.
if (Number.isInteger(config.errorRateWindow)) {
  this.errorRateWindow = config.errorRateWindow
}
/**
* @name Config#minConnsPerNode

@@ -406,0 +442,0 @@ *

8

lib/exp_operations.js

@@ -51,3 +51,3 @@ // *****************************************************************************

* exp.add(exp.binInt('b'), exp.binInt('b')),
* 0),
* exp.expReadFlags.DEFAULT),
* op.read('a'),

@@ -100,2 +100,5 @@ * op.read('b')

* @param {string} bin - The name of the bin.
* @param {string} name - The name of bin to store expression result
* @param {AerospikeExp} exp - The expression to evaluate
* @param flags - Expression read flags. <code>flags</code> must be an integer. See {@link exp.expReadFlags} for more information.
* @returns {Operation} Operation that can be passed to the {@link Client#operate} command.

@@ -112,2 +115,5 @@ */

* @param {any} value - The value to set the bin to.
* @param {string} binName - The variable name of read expression result. This name can be used as the bin name when retrieving bin results from the record.
* @param {AerospikeExp} exp - The expression to evaluate
* @param flags - Expression write flags. <code>flags</code> must be an integer. See {@link exp.expWriteFlags} for more information.
* @returns {Operation} Operation that can be passed to the {@link Client#operate} command.

@@ -114,0 +120,0 @@ */

@@ -21,2 +21,4 @@ // *****************************************************************************

const exp = as.exp
const writeFlags = as.expWriteFlags
const readFlags = as.expReadFlags
const BIN_TYPE_UNDEF = 0

@@ -434,2 +436,4 @@

* because record meta data is cached in memory.
* Requires server version between 5.3.0 inclusive and 7.0 exclusive.
* Use {@link #recordSize} for server version 7.0+.
*

@@ -494,3 +498,4 @@ * @function

* in memory.
* Requires server version 5.3.0+.
* Requires server version between 5.3.0 inclusive and 7.0 exclusive.
* Use {@link #recordSize} for server version 7.0+.
*

@@ -503,2 +508,13 @@ * @function

/**
 * Create expression that returns the record size. This expression usually evaluates
 * quickly because record meta data is cached in memory.
 * Requires server version 7.0+. This expression replaces {@link #deviceSize} and
 * {@link #memorySize} since those older expressions are equivalent on server version 7.0+.
 *
 * @function
 * @return {AerospikeExp} integer value size of the record in bytes.
 *   (Previously documented as "Megabytes", but the associated tests compare
 *   small records against values like 96 and 112, which are byte counts.)
 */
exports.recordSize = _metaExp(exp.ops.RECORD_SIZE)
/**
* Create expression that returns record digest modulo as integer.

@@ -1017,1 +1033,65 @@ *

// Re-export the HLL (HyperLogLog) expression helpers as exp.hll.
exports.hll = require('./exp_hll')
/**
 * Read flags for expression operations. Pass (optionally OR-combined) as the
 * <code>flags</code> argument of an expression read operation.
 *
 * @readonly
 * @enum {number}
 * @description Expression read bit flags. Use BITWISE OR to combine flags.
 */
exports.expReadFlags = {
  /**
   * Default. No special read behavior.
   * @const {number}
   */
  DEFAULT: readFlags.DEFAULT,
  /**
   * Ignore failures caused by the expression resolving to unknown or a non-bin type.
   * @const {number}
   */
  EVAL_NO_FAIL: readFlags.EVAL_NO_FAIL
}
/**
 * Write flags for expression operations. Pass (optionally OR-combined) as the
 * <code>flags</code> argument of an expression write operation.
 *
 * @readonly
 * @enum {number}
 * @description Expression write bit flags. Use BITWISE OR to combine flags.
 */
exports.expWriteFlags = {
  /**
   * Default. No special write behavior.
   * @const {number}
   */
  DEFAULT: writeFlags.DEFAULT,
  /**
   * If bin does not exist, a new bin will be created. If the bin already
   * exists, the operation is denied (mirrors the C client's
   * AS_EXP_WRITE_CREATE_ONLY; combine with POLICY_NO_FAIL to suppress the
   * resulting error).
   * @const {number}
   */
  CREATE_ONLY: writeFlags.CREATE_ONLY,
  /**
   * If bin exists, the bin will be overwritten. If the bin does not exist,
   * the operation is denied (mirrors the C client's AS_EXP_WRITE_UPDATE_ONLY;
   * combine with POLICY_NO_FAIL to suppress the resulting error).
   * @const {number}
   */
  UPDATE_ONLY: writeFlags.UPDATE_ONLY,
  /**
   * If expression results in nil value, then delete the bin.
   * @const {number}
   */
  ALLOW_DELETE: writeFlags.ALLOW_DELETE,
  /**
   * Do not raise error if operation is denied.
   * @const {number}
   */
  POLICY_NO_FAIL: writeFlags.POLICY_NO_FAIL,
  /**
   * Ignore failures caused by the expression resolving to unknown or a non-bin type.
   * @const {number}
   */
  EVAL_NO_FAIL: writeFlags.EVAL_NO_FAIL
}

@@ -113,2 +113,6 @@ // *****************************************************************************

default:
if (Buffer.isBuffer(value)) {
return as.indexDataType.BLOB
}
throw new TypeError('Unknown data type for filter value.')

@@ -115,0 +119,0 @@ }

@@ -1278,1 +1278,52 @@ // *****************************************************************************

}
/**
 * @summary Creates map create operation.
 *
 * @param {string} bin - bin name.
 * @param {number} order - map order.
 * @param {boolean} [persistIndex=false] - if true, persist map index. A map index improves lookup performance, but requires more storage.
 * A map index can be created for a top-level ordered map only. Nested and unordered map indexes are not supported.
 * @param {CdtContext} [ctx] - optional path to nested map. If not defined, the top-level map is used.
 *
 * @returns {Object} Operation that can be passed to the {@link Client#operate} command.
 *
 * @example
 *
 * const Aerospike = require('aerospike')
 * const maps = Aerospike.maps
 * const key = new Aerospike.Key('test', 'demo', 'mapKey')
 *
 * // INSERT HOSTNAME AND PORT NUMBER OF AEROSPIKE SERVER NODE HERE!
 * var config = {
 *   hosts: '192.168.33.10:3000',
 *   // Timeouts disabled, latency dependent on server location. Configure as needed.
 *   policies: {
 *     operate : new Aerospike.OperatePolicy({socketTimeout : 0, totalTimeout : 0})
 *   }
 * }
 *
 * Aerospike.connect(config).then(async client => {
 *   let ops = [
 *     maps.create('map', maps.order.KEY_ORDERED, true)
 *   ]
 *   let result = await client.operate(key, ops)
 *   console.log(result.bins) // => { map: null }
 *   let record = await client.get(key)
 *   console.log(record.bins) // => { map: {} }
 *
 *   await client.remove(key)
 *   client.close()
 * })
 */
exports.create = function (bin, order, persistIndex = false, ctx) {
  const op = new MapOperation(opcodes.MAP_CREATE, bin)
  op.order = order
  op.persistIndex = persistIndex
  // No context given: create the map at the top level of the bin.
  if (ctx === undefined) {
    return op
  }
  return op.withContext(ctx)
}

2

package.json
{
"name": "aerospike",
"version": "5.8.0",
"version": "5.9.0",
"description": "Aerospike Client Library",

@@ -5,0 +5,0 @@ "keywords": [

@@ -17,3 +17,2 @@ # Aerospike Node.js Client [![travis][travis-image]][travis-url] [![codecov][codecov-image]][codecov-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url]

- RHEL 8/9
- Debian 10 (x86_64 architecture only)
- Debian 11

@@ -23,3 +22,3 @@ - Amazon Linux 2023

- Many Linux distributions compatible with one of the above OS releases.
- macOS versions 11/12/13 are also supported. (Node.js 14 install unavailable on M1 Mac systems)
- macOS 12/13

@@ -26,0 +25,0 @@ The client is compatible with arm64, aarch64, and x86_64 architectures.

@@ -75,3 +75,28 @@ // *****************************************************************************

})
it('Will return records even if generation values is not correct', async function () {
  // Keys targeted by this batch removal.
  const batchRecords = ['6', '7', '8', '9', '0'].map(
    (suffix) => new Key(helper.namespace, helper.set, `test/batch_remove/${suffix}`))
  try {
    // A generation of 10 should not match these records, so the batch is rejected.
    await client.batchRemove(batchRecords, null,
      new Aerospike.BatchRemovePolicy({ gen: Aerospike.policy.gen.EQ, generation: 10 }))
    // Unreachable unless the generation check unexpectedly passes.
    expect(1).to.eql(2)
  } catch (error) {
    // If the forced failure above executed instead, error.code is undefined and
    // this assertion fails the test.
    expect(error.code).to.eql(-16)
    // Without the generation constraint, the same batch removes every record.
    const results = await client.batchRemove(batchRecords)
    expect(results.length).to.equal(5)
    results.forEach((result) => {
      expect(result.status).to.equal(Aerospike.status.OK)
    })
  }
})
})
})

@@ -284,2 +284,24 @@ // *****************************************************************************

})
it('Returns correct status and error with async', async function () {
  // A single batch-write record that may only create, never update.
  const createOnlyPolicy = new Aerospike.BatchWritePolicy({
    exists: Aerospike.policy.exists.CREATE
  })
  const batchRecords = [{
    type: batchType.BATCH_WRITE,
    key: new Key(helper.namespace, helper.set, 'test/batch_write/11'),
    ops: [
      op.write('geo', new GeoJSON({ type: 'Point', coordinates: [123.456, 1.308] })),
      op.write('blob', Buffer.from('bar'))
    ],
    policy: createOnlyPolicy
  }]
  // First write creates the record; the repeat must report ERR_RECORD_EXISTS.
  await client.batchWrite(batchRecords)
  const results = await client.batchWrite(batchRecords)
  expect(results[0].status).to.equal(status.ERR_RECORD_EXISTS)
})
})

@@ -286,0 +308,0 @@

@@ -51,2 +51,4 @@ // *****************************************************************************

minConnsPerNode: 10,
maxErrorRate: 100,
errorRateWindow: 1,
modlua: { userPath: '/user/path' },

@@ -78,2 +80,4 @@ password: 'sekret',

expect(config).to.have.property('connTimeoutMs')
expect(config).to.have.property('maxErrorRate')
expect(config).to.have.property('errorRateWindow')
expect(config).to.have.property('hosts')

@@ -80,0 +84,0 @@ expect(config).to.have.property('log')

@@ -258,2 +258,17 @@ // *****************************************************************************

describe('recordSize', function () {
  helper.skipUnlessVersion('>= 7.0.0', this)
  // Descriptions below were copy-pasted from an unrelated boolean-expression
  // suite; they now state what is actually asserted.
  it('evaluates to the stored size of the record', async function () {
    const key = await createRecord({ tags: { a: 'blue', b: 'green', c: 'yellow' } })
    await testNoMatch(key, exp.eq(exp.recordSize(), exp.int(1)))
    await testMatch(key, exp.eq(exp.recordSize(), exp.int(96)))
  })
  it('reflects the larger size when a bin value grows', async function () {
    const key = await createRecord({ tags: { a: '123456789', b: 'green', c: 'yellow' } })
    await testNoMatch(key, exp.eq(exp.recordSize(), exp.int(1)))
    await testMatch(key, exp.eq(exp.recordSize(), exp.int(112)))
  })
})
describe('wildcard', function () {

@@ -268,2 +283,19 @@ it('evaluates to true if any expression evaluates to true', async function () {

describe('expWriteFlags', function () {
  // Callbacks contain no await, so they need not be async.
  it('write flags have correct value', function () {
    expect(exp.expWriteFlags).to.have.property('DEFAULT', 0)
    expect(exp.expWriteFlags).to.have.property('CREATE_ONLY', 1)
    expect(exp.expWriteFlags).to.have.property('UPDATE_ONLY', 2)
    expect(exp.expWriteFlags).to.have.property('ALLOW_DELETE', 4)
    expect(exp.expWriteFlags).to.have.property('POLICY_NO_FAIL', 8)
    expect(exp.expWriteFlags).to.have.property('EVAL_NO_FAIL', 16)
  })
})
describe('expReadFlags', function () {
  it('read flags have correct value', function () {
    expect(exp.expReadFlags).to.have.property('DEFAULT', 0)
    expect(exp.expReadFlags).to.have.property('EVAL_NO_FAIL', 16)
  })
})
describe('arithmetic expressions', function () {

@@ -276,3 +308,3 @@ describe('int bin add expression', function () {

exp.add(exp.binInt('intVal'), exp.binInt('intVal')),
0),
exp.expWriteFlags.DEFAULT),
op.read('intVal')

@@ -290,3 +322,3 @@ ]

exp.add(exp.binInt('intVal'), exp.binInt('intVal')),
0),
exp.expWriteFlags.DEFAULT),
op.read('intVal')

@@ -303,3 +335,3 @@ ]

exp.add(exp.binInt('intVal'), exp.binInt('intVal')),
0),
exp.expWriteFlags.DEFAULT),
op.read('intVal')

@@ -306,0 +338,0 @@ ]

@@ -197,2 +197,17 @@ // *****************************************************************************

describe('Client#createBlobIndex()', function () {
  helper.skipUnlessVersion('>= 7.0.0', this)
  it('should create a blob index', async function () {
    // Index specification for a blob secondary index on the shared test bin.
    const indexSpec = {
      ns: helper.namespace,
      set: helper.set,
      bin: testIndex.bin,
      index: testIndex.name
    }
    await client.createBlobIndex(indexSpec)
    await verifyIndexExists(helper.namespace, testIndex.name)
  })
})
describe('Client#indexRemove()', async function () {

@@ -199,0 +214,0 @@ beforeEach(() => {

@@ -25,2 +25,3 @@ // *****************************************************************************

const maps = Aerospike.maps
const op = Aerospike.operations
const Context = Aerospike.cdt.Context

@@ -66,2 +67,44 @@ const status = Aerospike.status

// Tests for the maps.create operation.
describe('maps.create', function () {
  it('Creates a new map', function () {
    return initState()
      .then(createRecord({ map: { c: 1, b: 2, a: 3 } }))
      .then(orderByKey('map'))
      .then(operate(maps.create('emptyMap', maps.order.KEY_ORDERED)))
      // NOTE(review): bin 'dap' does not exist on this record — presumably a
      // typo for 'map'; confirm the intended bin name.
      .then(operate(op.read('dap')))
      .then(assertRecordEql({ emptyMap: {}, map: { a: 3, b: 2, c: 1 } }))
      .then(cleanup())
  })
  it('Creates a new map from a cdt context', function () {
    return initState()
      .then(createRecord({ map: { c: 1, b: 2, a: 3 } }))
      .then(orderByKey('map'))
      // Context supplied through the withContext() builder callback.
      .then(operate(maps.create('map', maps.order.KEY_ORDERED).withContext(ctx => ctx.addMapKeyCreate('nested'))))
      .then(assertRecordEql({ map: { a: 3, b: 2, c: 1, nested: {} } }))
      .then(cleanup())
  })
  it('Creates a new map from a cdt context as parameter', function () {
    return initState()
      .then(createRecord({ map: { c: 1, b: 2, a: 3 } }))
      .then(orderByKey('map'))
      // Context supplied as the fourth argument of maps.create().
      .then(operate(maps.create('map', maps.order.KEY_ORDERED, false, new Context().addMapKeyCreate('nested'))))
      .then(assertRecordEql({ map: { a: 3, b: 2, c: 1, nested: {} } }))
      .then(cleanup())
  })
  context('persistent indexes added in 7.0', function () {
    helper.skipUnlessVersion('>= 7.0.0', this)
    it('Creates a new map with persistent index', function () {
      return initState()
        .then(createRecord({ map: { c: 1, b: 2, a: 3 } }))
        .then(orderByKey('map'))
        // persistIndex = true; only valid for a top-level ordered map.
        .then(operate(maps.create('emptyMap', maps.order.KEY_ORDERED, true)))
        .then(assertRecordEql({ emptyMap: {}, map: { a: 3, b: 2, c: 1 } }))
        .then(cleanup())
    })
  })
})
describe('maps.put', function () {

@@ -68,0 +111,0 @@ it('adds the item to the map and returns the size of the map', function () {

@@ -38,2 +38,4 @@ // *****************************************************************************

const GEO2DSPHERE = Aerospike.indexDataType.GEO2DSPHERE
const BLOB = Aerospike.indexDataType.BLOB
const LIST = Aerospike.indexType.LIST

@@ -116,3 +118,3 @@ const MAPVALUES = Aerospike.indexType.MAPVALUES

]
const numberOfSamples = samples.length
const indexes = [

@@ -139,5 +141,7 @@ ['qidxName', 'name', STRING],

['qidxGeoMapNested', 'mg', GEO2DSPHERE, MAPVALUES, new Context().addMapKey('nested')],
['qidxAggregateMapNested', 'nested', STRING, MAPKEYS],
['qidxAggregateMapDoubleNested', 'nested', STRING, MAPKEYS, new Context().addMapKey('doubleNested')]
]
let keys = []

@@ -166,2 +170,30 @@

}
// Server 7.0+ adds blob (bytes) secondary indexes. Register the extra sample
// records and matching indexes only when the connected cluster supports them.
if (helper.cluster.isVersionInRange('>= 7.0.0')) {
  // Top-level blob values, plus blobs inside lists, map values and map keys.
  samples.push({ name: 'blob match', blob: Buffer.from('guava') })
  samples.push({ name: 'blob non-match', blob: Buffer.from('pumpkin') })
  samples.push({ name: 'blob list match', lblob: [Buffer.from('guava'), Buffer.from('papaya')] })
  samples.push({ name: 'blob list non-match', lblob: [Buffer.from('pumpkin'), Buffer.from('turnip')] })
  samples.push({ name: 'blob map match', mblob: { a: Buffer.from('guava'), b: Buffer.from('papaya') } })
  samples.push({ name: 'blob map non-match', mblob: { a: Buffer.from('pumpkin'), b: Buffer.from('turnip') } })
  samples.push({ name: 'blob mapkeys match', mkblob: new Map([[Buffer.from('guava'), 1], [Buffer.from('papaya'), 2]]) })
  samples.push({ name: 'blob mapkeys non-match', mkblob: new Map([[Buffer.from('pumpkin'), 3], [Buffer.from('turnip'), 4]]) })
  // Same shapes wrapped one level deeper under the map key 'nested', to
  // exercise indexes that use a CDT context.
  samples.push({ name: 'nested blob match', blob: { nested: Buffer.from('guava') } })
  samples.push({ name: 'nested blob non-match', blob: { nested: Buffer.from('pumpkin') } })
  samples.push({ name: 'nested blob list match', lblob: { nested: [Buffer.from('guava'), Buffer.from('papaya')] } })
  samples.push({ name: 'nested blob list non-match', lblob: { nested: [Buffer.from('pumpkin'), Buffer.from('turnip')] } })
  samples.push({ name: 'nested blob map match', mblob: { nested: { a: Buffer.from('guava'), b: Buffer.from('papaya') } } })
  samples.push({ name: 'nested blob map non-match', mblob: { nested: { a: Buffer.from('pumpkin'), b: Buffer.from('turnip') } } })
  samples.push({ name: 'nested blob mapkeys match', mkblob: { nested: new Map([[Buffer.from('guava'), 1], [Buffer.from('papaya'), 2]]) } })
  samples.push({ name: 'nested blob mapkeys non-match', mkblob: { nested: new Map([[Buffer.from('pumpkin'), 3], [Buffer.from('turnip'), 4]]) } })
  // Secondary indexes covering each shape above; the *Nested variants index
  // inside the 'nested' map key via a CDT context.
  indexes.push(['qidxBlob', 'blob', BLOB])
  indexes.push(['qidxBlobList', 'lblob', BLOB, LIST])
  indexes.push(['qidxBlobMap', 'mblob', BLOB, MAPVALUES])
  indexes.push(['qidxBlobMapKeys', 'mkblob', BLOB, MAPKEYS])
  indexes.push(['qidxBlobListNested', 'lblob', BLOB, LIST, new Context().addMapKey('nested')])
  indexes.push(['qidxBlobMapNested', 'mblob', BLOB, MAPVALUES, new Context().addMapKey('nested')])
  indexes.push(['qidxBlobMapKeysNested', 'mkblob', BLOB, MAPKEYS, new Context().addMapKey('nested')])
}
const numberOfSamples = samples.length
return Promise.all([

@@ -580,3 +612,9 @@ putgen.put(numberOfSamples, generators)

})
context('Uses blob Secondary indexes', function () {
  helper.skipUnlessVersion('>= 7.0.0', this)
  it('should match equal blob values', function (done) {
    // Equality filter against the top-level blob index.
    const query = { filters: [filter.equal('blob', Buffer.from('guava'))] }
    verifyQueryResults(query, 'blob match', done)
  })
})
it('should match equal string values', function (done) {

@@ -670,3 +708,34 @@ const args = { filters: [filter.equal('s', 'banana')] }

})
context('Uses blob Secondary indexes', function () {
  helper.skipUnlessVersion('>= 7.0.0', this)

  // Fresh CDT context pointing at the 'nested' map key, built per test.
  const nestedCtx = () => new Context().addMapKey('nested')

  it('should match lists containing a blob', function (done) {
    const query = { filters: [filter.contains('lblob', Buffer.from('guava'), LIST)] }
    verifyQueryResults(query, 'blob list match', done)
  })
  it('should match lists containing a blob in a nested context', function (done) {
    const query = { filters: [filter.contains('lblob', Buffer.from('guava'), LIST, nestedCtx())] }
    verifyQueryResults(query, 'nested blob list match', done)
  })
  it('should match maps containing a blob value', function (done) {
    const query = { filters: [filter.contains('mblob', Buffer.from('guava'), MAPVALUES)] }
    verifyQueryResults(query, 'blob map match', done)
  })
  it('should match maps containing a blob value in a nested context', function (done) {
    const query = { filters: [filter.contains('mblob', Buffer.from('guava'), MAPVALUES, nestedCtx())] }
    verifyQueryResults(query, 'nested blob map match', done)
  })
  it('should match maps containing a blob key', function (done) {
    const query = { filters: [filter.contains('mkblob', Buffer.from('guava'), MAPKEYS)] }
    verifyQueryResults(query, 'blob mapkeys match', done)
  })
  it('should match maps containing a blob key in a nested context', function (done) {
    const query = { filters: [filter.contains('mkblob', Buffer.from('guava'), MAPKEYS, nestedCtx())] }
    verifyQueryResults(query, 'nested blob mapkeys match', done)
  })
})
it('throws a type error if the comparison value is of invalid type', function () {

@@ -673,0 +742,0 @@ const fn = () => filter.contains('list', { foo: 'bar' }, LIST)

@@ -180,2 +180,4 @@ // *****************************************************************************

}
// Disable maxErrorRate
config.maxErrorRate = 0
return config

@@ -182,0 +184,0 @@ }

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc