aerospike - npm package version comparison

Comparing version 2.0.4 to 2.1.0

docs/tutorials/node_clusters.md


benchmarks/config.json

@@ -42,4 +42,3 @@ {

],
"summary" : true,
"help" : "Please follow the README.md to specify configurations to run this benchmark"
"summary" : true
}

@@ -121,3 +121,3 @@ // *****************************************************************************

function report_step (p, i, o, code, stdout, stderr) {
function reportStep (p, i, o, code, stdout, stderr) {
console.log('processes: %d, iterations: %d, operations: %d, status: %d', p, i, o, code)

@@ -163,3 +163,3 @@

function report_final () {
function reportFinal () {
console.log()

@@ -197,9 +197,9 @@ console.log('SUMMARY')

var group_ops = {}
var groupOps = {}
matched.forEach(function (res) {
var ops = res.configuration.operations
var group = (group_ops[ops] || [])
var group = (groupOps[ops] || [])
group.push(res)
group_ops[ops] = group
groupOps[ops] = group
})

@@ -209,4 +209,4 @@

console.log()
for (var k in group_ops) {
var ops = group_ops[k]
for (var k in groupOps) {
var ops = groupOps[k]
console.log('operations: %d', k)

@@ -231,7 +231,7 @@ for (var o = 0; o < ops.length; o++) {

var o_hist = {}
var opsHist = {}
matched.forEach(function (res) {
var ops = res.configuration.operations
o_hist[ops] = (o_hist[ops] || 0) + 1
opsHist[ops] = (opsHist[ops] || 0) + 1
})

@@ -241,3 +241,3 @@

console.log('Number of Concurrent Transactions:')
stats.print_histogram(o_hist, console.log, ' ')
stats.print_histogram(opsHist, console.log, ' ')

@@ -272,3 +272,3 @@ console.log()

proc.on('close', function (code) {
report_step(p, i, o, code, stdout.toString(), stderr.toString())
reportStep(p, i, o, code, stdout.toString(), stderr.toString())
step(p, i, o)

@@ -293,3 +293,3 @@ })

if (p > P_MAX) {
report_final()
reportFinal()
return

@@ -296,0 +296,0 @@ }

@@ -46,4 +46,4 @@ // *****************************************************************************

//
var interval_stats = new Array(OP_TYPES)
reset_interval_stats()
var intervalStats = new Array(OP_TYPES)
resetIntervalStats()

@@ -77,3 +77,3 @@ if (argv.querySpec !== undefined) {

if (argv.time !== undefined) {
argv.time = stats.parse_time_to_secs(argv.time)
argv.time = stats.parseTimeToSecs(argv.time)
argv.iterations = undefined

@@ -106,19 +106,19 @@ }

if (argv.summary === true && rwWorkers > 0) {
return stats.report_final(argv, console.log)
return stats.reportFinal(argv, console.log)
}
}
function worker_spawn () {
function workerSpawn () {
var worker = cluster.fork()
worker.iteration = 0
worker.on('message', worker_results(worker))
worker.on('message', workerResults(worker))
}
function worker_exit (worker) {
function workerExit (worker) {
worker.send(['end'])
}
function worker_shutdown () {
function workerShutdown () {
Object.keys(cluster.workers).forEach(function (id) {
worker_exit(cluster.workers[id])
workerExit(cluster.workers[id])
})

@@ -130,3 +130,3 @@ }

*/
function worker_probe () {
function workerProbe () {
Object.keys(cluster.workers).forEach(function (id) {

@@ -183,7 +183,7 @@ cluster.workers[id].send(['trans'])

*/
var counter = 0 // Number of times worker_results_interval is called
function worker_results_interval (worker, interval_worker_stats) {
var counter = 0 // Number of times workerResultsInterval is called
function workerResultsInterval (worker, intervalWorkerStats) {
for (var i = 0; i < OP_TYPES; i++) {
for (var j = 0; j < STATS; j++) {
interval_stats[i][j] = interval_stats[i][j] + interval_worker_stats[i][j]
intervalStats[i][j] = intervalStats[i][j] + intervalWorkerStats[i][j]
}

@@ -193,9 +193,9 @@ }

stats.interval({
'read': interval_stats[0],
'write': interval_stats[1],
'query': interval_stats[2],
'scan': interval_stats[3]
'read': intervalStats[0],
'write': intervalStats[1],
'query': intervalStats[2],
'scan': intervalStats[3]
})
if (!argv.silent) {
print_interval_stats()
printIntervalStats()
}

@@ -205,35 +205,35 @@ }

function print_interval_stats () {
function printIntervalStats () {
if (rwWorkers > 0) {
logger.info('%s read(tps=%d timeouts=%d errors=%d) write(tps=%d timeouts=%d errors=%d) ',
new Date().toString(), interval_stats[0][0], interval_stats[0][1], interval_stats[0][2],
interval_stats[1][0], interval_stats[1][1], interval_stats[1][2])
new Date().toString(), intervalStats[0][0], intervalStats[0][1], intervalStats[0][2],
intervalStats[1][0], intervalStats[1][1], intervalStats[1][2])
}
if (queryWorkers) {
logger.info('%s query(records = %d timeouts = %d errors = %d)',
new Date().toString(), interval_stats[2][0], interval_stats[2][1], interval_stats[2][2])
new Date().toString(), intervalStats[2][0], intervalStats[2][1], intervalStats[2][2])
}
if (scanWorkers) {
logger.info('%s scan(records = %d timeouts = %d errors = %d)',
new Date().toString(), interval_stats[3][0], interval_stats[3][1], interval_stats[3][2])
new Date().toString(), intervalStats[3][0], intervalStats[3][1], intervalStats[3][2])
}
}
function worker_results_iteration (worker, op_stats) {
stats.iteration(op_stats)
function workerResultsIteration (worker, opStats) {
stats.iteration(opStats)
if (argv.iterations === undefined || worker.iteration < argv.iterations || argv.time !== undefined) {
rwWorkerJob(worker)
} else {
worker_exit(worker)
workerExit(worker)
}
}
function worker_results (worker) {
function workerResults (worker) {
return function (message) {
if (message[0] === 'stats') {
worker_results_iteration(worker, message[1])
workerResultsIteration(worker, message[1])
} else if (message[0] === 'alert') {
alerts.handleAlert(message[1].alert, message[1].severity)
} else {
worker_results_interval(worker, message[1])
workerResultsInterval(worker, message[1])
}

@@ -254,7 +254,7 @@ }

/**
* Flush out the current interval_stats and probe the worker every second.
* Flush out the current intervalStats and probe the worker every second.
*/
setInterval(function () {
reset_interval_stats()
worker_probe(cluster)
resetIntervalStats()
workerProbe(cluster)
}, 1000)

@@ -265,4 +265,4 @@

*/
function reset_interval_stats () {
interval_stats = [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]
function resetIntervalStats () {
intervalStats = [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]
}

@@ -327,5 +327,5 @@

setTimeout(function () {
reset_interval_stats()
worker_probe(cluster)
worker_shutdown(cluster)
resetIntervalStats()
workerProbe(cluster)
workerShutdown(cluster)
}, argv.time * 1000)

@@ -341,3 +341,3 @@ }

for (var p = 0; p < argv.processes; p++) {
worker_spawn()
workerSpawn()
}

@@ -51,6 +51,6 @@ // *****************************************************************************

var mem_cnt = 0
var mem_min
var mem_max
var mem_ranges = []
var memCnt = 0
var memMin
var memMax
var memRanges = []

@@ -99,16 +99,16 @@ // *****************************************************************************

function memory_bar (min_used_mb, max_used_mb) {
var min_used_len = Math.floor(min_used_mb / MEM_MAX_MB * MEM_BUCKETS)
var min_used_bar = new Buffer(min_used_len)
if (min_used_len > 0) {
min_used_bar.fill(']')
function memoryBar (minUsedMb, maxUsedMb) {
var minUsedLen = Math.floor(minUsedMb / MEM_MAX_MB * MEM_BUCKETS)
var minUsedBar = new Buffer(minUsedLen)
if (minUsedLen > 0) {
minUsedBar.fill(']')
}
var max_used_len = Math.floor(max_used_mb / MEM_MAX_MB * MEM_BUCKETS)
var max_used_bar = new Buffer(max_used_len - min_used_len)
if (max_used_len > 0) {
max_used_bar.fill(']')
var maxUsedLen = Math.floor(maxUsedMb / MEM_MAX_MB * MEM_BUCKETS)
var maxUsedBar = new Buffer(maxUsedLen - minUsedLen)
if (maxUsedLen > 0) {
maxUsedBar.fill(']')
}
return min_used_bar.toString().blue + max_used_bar.toString().red
return minUsedBar.toString().blue + maxUsedBar.toString().red
}

@@ -129,3 +129,3 @@

var unfiltered = mem_ranges
var unfiltered = memRanges

@@ -163,3 +163,3 @@ var filtered = unfiltered.filter(function (r, i) {

(r[1] - r[0]).toFixed(3),
memory_bar(r[0], r[1])
memoryBar(r[0], r[1])
])

@@ -174,3 +174,3 @@ } else {

(r[1] - r[0]).toFixed(3),
memory_bar(r[0], r[1])
memoryBar(r[0], r[1])
])

@@ -277,10 +277,10 @@ }

if (mem_min === undefined) {
mem_min = mem
mem_cnt = 0
if (memMin === undefined) {
memMin = mem
memCnt = 0
}
if (mem_max === undefined || mem > mem_max) {
mem_max = mem
mem_cnt++
if (memMax === undefined || mem > memMax) {
memMax = mem
memCnt++
} else {

@@ -290,8 +290,8 @@ // this is where the magic happens

// we will filter based on a factor
mem_ranges.push([mem_min, mem_max, mem_cnt])
memRanges.push([memMin, memMax, memCnt])
// reset
mem_min = mem
mem_max = mem
mem_cnt = 0
memMin = mem
memMax = mem
memCnt = 0
RANGE_COUNT++

@@ -307,3 +307,3 @@ }

var last_line
var lastLine

@@ -315,5 +315,5 @@ process.stdin.on('data', function (chunk) {

for (i = 0, j = chunk.indexOf('\n', i); j !== -1; i = j + 1, j = chunk.indexOf('\n', i)) {
if (last_line) {
readline(last_line + chunk.slice(i, j))
last_line = undefined
if (lastLine) {
readline(lastLine + chunk.slice(i, j))
lastLine = undefined
} else {

@@ -325,3 +325,3 @@ readline(chunk.slice(i, j))

if (chunk.length > i) {
last_line = chunk.slice(i)
lastLine = chunk.slice(i)
}

@@ -328,0 +328,0 @@ })

@@ -27,3 +27,3 @@ // *****************************************************************************

var hist = {}
var time_hist = {
var timeHist = {
'<= 1': 0,

@@ -40,4 +40,4 @@ '> 1': 0,

var start_time
var total_duration
var startTime
var totalDuration

@@ -81,10 +81,10 @@ const TABLE_CHARS = {

function parse_time_to_secs (time) {
function parseTimeToSecs (time) {
if (time !== undefined) {
var time_match = time.toString().match(/(\d+)([smh])?/)
if (time_match !== null) {
if (time_match[2] !== null) {
time = parseInt(time_match[1], 10)
var time_unit = time_match[2]
switch (time_unit) {
var timeMatch = time.toString().match(/(\d+)([smh])?/)
if (timeMatch !== null) {
if (timeMatch[2] !== null) {
time = parseInt(timeMatch[1], 10)
var timeUnit = timeMatch[2]
switch (timeUnit) {
case 'm':

@@ -103,3 +103,3 @@ time = time * 60

function time_histogram (operations) {
function timeHistogram (operations) {
operations.map(function (op) {

@@ -110,16 +110,16 @@ return duration(op[OPERATION_TIME_START], op[OPERATION_TIME_END])

if (d > 32) {
time_hist['> 32']++
timeHist['> 32']++
}
if (d > 16) {
time_hist['> 16']++
timeHist['> 16']++
} else if (d > 8) {
time_hist['> 8']++
timeHist['> 8']++
} else if (d > 4) {
time_hist['> 4']++
timeHist['> 4']++
} else if (d > 2) {
time_hist['> 2']++
timeHist['> 2']++
} else if (d > 1) {
time_hist['> 1']++
timeHist['> 1']++
} else {
time_hist['<= 1']++
timeHist['<= 1']++
}

@@ -129,7 +129,7 @@ })

function number_format (v, precision) {
function numberFormat (v, precision) {
return v.toFixed(precision || 0).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ',')
}
function time_units (v) {
function timeUnits (v) {
var u = v === 1 ? 'second' : 'seconds'

@@ -147,7 +147,7 @@

return number_format(v, 2) + ' ' + u
return numberFormat(v, 2) + ' ' + u
}
function calculate_tps (transactions) {
var seconds = total_duration / 1000
function calculateTPS (transactions) {
var seconds = totalDuration / 1000
Object.keys(transactions).forEach(function (stat) {

@@ -158,3 +158,3 @@ transactions[stat]['tps'] = transactions[stat]['count'] / seconds

function status_histogram (operations) {
function statusHistogram (operations) {
operations.map(function (op) {

@@ -167,3 +167,3 @@ return op[OPERATION_STATUS]

function print_table (table, print, prefix) {
function printTable (table, print, prefix) {
table.toString().split('\n').forEach(function (l) {

@@ -176,3 +176,3 @@ if (l.length > 0) {

function print_env_table (print, prefix) {
function printEnvTable (print, prefix) {
var envTable = new Table({

@@ -186,6 +186,6 @@ chars: TABLE_CHARS,

print_table(envTable, print, prefix)
printTable(envTable, print, prefix)
}
function print_config_table (config, print, prefix) {
function printConfigTable (config, print, prefix) {
var configTable = new Table({

@@ -199,8 +199,8 @@ chars: TABLE_CHARS,

configTable.push({'processes': config.processes})
configTable.push({'time': config.time === undefined ? 'undefined' : time_units(config.time)})
configTable.push({'time': config.time === undefined ? 'undefined' : timeUnits(config.time)})
print_table(configTable, print, prefix)
printTable(configTable, print, prefix)
}
function print_transactions (transactions, print, prefix) {
function printTransactions (transactions, print, prefix) {
var thead = []

@@ -220,3 +220,3 @@ thead.push('')

var row = columns.map(function (col) {
return number_format(transactions[col]['count'], 0)
return numberFormat(transactions[col]['count'], 0)
})

@@ -226,3 +226,3 @@ table.push({'Total': row})

row = columns.map(function (col) {
return number_format(transactions[col]['tps'], 0)
return numberFormat(transactions[col]['tps'], 0)
})

@@ -232,3 +232,3 @@ table.push({'TPS': row})

row = columns.map(function (col) {
return number_format(transactions[col]['min_tps'], 0)
return numberFormat(transactions[col]['min_tps'], 0)
})

@@ -238,10 +238,10 @@ table.push({'Min TPS': row})

row = columns.map(function (col) {
return number_format(transactions[col]['max_tps'], 0)
return numberFormat(transactions[col]['max_tps'], 0)
})
table.push({'Max TPS': row})
print_table(table, print, prefix)
printTable(table, print, prefix)
}
function print_histogram (histogram, print, prefix) {
function printHistogram (histogram, print, prefix) {
var total = Object.keys(histogram).map(function (k) {

@@ -256,3 +256,3 @@ return histogram[k]

thead.push(k)
tbody.push(number_format(histogram[k] / total * 100, 1) + '%')
tbody.push(numberFormat(histogram[k] / total * 100, 1) + '%')
}

@@ -268,21 +268,21 @@

print_table(table, print, prefix)
printTable(table, print, prefix)
}
function start () {
start_time = process.hrtime()
startTime = process.hrtime()
}
function stop () {
var end_time = process.hrtime()
total_duration = duration(start_time, end_time)
var endTime = process.hrtime()
totalDuration = duration(startTime, endTime)
}
function iteration (operations) {
status_histogram(operations)
time_histogram(operations)
statusHistogram(operations)
timeHistogram(operations)
}
function aggregate_interval_stats (stat_name, tx) {
var stats = trans[stat_name] = trans[stat_name] || { count: 0, max_tps: 0, min_tps: Infinity }
function aggregateIntervalStats (statName, tx) {
var stats = trans[statName] = trans[statName] || { count: 0, max_tps: 0, min_tps: Infinity }
stats['count'] += tx

@@ -293,14 +293,14 @@ if (tx > stats['max_tps']) stats['max_tps'] = tx

function interval (interval_stats) {
var total_tx = 0
for (var stat in interval_stats) {
var tx = interval_stats[stat][0]
total_tx += tx
aggregate_interval_stats(stat, tx)
function interval (intervalStats) {
var totalTX = 0
for (var stat in intervalStats) {
var tx = intervalStats[stat][0]
totalTX += tx
aggregateIntervalStats(stat, tx)
}
aggregate_interval_stats('total', total_tx)
aggregateIntervalStats('total', totalTX)
}
function report_final (argv, print) {
calculate_tps(trans)
function reportFinal (argv, print) {
calculateTPS(trans)
if (!argv.json) {

@@ -311,15 +311,15 @@ print()

print(' Environment')
print_env_table(print)
printEnvTable(print)
print()
print(' Configuration')
print_config_table(argv, print)
printConfigTable(argv, print)
print()
print(' Transactions')
print_transactions(trans, print)
printTransactions(trans, print)
print()
print(' Durations')
print_histogram(time_hist, print)
printHistogram(timeHist, print)
print()
print(' Status Codes')
print_histogram(hist, print)
printHistogram(hist, print)
print()

@@ -337,5 +337,5 @@ } else {

},
duration: total_duration,
duration: totalDuration,
transactions: trans,
durations: time_hist,
durations: timeHist,
status_codes: hist

@@ -354,5 +354,5 @@ }

interval: interval,
print_histogram: print_histogram,
report_final: report_final,
parse_time_to_secs: parse_time_to_secs
printHistogram: printHistogram,
reportFinal: reportFinal,
parseTimeToSecs: parseTimeToSecs
}

@@ -48,3 +48,3 @@ // *****************************************************************************

argv.ttl = stats.parse_time_to_secs(argv.ttl)
argv.ttl = stats.parseTimeToSecs(argv.ttl)

@@ -145,7 +145,7 @@ // variables to track memory growth(RSS) of worker process.

case 'BYTES':
var buf_data = STRING_DATA
while (buf_data.length < bin.size) {
buf_data += STRING_DATA
var bufData = STRING_DATA
while (bufData.length < bin.size) {
bufData += STRING_DATA
}
data[bin.name] = new Buffer(buf_data)
data[bin.name] = new Buffer(bufData)
break

@@ -162,7 +162,7 @@ default:

function get (key, done) {
var time_start = process.hrtime()
var timeStart = process.hrtime()
client.get(key, function (_error, _record, _metadata, _key) {
var time_end = process.hrtime()
var timeEnd = process.hrtime()
var status = (_error && _error.code) || 0
done(status, time_start, time_end, READ)
done(status, timeStart, timeEnd, READ)
})

@@ -177,7 +177,7 @@ }

function put (options, done) {
var time_start = process.hrtime()
var timeStart = process.hrtime()
client.put(options.key, options.record, metadata, function (_error, _record, _metadata, _key) {
var time_end = process.hrtime()
var timeEnd = process.hrtime()
var status = (_error && _error.code) || 0
done(status, time_start, time_end, WRITE)
done(status, timeStart, timeEnd, WRITE)
})

@@ -187,4 +187,4 @@ }

// Structure to store per second statistics.
var interval_data = new Array(OP_TYPES)
reset_interval_data()
var intervalData = new Array(OP_TYPES)
resetIntervalData()

@@ -202,12 +202,12 @@ function run (options) {

var operations = Array(expected)
var read_ops = options.rops
var write_ops = options.wops
var readOps = options.rops
var writeOps = options.wops
function done (op_status, op_time_start, op_time_end, op_type) {
operations[completed] = [op_status, op_time_start, op_time_end]
interval_data[op_type][TPS]++
if (op_status === status.AEROSPIKE_ERR_TIMEOUT) {
interval_data[op_type][TIMEOUT]++
} else if (op_status !== status.AEROSPIKE_OK && op_status !== status.AEROSPIKE_ERR_TIMEOUT) {
interval_data[op_type][ERROR]++
function done (opStatus, opTimeStart, opTimeEnd, opType) {
operations[completed] = [opStatus, opTimeStart, opTimeEnd]
intervalData[opType][TPS]++
if (opStatus === status.AEROSPIKE_ERR_TIMEOUT) {
intervalData[opType][TIMEOUT]++
} else if (opStatus !== status.AEROSPIKE_OK && opStatus !== status.AEROSPIKE_ERR_TIMEOUT) {
intervalData[opType][ERROR]++
}

@@ -222,3 +222,3 @@

while (write_ops > 0 || read_ops > 0) {
while (writeOps > 0 || readOps > 0) {
var k = keygen(options.keyRange.min, options.keyRange.max)

@@ -228,8 +228,8 @@ var key = {ns: options.namespace, set: options.set, key: k}

var ops = {key: key, record: record}
if (write_ops > 0) {
write_ops--
if (writeOps > 0) {
writeOps--
put(ops, done)
}
if (read_ops > 0) {
read_ops--
if (readOps > 0) {
readOps--
get(key, done)

@@ -241,17 +241,17 @@ }

/*
* Sends the populated interval_data to the parent and resets it for the next second
* Sends the populated intervalData to the parent and resets it for the next second
*/
function respond () {
process.send(['trans', interval_data])
reset_interval_data()
process.send(['trans', intervalData])
resetIntervalData()
}
/*
* Reset interval_data
* Reset intervalData
*/
function reset_interval_data () {
interval_data[READ] = [0, 0, 0] // [reads_performed, reads_timeout, reads_error]
interval_data[WRITE] = [0, 0, 0] // [writes_performed, writes_timeout, writes_error]
interval_data[QUERY] = [0, 0, 0] // [QueryRecords, query_timeout, query_error]
interval_data[SCAN] = [0, 0, 0]
function resetIntervalData () {
intervalData[READ] = [0, 0, 0] // [reads_performed, reads_timeout, reads_error]
intervalData[WRITE] = [0, 0, 0] // [writes_performed, writes_timeout, writes_error]
intervalData[QUERY] = [0, 0, 0] // [QueryRecords, query_timeout, query_error]
intervalData[SCAN] = [0, 0, 0]
}

@@ -268,8 +268,8 @@

// count the records returned
interval_data[opType][TPS]++
intervalData[opType][TPS]++
})
stream.on('error', function (error) {
interval_data[opType][ERROR]++
intervalData[opType][ERROR]++
if (error.code === status.AEROSPIKE_ERR_TIMEOUT) {
interval_data[opType][TIMEOUT]++
intervalData[opType][TIMEOUT]++
}

@@ -276,0 +276,0 @@ })

# Backward Incompatible API Changes
## Version 2.1.0
### New modules for Scalar, List & Map operations
The `Aerospike.operator` module has been split into three separate modules for operations on scalar values, lists and maps:
- `Aerospike.operations` - Operations on scalar values (Strings, Integers, Doubles, etc.).
- `Aerospike.lists` - Operations on Lists, e.g. append, insert, remove.
- `Aerospike.maps` - Operations on Sorted Maps, e.g. put, getByKey, removeByIndex.
The old `Aerospike.operator` module has been deprecated and will be removed in the next major release.
### Deprecations
| Deprecated Function | Replacement | Remarks |
| ---------------------------- | ---------------------------------------------- | ------------------------------------------- |
| `Aerospike.operator.append` | `Aerospike.operations.append` | - |
| `Aerospike.operator.incr` | `Aerospike.operations.incr` | - |
| `Aerospike.operator.prepend` | `Aerospike.operations.prepend` | - |
| `Aerospike.operator.read` | `Aerospike.operations.read` | - |
| `Aerospike.operator.touch` | `Aerospike.operations.touch` | - |
| `Aerospike.operator.write` | `Aerospike.operations.write` | - |
| `Aerospike.operator.list<*>` | `Aerospike.lists.<*>` | - |
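
For illustration, here is a minimal sketch of the migration, assuming a record with an integer bin `i` and a list bin `l` (both hypothetical names); the resulting `ops` array is passed to `client.operate()` exactly as before:

```js
const Aerospike = require('aerospike')
const op = Aerospike.operations
const lists = Aerospike.lists

// Deprecated (still works via the Aerospike.operator compatibility shim):
//   Aerospike.operator.incr('i', 1)
//   Aerospike.operator.listAppend('l', 'z')

// Preferred as of 2.1.0:
var ops = [
  op.incr('i', 1),        // scalar operation, was Aerospike.operator.incr
  lists.append('l', 'z')  // list operation, was Aerospike.operator.listAppend
]
// pass `ops` to client.operate(key, ops, callback) as before
```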
## Version 2.0.0

@@ -4,0 +28,0 @@

@@ -7,4 +7,4 @@ # Aerospike Node.js Client API

installed via npm from the [npmjs.com](https://www.npmjs.com/package/aerospike)
package repository. The source code is available at
[github.com](https://github.com/aerospike/aerospike-client-nodejs). For more
package repository. The source code is available on
[GitHub](https://github.com/aerospike/aerospike-client-nodejs). For more
information about the Aerospike high-performance NoSQL database, please refer

@@ -15,22 +15,30 @@ to [http://www.aerospike.com/](http://www.aerospike.com/).

The `aerospike` package exports the `aerospike` module, which provides a number
of submodules, classes as well as module level functions which provide a client
for Aerospike database clusters.
The `aerospike` npm package provides the `aerospike` module, which includes a
number of submodules, classes as well as module level functions which together
form the Client SDK enabling Node.js applications to connect to Aerospike
database clusters.
*Modules*
### Modules
The main modules included in the `aerospike` package are:
* [`aerospike`]{@link module:aerospike} - The aerospike module contains the
core classes that make up the Client API, such as the {@link Client}, {@link
Query} and {@link Scan} classes. It provides module level functions to
connect to an Aerospike cluster.
core classes that make up the Client API, such as the Client, Query and
Scan classes. It provides module level functions to connect to an Aerospike
cluster.
* [`aerospike.filter`]{@link module:aerospike/filter} - The filter module is a
submodule containing predicate helpers for use with the {@link Query} class.
* [`aerospike.operator`]{@link module:aerospike/operator} - The operator module provides
helper functions for the {@link Client#operate} command.
submodule containing predicate helpers for use with the Query class.
* [`aerospike.operations`]{@link module:aerospike/operations},
[`aerospike.lists`]{@link module:aerospike/lists},
[`aerospike.maps`]{@link module:aerospike/maps} - These three modules define
the operations on scalar, list and map values that can be executed with the
{@link Client#operate} command.
*Classes*
### Classes
The main classes included in the `aerospike` module are:
* {@link Client} - The main interface of the Aerospike client. Through the
Client class commands such as {@link Client#put|put}, {@link Client#get|get}
or {@link Client#query|query} can be sent to an Aerospike database cluster.
Client class commands such as put, get or query can be sent to an Aerospike
database cluster.
* {@link Query} - The Query class can be used to perform value-based searches

@@ -53,10 +61,75 @@ on secondary indexes.

## Example
The following is a very simple example of how to write and read a record from Aerospike.
```js
const Aerospike = require('aerospike')
const Key = Aerospike.Key
const Double = Aerospike.Double
const GeoJSON = Aerospike.GeoJSON
const op = Aerospike.operations
const lists = Aerospike.lists
const maps = Aerospike.maps
const config = {
hosts: '192.168.33.10:3000'
}
Aerospike.connect(config, (error, client) => {
if (error) throw error
var key = new Key('test', 'demo', 'demo')
var record = {
i: 123,
s: 'hello',
b: new Buffer('world'),
d: new Double(3.1415),
g: new GeoJSON({type: 'Point', coordinates: [103.913, 1.308]}),
l: [1, 'a', {x: 'y'}],
m: {foo: 4, bar: 7}
}
var meta = { ttl: 10000 }
var policy = { exists: Aerospike.policy.exists.CREATE_OR_REPLACE }
client.put(key, record, meta, policy, (error) => {
if (error) throw error
var ops = [
op.incr('i', 1),
op.read('i'),
lists.append('l', 'z'),
maps.removeByKey('m', 'bar')
]
client.operate(key, ops, (error, result) => {
if (error) throw error
console.log(result) // => { l: 4, i: 124 }
client.get(key, (error, record, meta) => {
if (error) throw error
console.log(record) // => { i: 124,
// s: 'hello',
// b: <Buffer 77 6f 72 6c 64>,
// d: 3.1415,
// g: '{"type":"Point","coordinates":[103.913,1.308]}',
// l: [ 1, 'a', { x: 'y' }, 'z' ],
// m: { foo: 4 } }
client.close()
})
})
})
})
```
## Tutorials
The following tutorials provide more in-depth examples for specific aspects of working with the Aerospike Node.js Client SDK:
* {@tutorial getting_started}
* {@tutorial node_clusters}
## Further Documentation
For a detailed technical documentation of the Aerospike distributed, NoSQL
For a detailed technical documentation of the Aerospike distributed NoSQL
database, including an architecture overview and in-depth feature guides,
please visit <a href="http://www.aerospike.com/docs">http://www.aerospike.com/docs</a>.
{
"getting_started": {
"title": "Getting Started - Connecting to an Aerospike database cluster"
},
"node_clusters": {
"title": "Managing Aerospike connections in a Node cluster"
}
}

@@ -90,5 +90,5 @@ // *****************************************************************************

var keyv = argv._.shift()
var udf_module = argv._.shift()
var udf_function = argv._.shift()
var udf_args = argv._
var udfModule = argv._.shift()
var udfFunction = argv._.shift()
var udfArgs = argv._

@@ -107,3 +107,3 @@ if (argv.help === true) {

if (!udf_module) {
if (!udfModule) {
console.error('Error: Please provide a UDF module for the operation')

@@ -115,3 +115,3 @@ console.error()

if (!udf_function) {
if (!udfFunction) {
console.error('Error: Please provide a UDF function for the operation')

@@ -150,5 +150,5 @@ console.error()

var udf = {
module: udf_module,
funcname: udf_function,
args: udf_args.map(function (v) {
module: udfModule,
funcname: udfFunction,
args: udfArgs.map(function (v) {
try {

@@ -155,0 +155,0 @@ return JSON.parse(v)

@@ -26,3 +26,3 @@ // *****************************************************************************

var Operator = Aerospike.operator
const operations = Aerospike.operations

@@ -133,7 +133,7 @@ // *****************************************************************************

var ops = [
Operator.touch(1000),
Operator.incr('i', 1),
Operator.write('s', 'some_val'),
Operator.read('i'),
Operator.read('s')
operations.touch(1000),
operations.incr('i', 1),
operations.write('s', 'some_val'),
operations.read('i'),
operations.read('s')
]

@@ -140,0 +140,0 @@

@@ -109,6 +109,6 @@ // *****************************************************************************

var g_nkeys = 20
var g_index = 'points-loc-index'
var numberOfRecords = 20
var geoIndex = 'points-loc-index'
function execute_query (client, done) {
function executeQuery (client, done) {
var count = 0

@@ -141,5 +141,5 @@ var region = GeoJSON.Polygon([-122.500000, 37.000000], [-121.000000, 37.000000],

function insert_records (client, ndx, end, done) {
function insertRecords (client, ndx, end, done) {
if (ndx >= end) {
return execute_query(client, done)
return executeQuery(client, done)
}

@@ -153,7 +153,7 @@

if (err) throw err
insert_records(client, ndx + 1, end, done)
insertRecords(client, ndx + 1, end, done)
})
}
function create_index (client, done) {
function createIndex (client, done) {
var options = {

@@ -163,3 +163,3 @@ ns: argv.namespace,

bin: 'loc',
index: g_index
index: geoIndex
}

@@ -170,3 +170,3 @@ client.createGeo2DSphereIndex(options, function (err, job) {

if (err) throw err
insert_records(client, 0, g_nkeys, done)
insertRecords(client, 0, numberOfRecords, done)
})

@@ -176,4 +176,4 @@ })

function remove_index (client, done) {
client.indexRemove(argv.namespace, g_index, function (err) {
function removeIndex (client, done) {
client.indexRemove(argv.namespace, geoIndex, function (err) {
if (err) throw err

@@ -184,5 +184,5 @@ done(client)

function remove_records (client, ndx, end, done) {
function removeRecords (client, ndx, end, done) {
if (ndx >= end) {
return remove_index(client, done)
return removeIndex(client, done)
}

@@ -194,3 +194,3 @@

if (err && err.code !== status.AEROSPIKE_ERR_RECORD_NOT_FOUND) throw err
remove_records(client, ndx + 1, end, done)
removeRecords(client, ndx + 1, end, done)
})

@@ -200,3 +200,3 @@ }

function cleanup (client, done) {
remove_records(client, 0, g_nkeys, done)
removeRecords(client, 0, numberOfRecords, done)
}

@@ -206,5 +206,5 @@

if (err) throw err
create_index(client, function () {
createIndex(client, function () {
client.close()
})
})

@@ -144,6 +144,6 @@ // *****************************************************************************

const max_concurrent = 200
var in_flight = 0
const maxConcurrent = 200
var inFlight = 0
function exists_done (client, start, end, skip) {
function existsDone (client, start, end, skip) {
var total = end - start + 1

@@ -160,3 +160,3 @@ var done = 0

return function (err, metadata, key, skippy) {
in_flight--
inFlight--
if (skippy === true) {

@@ -192,4 +192,4 @@ console.log('SKIP - ', key)

function exists_start (client, start, end, skip) {
var done = exists_done(client, start, end, skip)
function existsStart (client, start, end, skip) {
var done = existsDone(client, start, end, skip)
var i = start

@@ -207,4 +207,4 @@ var s = 0

in_flight++
deasync.loopWhile(function () { return in_flight > max_concurrent })
inFlight++
deasync.loopWhile(function () { return inFlight > maxConcurrent })
client.exists(key, done)

@@ -214,3 +214,3 @@ }

exists_start(client, argv.start, argv.end, argv.skip)
existsStart(client, argv.start, argv.end, argv.skip)
})

@@ -146,6 +146,6 @@ // *****************************************************************************

const max_concurrent = 200
var in_flight = 0
const maxConcurrent = 200
var inFlight = 0
function get_done (client, start, end, skip) {
function getDone (client, start, end, skip) {
var total = end - start + 1

@@ -162,3 +162,3 @@ var done = 0

return function (err, record, metadata, key, skippy) {
in_flight--
inFlight--
if (skippy === true) {

@@ -194,4 +194,4 @@ console.log('SKIP - ', key)

function get_start (client, start, end, skip) {
var done = get_done(client, start, end, skip)
function getStart (client, start, end, skip) {
var done = getDone(client, start, end, skip)
var i = start

@@ -209,4 +209,4 @@ var s = 0

in_flight++
deasync.loopWhile(function () { return in_flight > max_concurrent })
inFlight++
deasync.loopWhile(function () { return inFlight > maxConcurrent })
client.get(key, done)

@@ -216,3 +216,3 @@ }

get_start(client, argv.start, argv.end, argv.skip)
getStart(client, argv.start, argv.end, argv.skip)
})

@@ -144,6 +144,6 @@ // *****************************************************************************

const max_concurrent = 200
var in_flight = 0
const maxConcurrent = 200
var inFlight = 0
function put_done (client, start, end, skip) {
function putDone (client, start, end, skip) {
var total = end - start + 1

@@ -159,3 +159,3 @@ var done = 0

return function (err, key, skippy) {
in_flight--
inFlight--
if (skippy === true) {

@@ -184,4 +184,4 @@ console.log('SKIP - ', key)

function put_start (client, start, end, skip) {
var done = put_done(client, start, end, skip)
function putStart (client, start, end, skip) {
var done = putDone(client, start, end, skip)
var i = start

@@ -211,4 +211,4 @@ var s = 0

in_flight++
deasync.loopWhile(function () { return in_flight > max_concurrent })
inFlight++
deasync.loopWhile(function () { return inFlight > maxConcurrent })
client.put(key, record, metadata, done)

@@ -218,3 +218,3 @@ }

put_start(client, argv.start, argv.end, argv.skip)
putStart(client, argv.start, argv.end, argv.skip)
})

@@ -146,6 +146,6 @@ // *****************************************************************************

const max_concurrent = 200
var in_flight = 0
const maxConcurrent = 200
var inFlight = 0
function remove_done (client, start, end, skip) {
function removeDone (client, start, end, skip) {
var total = end - start + 1

@@ -162,3 +162,3 @@ var done = 0

return function (err, key, skippy) {
in_flight--
inFlight--
if (skippy === true) {

@@ -194,4 +194,4 @@ console.log('SKIP - ', key)

function remove_start (client, start, end, skip) {
var done = remove_done(client, start, end, skip)
function removeStart (client, start, end, skip) {
var done = removeDone(client, start, end, skip)
var i = start

@@ -209,4 +209,4 @@ var s = 0

in_flight++
deasync.loopWhile(function () { return in_flight > max_concurrent })
inFlight++
deasync.loopWhile(function () { return inFlight > maxConcurrent })
client.remove(key, done)

@@ -216,3 +216,3 @@ }

remove_start(client, argv.start, argv.end, argv.skip)
removeStart(client, argv.start, argv.end, argv.skip)
})

@@ -140,6 +140,6 @@ // *****************************************************************************

const max_concurrent = 200
var in_flight = 0
const maxConcurrent = 200
var inFlight = 0
function put_done (client, start, end) {
function putDone (client, start, end) {
var total = end - start + 1

@@ -152,3 +152,3 @@ var done = 0

return function (err, key) {
in_flight--
inFlight--
if (err) {

@@ -164,3 +164,3 @@ console.log('ERR - ', err, key)

console.log()
get_start(client, start, end)
getStart(client, start, end)
}

@@ -170,4 +170,4 @@ }

function put_start (client, start, end) {
var done = put_done(client, start, end)
function putStart (client, start, end) {
var done = putDone(client, start, end)
var i = 0

@@ -188,4 +188,4 @@

in_flight++
deasync.loopWhile(function () { return in_flight > max_concurrent })
inFlight++
deasync.loopWhile(function () { return inFlight > maxConcurrent })
client.put(key, record, metadata, done)

@@ -195,3 +195,3 @@ }

function get_done (client, start, end) {
function getDone (client, start, end) {
var total = end - start + 1

@@ -204,3 +204,3 @@ var done = 0

return function (err, record, metadata, key) {
in_flight--
inFlight--
done++

@@ -232,4 +232,4 @@ if (err) {

function get_start (client, start, end) {
var done = get_done(client, start, end)
function getStart (client, start, end) {
var done = getDone(client, start, end)
var i = 0

@@ -239,4 +239,4 @@

var key = new Aerospike.Key(argv.namespace, argv.set, i)
in_flight++
deasync.loopWhile(function () { return in_flight > max_concurrent })
inFlight++
deasync.loopWhile(function () { return inFlight > maxConcurrent })
client.get(key, done)

@@ -246,3 +246,3 @@ }

put_start(client, argv.start, argv.end)
putStart(client, argv.start, argv.end)
})

@@ -24,2 +24,3 @@ // *****************************************************************************

const yargs = require('yargs')
const path = require('path')
const iteration = require('./iteration')

@@ -133,3 +134,3 @@

if (err) throw err
client.udfRegisterWait(file, 1000, function (err) {
client.udfRegisterWait(path.basename(file), 1000, function (err) {
if (err) throw err

@@ -136,0 +137,0 @@ !argv.quiet && console.log('UDF Registration Successful - %s', file)

@@ -0,1 +1,24 @@

v2.1.0 / 2016-07-03
===================
* **New Features**
* Support for operations on Sorted Maps. Requires Aerospike server version 3.8.4 or later.
* **Improvements**
* Key objects returned in callbacks now include the digest
* Code cleanup to support standard@7.0.0 which adds several new rules
* **Fixes**
* Fix compile time error with Node 0.12 using gcc 4.4. [#131](https://github.com/aerospike/aerospike-client-nodejs/issues/131)
* **Changes**
  * The `aerospike.operator` module has been split up into two separate modules `aerospike.operations` and `aerospike.lists` for operations on scalar and
list data types respectively. See detailed list of [API changes](https://github.com/aerospike/aerospike-client-nodejs/blob/master/docs/api-changes.md#version-210)
for further details.
* **Documentation**
* Pulled client configuration out into a separate class and expanded the documentation.
* Documented `sharedMemory` configuration.
* Added tutorial for using Aerospike client in Node.js cluster setup.
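
As a rough illustration of the new Sorted Map support, here is a minimal sketch using the `put` and `removeByKey` operations named in the docs above; the host address, key and bin names are hypothetical and error handling is kept minimal:

```js
const Aerospike = require('aerospike')
const maps = Aerospike.maps

Aerospike.connect({hosts: '192.168.33.10:3000'}, (error, client) => {
  if (error) throw error
  var key = new Aerospike.Key('test', 'demo', 'mapDemo') // hypothetical key
  var ops = [
    maps.put('m', 'answer', 42),    // create/update a single entry in map bin 'm'
    maps.removeByKey('m', 'answer') // remove it again in the same request
  ]
  client.operate(key, ops, (error) => {
    if (error) throw error
    client.close()
  })
})
```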
v2.0.4 / 2016-05-09

@@ -2,0 +25,0 @@ ===================

@@ -114,7 +114,15 @@ // *****************************************************************************

const Key = require('./key')
const asEventLoop = require('./event_loop')
const filter = require('./filter')
const info = require('./info')
const operator = require('./operator')
const asEventLoop = require('./event_loop')
const lists = require('./lists')
const maps = require('./maps')
const operations = require('./operations')
const utils = require('./utils')
// copy maps related enums into maps module
Object.keys(as.maps).forEach(function (key) {
maps[key] = as.maps[key]
})
/**

@@ -125,6 +133,5 @@ * @function module:aerospike.client

*
* @param {Client~Config} config - The configuration for the client.
* @param {Config} config - The configuration for the client.
*/
function client (config) {
config = config || {}
return new Client(config)

@@ -138,3 +145,3 @@ }

*
* @param {Client~Config} [config] - The configuration for the client.
* @param {Config} [config] - The configuration for the client.
 * @param {Client~connectCallback} callback - The function to call, once the client is connected to the cluster successfully.

@@ -183,2 +190,3 @@ */

this.key = key
this.print = utils.print

@@ -189,9 +197,36 @@ this.releaseEventLoop = asEventLoop.releaseEventLoop

this.filter = filter
this.operator = operator
this.info = info
this.lists = lists
this.maps = maps
this.operations = operations
// Include both scalar and list operations in the Aerospike.operator
// namespace for backwards compatibility. Aerospike.operator is deprecated
// and will be removed in the next major release.
var operator = {}
Object.keys(operations).forEach(function (key) {
operator[key] = operations[key]
})
Object.keys(lists).forEach(function (key) {
var listKey = 'list' + key.substr(0, 1).toUpperCase() + key.substr(1)
operator[listKey] = lists[key]
})
this.operator = operator
// enums imported from C client library
/**
* Enumeration of UDF types.
*
* @member {Object} language
* @readonly
* @static
*
* @property LUA - Lua (only supported UDF type at the moment)
*/
this.language = as.language
/**
* Enumeration of log levels
*
* @member {Object} log

@@ -201,4 +236,2 @@ * @readonly

*
* Enumeration of log levels
*
* @example

@@ -222,7 +255,7 @@ *

*
* @property {number} OFF
* @property {number} ERROR
* @property {number} WARN
* @property {number} DEBUG
* @property {number} DETAIL
* @property OFF
* @property ERROR
* @property WARN
* @property DEBUG
* @property DETAIL
*/

@@ -232,2 +265,4 @@ this.log = as.log

/**
* Enumeration of policy values.
*
* @member {Object} policy

@@ -237,4 +272,2 @@ * @readonly

*
* Enumeration of policy values.
*
* @example

@@ -277,5 +310,4 @@ *

*
* @property {number} retry.NONE - Only attempt an operation once.
*
* @property {number} retry.ONCE - If an operation fails, attempt the operation one
* @property retry.NONE - Only attempt an operation once.
* @property retry.ONCE - If an operation fails, attempt the operation one
* more time.

@@ -286,26 +318,24 @@ *

*
* @property {number} gen.IGNORE - Write a record, regardless of generation.
* @property gen.IGNORE - Write a record, regardless of generation.
* @property gen.EQ - Write a record, ONLY if generations are equal.
 * @property gen.GT - Write a record, ONLY if local generation is greater than
* remote generation.
*
* @property {number} gen.EQ - Write a record, ONLY if generations are equal.
*
 * @property {number} gen.GT - Write a record, ONLY if local generation is
* greater than remote generation.
*
* @property {object} key - Key Policy - Specifies the behavior for whether
* keys or digests should be sent to the cluster.
*
* @property {number} key.DIGEST - Send the digest value of the key. This is
* the recommended mode of operation. This calculates the digest and sends
* the digest to the server. The digest is only calculated on the client, and
* not the server.
* @property key.DIGEST - Send the digest value of the key. This is the
* recommended mode of operation. This calculates the digest and sends the
* digest to the server. The digest is only calculated on the client, and not
* the server.
*
* @property {number} key.SEND - Send the key, in addition to the digest
* value. If you want keys to be returned when scanning or querying, the keys
* must be stored on the server. This policy causes a write operation to
* store the key. Once the key is stored, the server will keep it - there is
* no need to use this policy on subsequent updates of the record. If this
* policy is used on read or delete operations, or on subsequent updates of a
* record with a stored key, the key sent will be compared with the key
* stored on the server. A mismatch will cause
* AEROSPIKE_ERR_RECORD_KEY_MISMATCH to be returned.
* @property key.SEND - Send the key, in addition to the digest value. If you
* want keys to be returned when scanning or querying, the keys must be
* stored on the server. This policy causes a write operation to store the
* key. Once the key is stored, the server will keep it - there is no need to
* use this policy on subsequent updates of the record. If this policy is
* used on read or delete operations, or on subsequent updates of a record
* with a stored key, the key sent will be compared with the key stored on
* the server. A mismatch will cause
* <code>AEROSPIKE_ERR_RECORD_KEY_MISMATCH</code> to be returned.
*

@@ -315,23 +345,15 @@ * @property {object} exists - Existence Policy - Specifies the behavior for

*
* @property {number} exists.IGNORE - Write the record, regardless of
* existence. (I.e. create or update.)
* @property exists.IGNORE - Write the record, regardless of existence.
* (I.e. create or update.)
* @property exists.CREATE - Create a record, ONLY if it doesn't exist.
* @property exists.UPDATE - Update a record, ONLY if it exists.
* @property exists.REPLACE - Completely replace a record, ONLY if it exists.
* @property exists.CREATE_OR_REPLACE - Completely replace a record if it
* exists, otherwise create it.
*
* @property {number} exists.CREATE - Create a record, ONLY if it doesn't
* exist.
*
* @property {number} exists.UPDATE - Update a record, ONLY if it exists.
*
* @property {number} exists.REPLACE - Completely replace a record, ONLY if
* it exists.
*
* @property {number} exists.CREATE_OR_REPLACE - Completely replace a record
* if it exists, otherwise create it.
*
* @property {object} replica - Specifies which partition replica to read from.
*
* @property {number} replica.MASTER - Read from the partition master replica
* node.
* @property replica.MASTER - Read from the partition master replica node.
* @property replica.ANY - Read from an unspecified replica node.
*
* @property {number} replica.ANY - Read from an unspecified replica node.
*
* @property {object} consistencyLevel - Specifies the number of replicas to

@@ -341,8 +363,6 @@ * be consulted in a read operation to provide the desired consistency

*
* @property {number} consistencyLevel.ONE - Involve a single replica in the
* @property consistencyLevel.ONE - Involve a single replica in the
* operation.
* @property consistencyLevel.ALL - Involve all replicas in the operation.
*
* @property {number} consistencyLevel.ALL - Involve all replicas in the
* operation.
*
* @property {object} commitLevel - Specifies the number of replicas required

@@ -352,6 +372,5 @@ * to be successfully committed before returning success in a write operation

*
* @property {number} commitLevel.ALL - Return success only after
* successfully committing all replicas.
*
* @property {number} commitLevel.MASTER - Return success after successfully
* @property commitLevel.ALL - Return success only after successfully
* committing all replicas.
* @property commitLevel.MASTER - Return success after successfully
* committing the master replica.

@@ -368,2 +387,4 @@ */

/**
* Enumeration of job status codes.
*
* @member {Object} jobStatus

@@ -373,11 +394,8 @@ * @readonly

*
* @description Enumeration of job status codes.
*
* @see {@link Job#infoCallback} returns the job status.
*
* @property {number} UNDEF - The job status is undefined. This is likely due to the status not being properly checked.
*
* @property {number} INPROGRESS - The job is currently running.
*
* @property {number} COMPLETED - The job completed successfully.
* @property UNDEF - The job status is undefined. This is likely due to the
* status not being properly checked.
* @property INPROGRESS - The job is currently running.
* @property COMPLETED - The job completed successfully.
*/

@@ -387,2 +405,4 @@ this.jobStatus = as.jobStatus

/**
* Enumeration of priority levels for a scan operation.
*
* @member {Object} scanPriority

@@ -392,13 +412,9 @@ * @readonly

*
* @description Enumeration of priority levels for a scan operation.
*
* @see {@link Scan#priority}
*
* @property {number} AUTO - The cluster will auto adjust the scan priority.
*
* @property {number} LOW - Low scan priority.
*
* @property {number} MEDIUM - Medium scan priority.
*
* @property {number} HIGH - High scan priority.
* @property AUTO - The cluster will auto adjust the scan priority.
* @property LOW - Low scan priority.
* @property MEDIUM - Medium scan priority.
* @property HIGH - High scan priority.
*/

@@ -408,2 +424,4 @@ this.scanPriority = as.scanPriority

/**
* Enumeration of secondary index data types.
*
* @member {Object} indexDataType

@@ -413,10 +431,6 @@ * @readonly

*
* @description Enumeration of secondary index data types.
* @property STRING - Values contained in the secondary index are strings.
* @property NUMERIC - Values contained in the secondary index are integers.
* @property GEO2DSPHERE - Values contained in the secondary index are GeoJSON values (points or polygons).
*
* @property {number} STRING - Values contained in the secondary index are strings.
*
* @property {number} NUMERIC - Values contained in the secondary index are integers.
*
* @property {number} GEO2DSPHERE - Values contained in the secondary index are GeoJSON values (points or polygons)..
*
* @see {@link Client#createIndex}

@@ -427,2 +441,4 @@ */

/**
* Enumeration of secondary index types.
*
* @member {Object} indexType

@@ -432,15 +448,13 @@ * @readonly

*
* @description Enumeration of secondary index types.
* @property DEFAULT - Default secondary index type for bins containing scalar values (i.e. integer, string).
*
* @property {number} DEFAULT - Default secondary index type for bins containing scalar values (i.e. integer, string).
*
* @property {number} LIST - Secondary index for bins containing
* @property LIST - Secondary index for bins containing
* <a href="http://www.aerospike.com/docs/guide/cdt-list.html" title="Aerospike List Data Type">&uArr;Lists</a>;
 * the index will be built over the individual entries of the list.
*
* @property {number} MAPKEYS - Secondary index for bins containing
* @property MAPKEYS - Secondary index for bins containing
* <a href="http://www.aerospike.com/docs/guide/cdt-map.html" title="Aerospike Maps Data Type">&uArr;Maps</a>;
 * the index will be built over the individual keys of the map entries.
*
* @property {number} MAPVALUES - Secondary index for bins containing
* @property MAPVALUES - Secondary index for bins containing
* <a href="http://www.aerospike.com/docs/guide/cdt-map.html" title="Aerospike Maps Data Type">&uArr;Maps</a>;

@@ -454,2 +468,4 @@ * the index will be build over the individual values of the map entries.

/**
* Enumeration of error status codes.
*
* @member {Object} status

@@ -459,301 +475,108 @@ * @readonly

*
* @description Enumeration of error status codes.
* @property AEROSPIKE_ERR_INVALID_NODE - Node invalid or could not be found.
* @property AEROSPIKE_ERR_NO_MORE_CONNECTIONS - Asynchronous connection error.
* @property AEROSPIKE_ERR_ASYNC_CONNECTION - Asynchronous connection error.
* @property AEROSPIKE_ERR_CLIENT_ABORT - Query or scan was aborted in user's callback.
* @property AEROSPIKE_ERR_INVALID_HOST - Host name could not be found in DNS lookup.
* @property AEROSPIKE_NO_MORE_RECORDS - No more records available when parsing batch, scan or query records.
* @property AEROSPIKE_ERR_PARAM - Invalid client API parameter.
* @property AEROSPIKE_ERR_CLIENT - Generic client API usage error.
* @property AEROSPIKE_ERR - Generic client error (deprecated).
* @property AEROSPIKE_OK - Generic success.
* @property AEROSPIKE_ERR_SERVER - Generic error returned by the server.
* @property AEROSPIKE_ERR_RECORD_NOT_FOUND - Record does not exist in database. May be returned by read, or write with policy <code>exists: Aerospike.policy.exists.UPDATE</code>
* @property AEROSPIKE_ERR_RECORD_GENERATION - Generation of record in database does not satisfy write policy.
* @property AEROSPIKE_ERR_REQUEST_INVALID - Request protocol invalid, or invalid protocol field.
* @property AEROSPIKE_ERR_RECORD_EXISTS - Record already exists. May be returned by write with policy <code>exists: Aerospike.policy.exists.CREATE</code>.
* @property AEROSPIKE_ERR_BIN_EXISTS - Bin already exists.
* @property AEROSPIKE_ERR_CLUSTER_CHANGE - A cluster state change occurred during the request.
* @property AEROSPIKE_ERR_SERVER_FULL - The server node is running out of memory and/or storage device space reserved for the specified namespace.
* @property AEROSPIKE_ERR_TIMEOUT - Request timed out. Can be triggered by client or server.
* @property AEROSPIKE_ERR_NO_XDR - XDR not available for the cluster.
* @property AEROSPIKE_ERR_CLUSTER - Generic cluster discovery & connection error.
* @property AEROSPIKE_ERR_BIN_INCOMPATIBLE_TYPE - Bin modification operation cannot be done on an existing bin due to its value type.
* @property AEROSPIKE_ERR_RECORD_TOO_BIG - Record being (re-)written cannot fit in a storage write block.
* @property AEROSPIKE_ERR_RECORD_BUSY - Too many concurrent requests for one record - a "hot key" situation.
* @property AEROSPIKE_ERR_SCAN_ABORTED - Scan aborted by user.
* @property AEROSPIKE_ERR_UNSUPPORTED_FEATURE - Sometimes our doc, or our customers' wishes, get ahead of us. We may have processed something that the server is not ready for (unsupported feature).
* @property AEROSPIKE_ERR_BIN_NOT_FOUND - Bin-level replace-only supported on server but not on client.
* @property AEROSPIKE_ERR_DEVICE_OVERLOAD - The server node's storage device(s) can't keep up with the write load.
* @property AEROSPIKE_ERR_RECORD_KEY_MISMATCH - Record key sent with transaction did not match key stored on server.
* @property AEROSPIKE_ERR_NAMESPACE_NOT_FOUND - Namespace in request not found on server.
* @property AEROSPIKE_ERR_BIN_NAME - Sent too-long bin name or exceeded namespace's bin name quota.
* @property AEROSPIKE_ERR_FAIL_FORBIDDEN - Operation not allowed at this time.
* @property AEROSPIKE_QUERY_END - There are no more records left for query.
* @property AEROSPIKE_SECURITY_NOT_SUPPORTED - Security functionality not supported by connected server.
* @property AEROSPIKE_SECURITY_NOT_ENABLED - Security functionality not enabled by connected server.
* @property AEROSPIKE_SECURITY_SCHEME_NOT_SUPPORTED - Security type not supported by connected server.
* @property AEROSPIKE_INVALID_COMMAND - Administration command is invalid.
* @property AEROSPIKE_INVALID_FIELD - Administration field is invalid.
* @property AEROSPIKE_ILLEGAL_STATE - Security protocol not followed.
* @property AEROSPIKE_INVALID_USER - User name is invalid.
* @property AEROSPIKE_USER_ALREADY_EXISTS - User was previously created.
* @property AEROSPIKE_INVALID_PASSWORD - Password is invalid.
* @property AEROSPIKE_EXPIRED_PASSWORD - Password has expired.
* @property AEROSPIKE_FORBIDDEN_PASSWORD - Forbidden password (e.g. recently used).
* @property AEROSPIKE_INVALID_CREDENTIAL - Security credential is invalid.
* @property AEROSPIKE_INVALID_ROLE - Role name is invalid.
* @property AEROSPIKE_ROLE_ALREADY_EXISTS - Role name already exists.
* @property AEROSPIKE_INVALID_PRIVILEGE - Privilege is invalid.
* @property AEROSPIKE_NOT_AUTHENTICATED - User must be authenticated before performing database operations.
* @property AEROSPIKE_ROLE_VIOLATION - User does not possess the required role to perform the database operation.
* @property AEROSPIKE_ERR_UDF - Generic UDF error.
* @property AEROSPIKE_ERR_LARGE_ITEM_NOT_FOUND - The requested item in a large collection was not found.
* @property AEROSPIKE_ERR_BATCH_DISABLED - Batch functionality has been disabled.
* @property AEROSPIKE_ERR_BATCH_MAX_REQUESTS_EXCEEDED - Batch max. requests have been exceeded.
* @property AEROSPIKE_ERR_BATCH_QUEUES_FULL - All batch queues are full.
* @property AEROSPIKE_ERR_GEO_INVALID_GEOJSON - Invalid/unsupported GeoJSON.
* @property AEROSPIKE_ERR_INDEX_FOUND - Index found.
* @property AEROSPIKE_ERR_INDEX_NOT_FOUND - Index not found.
* @property AEROSPIKE_ERR_INDEX_OOM - Index is out of memory.
* @property AEROSPIKE_ERR_INDEX_NOT_READABLE - Unable to read the index.
* @property AEROSPIKE_ERR_INDEX - Generic secondary index error.
* @property AEROSPIKE_ERR_INDEX_NAME_MAXLEN - Index name is too long.
 * @property AEROSPIKE_ERR_INDEX_MAXCOUNT - System already has the maximum allowed number of indexes.
* @property AEROSPIKE_ERR_QUERY_ABORTED - Query was aborted.
* @property AEROSPIKE_ERR_QUERY_QUEUE_FULL - Query processing queue is full.
* @property AEROSPIKE_ERR_QUERY_TIMEOUT - Secondary index query timed out on server.
* @property AEROSPIKE_ERR_QUERY - Generic query error.
* @property AEROSPIKE_ERR_UDF_NOT_FOUND - UDF does not exist.
* @property AEROSPIKE_ERR_LUA_FILE_NOT_FOUND - LUA file does not exist.
* @property AEROSPIKE_ERR_LDT_INTERNAL - Internal LDT error.
* @property AEROSPIKE_ERR_LDT_NOT_FOUND - LDT item not found.
* @property AEROSPIKE_ERR_LDT_UNIQUE_KEY - Unique key violation: Duplicated item inserted when 'unique key' was set.
* @property AEROSPIKE_ERR_LDT_INSERT - General error during insert operation.
* @property AEROSPIKE_ERR_LDT_SEARCH - General error during search operation.
* @property AEROSPIKE_ERR_LDT_DELETE - General error during delete operation.
* @property AEROSPIKE_ERR_LDT_INPUT_PARM - General input parameter error.
* @property AEROSPIKE_ERR_LDT_TYPE_MISMATCH - LDT type mismatch for this bin.
* @property AEROSPIKE_ERR_LDT_NULL_BIN_NAME - The supplied LDT bin name is null.
* @property AEROSPIKE_ERR_LDT_BIN_NAME_NOT_STRING - The supplied LDT bin name must be a string.
* @property AEROSPIKE_ERR_LDT_BIN_NAME_TOO_LONG - The supplied LDT bin name exceeded the 14 char limit.
* @property AEROSPIKE_ERR_LDT_TOO_MANY_OPEN_SUBRECS - Internal Error: too many open records at one time.
* @property AEROSPIKE_ERR_LDT_TOP_REC_NOT_FOUND - Internal Error: Top Record not found.
* @property AEROSPIKE_ERR_LDT_SUB_REC_NOT_FOUND - Internal Error: Sub Record not found.
* @property AEROSPIKE_ERR_LDT_BIN_DOES_NOT_EXIST - LDT Bin does not exist.
* @property AEROSPIKE_ERR_LDT_BIN_ALREADY_EXISTS - Collision: LDT Bin already exists.
 * @property AEROSPIKE_ERR_LDT_BIN_DAMAGED - LDT control structures in the Top Record are damaged. Cannot proceed.
 * @property AEROSPIKE_ERR_LDT_SUBREC_POOL_DAMAGED - Internal Error: LDT Subrecord pool is damaged.
 * @property AEROSPIKE_ERR_LDT_SUBREC_DAMAGED - LDT control structures in the Sub Record are damaged. Cannot proceed.
* @property AEROSPIKE_ERR_LDT_SUBREC_OPEN - Error encountered while opening a Sub Record.
* @property AEROSPIKE_ERR_LDT_SUBREC_UPDATE - Error encountered while updating a Sub Record.
* @property AEROSPIKE_ERR_LDT_SUBREC_CREATE - Error encountered while creating a Sub Record.
* @property AEROSPIKE_ERR_LDT_SUBREC_DELETE - Error encountered while deleting a Sub Record.
* @property AEROSPIKE_ERR_LDT_SUBREC_CLOSE - Error encountered while closing a Sub Record.
* @property AEROSPIKE_ERR_LDT_TOPREC_UPDATE - Error encountered while updating a TOP Record.
* @property AEROSPIKE_ERR_LDT_TOPREC_CREATE - Error encountered while creating a TOP Record.
* @property AEROSPIKE_ERR_LDT_FILTER_FUNCTION_BAD - The filter function name was invalid.
* @property AEROSPIKE_ERR_LDT_FILTER_FUNCTION_NOT_FOUND - The filter function was not found.
* @property AEROSPIKE_ERR_LDT_KEY_FUNCTION_BAD - The function to extract the Unique Value from a complex object was invalid.
* @property AEROSPIKE_ERR_LDT_KEY_FUNCTION_NOT_FOUND - The function to extract the Unique Value from a complex object was not found.
* @property AEROSPIKE_ERR_LDT_TRANS_FUNCTION_BAD - The function to transform an object into a binary form was invalid.
* @property AEROSPIKE_ERR_LDT_TRANS_FUNCTION_NOT_FOUND - The function to transform an object into a binary form was not found.
* @property AEROSPIKE_ERR_LDT_UNTRANS_FUNCTION_BAD - The function to untransform an object from binary form to live form was invalid.
* @property AEROSPIKE_ERR_LDT_UNTRANS_FUNCTION_NOT_FOUND - The function to untransform an object from binary form to live form was not found.
* @property AEROSPIKE_ERR_LDT_USER_MODULE_BAD - The UDF user module name for LDT Overrides was invalid.
* @property AEROSPIKE_ERR_LDT_USER_MODULE_NOT_FOUND - The UDF user module name for LDT Overrides was not found.
*
* @property {number} AEROSPIKE_ERR_INVALID_NODE - Node invalid or could not
* be found.
*
* @property {number} AEROSPIKE_ERR_NO_MORE_CONNECTIONS - Asynchronous
* connection error.
*
* @property {number} AEROSPIKE_ERR_ASYNC_CONNECTION - Asynchronous
* connection error.
*
* @property {number} AEROSPIKE_ERR_CLIENT_ABORT - Query or scan was aborted
* in user's callback.
*
* @property {number} AEROSPIKE_ERR_INVALID_HOST - Host name could not be
* found in DNS lookup.
*
* @property {number} AEROSPIKE_NO_MORE_RECORDS - No more records available
* when parsing batch, scan or query records.
*
* @property {number} AEROSPIKE_ERR_PARAM - Invalid client API parameter.
*
* @property {number} AEROSPIKE_ERR_CLIENT - Generic client API usage error.
*
* @property {number} AEROSPIKE_ERR - Generic client error (deprecated)
*
* @property {number} AEROSPIKE_OK - Generic success.
*
* @property {number} AEROSPIKE_ERR_SERVER - Generic error returned by the
* server.
*
* @property {number} AEROSPIKE_ERR_RECORD_NOT_FOUND - Record does not exist
* in database. May be returned by read, or write with policy
* `{ exists: Aerospike.policy.exists.UPDATE }`
*
* @property {number} AEROSPIKE_ERR_RECORD_GENERATION - Generation of record
* in database does not satisfy write policy.
*
* @property {number} AEROSPIKE_ERR_REQUEST_INVALID - Request protocol
* invalid, or invalid protocol field.
*
* @property {number} AEROSPIKE_ERR_RECORD_EXISTS - Record already exists.
* May be returned by write with policy
* `{ exists: Aerospike.policy.exists.CREATE }`.
*
* @property {number} AEROSPIKE_ERR_BIN_EXISTS - Bin already exists.
*
* @property {number} AEROSPIKE_ERR_CLUSTER_CHANGE - A cluster state change
* occurred during the request.
*
* @property {number} AEROSPIKE_ERR_SERVER_FULL - The server node is running
* out of memory and/or storage device space reserved for the specified
* namespace.
*
* @property {number} AEROSPIKE_ERR_TIMEOUT - Request timed out. Can be
* triggered by client or server.
*
* @property {number} AEROSPIKE_ERR_NO_XDR - XDR not available for the
* cluster.
*
* @property {number} AEROSPIKE_ERR_CLUSTER - Generic cluster discovery &
* connection error.
*
* @property {number} AEROSPIKE_ERR_BIN_INCOMPATIBLE_TYPE - Bin modification
* operation cannot be done on an existing bin due to its value type.
*
* @property {number} AEROSPIKE_ERR_RECORD_TOO_BIG - Record being
* (re-)written cannot fit in a storage write block.
*
* @property {number} AEROSPIKE_ERR_RECORD_BUSY - Too many concurrent
* requests for one record - a "hot key" situation.
*
* @property {number} AEROSPIKE_ERR_SCAN_ABORTED - Scan aborted by user.
*
* @property {number} AEROSPIKE_ERR_UNSUPPORTED_FEATURE - Sometimes our doc,
* or our customers' wishes, get ahead of us. We may have processed something
* that the server is not ready for (unsupported feature).
*
* @property {number} AEROSPIKE_ERR_BIN_NOT_FOUND - Bin-level replace-only
* supported on server but not on client.
*
* @property {number} AEROSPIKE_ERR_DEVICE_OVERLOAD - The server node's
* storage device(s) can't keep up with the write load.
*
* @property {number} AEROSPIKE_ERR_RECORD_KEY_MISMATCH - Record key sent
* with transaction did not match key stored on server.
*
* @property {number} AEROSPIKE_ERR_NAMESPACE_NOT_FOUND - Namespace in
* request not found on server.
*
* @property {number} AEROSPIKE_ERR_BIN_NAME - Sent too-long bin name (should
* be impossible in this client) or exceeded namespace's bin name quota.
*
* @property {number} AEROSPIKE_ERR_FAIL_FORBIDDEN - Operation not allowed at
* this time.
*
* @property {number} AEROSPIKE_QUERY_END - There are no more records left
* for query.
*
* @property {number} AEROSPIKE_SECURITY_NOT_SUPPORTED - Security
* functionality not supported by connected server.
*
* @property {number} AEROSPIKE_SECURITY_NOT_ENABLED - Security functionality
* not enabled by connected server.
*
* @property {number} AEROSPIKE_SECURITY_SCHEME_NOT_SUPPORTED - Security type
* not supported by connected server.
*
* @property {number} AEROSPIKE_INVALID_COMMAND - Administration command is
* invalid.
*
* @property {number} AEROSPIKE_INVALID_FIELD - Administration field is
* invalid.
*
* @property {number} AEROSPIKE_ILLEGAL_STATE - Security protocol not
* followed.
*
* @property {number} AEROSPIKE_INVALID_USER - User name is invalid.
*
* @property {number} AEROSPIKE_USER_ALREADY_EXISTS - User was previously
* created.
*
* @property {number} AEROSPIKE_INVALID_PASSWORD - Password is invalid.
*
* @property {number} AEROSPIKE_EXPIRED_PASSWORD - Password has expired.
*
* @property {number} AEROSPIKE_FORBIDDEN_PASSWORD - Forbidden password (e.g.
* recently used).
*
* @property {number} AEROSPIKE_INVALID_CREDENTIAL - Security credential is
* invalid.
*
* @property {number} AEROSPIKE_INVALID_ROLE - Role name is invalid.
*
* @property {number} AEROSPIKE_ROLE_ALREADY_EXISTS - Role name already
* exists.
*
* @property {number} AEROSPIKE_INVALID_PRIVILEGE - Privilege is invalid.
*
* @property {number} AEROSPIKE_NOT_AUTHENTICATED - User must be
* authenticated before performing database operations.
*
* @property {number} AEROSPIKE_ROLE_VIOLATION - User does not possess the
* required role to perform the database operation.
*
* @property {number} AEROSPIKE_ERR_UDF - Generic UDF error.
*
* @property {number} AEROSPIKE_ERR_LARGE_ITEM_NOT_FOUND - The requested item
* in a large collection was not found.
*
* @property {number} AEROSPIKE_ERR_BATCH_DISABLED - Batch functionality has
* been disabled.
*
* @property {number} AEROSPIKE_ERR_BATCH_MAX_REQUESTS_EXCEEDED - Batch max.
* requests have been exceeded.
*
* @property {number} AEROSPIKE_ERR_BATCH_QUEUES_FULL - All batch queues are
* full.
*
* @property {number} AEROSPIKE_ERR_GEO_INVALID_GEOJSON - Invalid/unsupported
* GeoJSON.
*
* @property {number} AEROSPIKE_ERR_INDEX_FOUND - Index found.
*
* @property {number} AEROSPIKE_ERR_INDEX_NOT_FOUND - Index not found.
*
* @property {number} AEROSPIKE_ERR_INDEX_OOM - Index is out of memory.
*
* @property {number} AEROSPIKE_ERR_INDEX_NOT_READABLE - Unable to read the
* index.
*
* @property {number} AEROSPIKE_ERR_INDEX - Generic secondary index error.
*
* @property {number} AEROSPIKE_ERR_INDEX_NAME_MAXLEN - Index name is too long.
*
* @property {number} AEROSPIKE_ERR_INDEX_MAXCOUNT - System already has the
* maximum allowed indexes.
*
* @property {number} AEROSPIKE_ERR_QUERY_ABORTED - Query was aborted.
*
* @property {number} AEROSPIKE_ERR_QUERY_QUEUE_FULL - Query processing queue
* is full.
*
* @property {number} AEROSPIKE_ERR_QUERY_TIMEOUT - Secondary index query
* timed out on server.
*
* @property {number} AEROSPIKE_ERR_QUERY - Generic query error.
*
* @property {number} AEROSPIKE_ERR_UDF_NOT_FOUND - UDF does not exist.
*
* @property {number} AEROSPIKE_ERR_LUA_FILE_NOT_FOUND - LUA file does not exist.
*
* @property {number} AEROSPIKE_ERR_LDT_INTERNAL - Internal LDT error.
*
* @property {number} AEROSPIKE_ERR_LDT_NOT_FOUND - LDT item not found.
*
* @property {number} AEROSPIKE_ERR_LDT_UNIQUE_KEY - Unique key violation:
* Duplicated item inserted when 'unique key' was set.
*
* @property {number} AEROSPIKE_ERR_LDT_INSERT - General error during insert
* operation.
*
* @property {number} AEROSPIKE_ERR_LDT_SEARCH - General error during search
* operation.
*
* @property {number} AEROSPIKE_ERR_LDT_DELETE - General error during delete
* operation.
*
* @property {number} AEROSPIKE_ERR_LDT_INPUT_PARM - General input parameter
* error.
*
* @property {number} AEROSPIKE_ERR_LDT_TYPE_MISMATCH - LDT type mismatch for
* this bin.
*
* @property {number} AEROSPIKE_ERR_LDT_NULL_BIN_NAME - The supplied LDT bin
* name is null.
*
* @property {number} AEROSPIKE_ERR_LDT_BIN_NAME_NOT_STRING - The supplied
* LDT bin name must be a string.
*
* @property {number} AEROSPIKE_ERR_LDT_BIN_NAME_TOO_LONG - The supplied LDT
* bin name exceeded the 14 char limit.
*
* @property {number} AEROSPIKE_ERR_LDT_TOO_MANY_OPEN_SUBRECS - Internal
* Error: too many open records at one time.
*
* @property {number} AEROSPIKE_ERR_LDT_TOP_REC_NOT_FOUND - Internal Error:
* Top Record not found.
*
* @property {number} AEROSPIKE_ERR_LDT_SUB_REC_NOT_FOUND - Internal Error:
* Sub Record not found.
*
* @property {number} AEROSPIKE_ERR_LDT_BIN_DOES_NOT_EXIST - LDT Bin does not
* exist.
*
* @property {number} AEROSPIKE_ERR_LDT_BIN_ALREADY_EXISTS - Collision: LDT
* Bin already exists.
*
* @property {number} AEROSPIKE_ERR_LDT_BIN_DAMAGED - LDT control structures
* in the Top Record are damaged. Cannot proceed.
*
* @property {number} AEROSPIKE_ERR_LDT_SUBREC_POOL_DAMAGED - Internal Error:
* LDT Subrecord pool is damaged.
*
* @property {number} AEROSPIKE_ERR_LDT_SUBREC_DAMAGED - LDT control
* structures in the Sub Record are damaged. Cannot proceed.
*
* @property {number} AEROSPIKE_ERR_LDT_SUBREC_OPEN - Error encountered while
* opening a Sub Record.
*
* @property {number} AEROSPIKE_ERR_LDT_SUBREC_UPDATE - Error encountered
* while updating a Sub Record.
*
* @property {number} AEROSPIKE_ERR_LDT_SUBREC_CREATE - Error encountered
* while creating a Sub Record.
*
* @property {number} AEROSPIKE_ERR_LDT_SUBREC_DELETE - Error encountered
* while deleting a Sub Record.
*
* @property {number} AEROSPIKE_ERR_LDT_SUBREC_CLOSE - Error encountered
* while closing a Sub Record.
*
* @property {number} AEROSPIKE_ERR_LDT_TOPREC_UPDATE - Error encountered
* while updating a TOP Record.
*
* @property {number} AEROSPIKE_ERR_LDT_TOPREC_CREATE - Error encountered
* while creating a TOP Record.
*
* @property {number} AEROSPIKE_ERR_LDT_FILTER_FUNCTION_BAD - The filter
* function name was invalid.
*
* @property {number} AEROSPIKE_ERR_LDT_FILTER_FUNCTION_NOT_FOUND - The
* filter function was not found.
*
* @property {number} AEROSPIKE_ERR_LDT_KEY_FUNCTION_BAD - The function to
* extract the Unique Value from a complex object was invalid.
*
* @property {number} AEROSPIKE_ERR_LDT_KEY_FUNCTION_NOT_FOUND - The function
* to extract the Unique Value from a complex object was not found.
*
* @property {number} AEROSPIKE_ERR_LDT_TRANS_FUNCTION_BAD - The function to
* transform an object into a binary form was invalid.
*
* @property {number} AEROSPIKE_ERR_LDT_TRANS_FUNCTION_NOT_FOUND - The
* function to transform an object into a binary form was not found.
*
* @property {number} AEROSPIKE_ERR_LDT_UNTRANS_FUNCTION_BAD - The function
* to untransform an object from binary form to live form was invalid.
*
* @property {number} AEROSPIKE_ERR_LDT_UNTRANS_FUNCTION_NOT_FOUND - The
* function to untransform an object from binary form to live form was not
* found.
*
* @property {number} AEROSPIKE_ERR_LDT_USER_MODULE_BAD - The UDF user module
* name for LDT Overrides was invalid.
*
* @property {number} AEROSPIKE_ERR_LDT_USER_MODULE_NOT_FOUND - The UDF user
* module name for LDT Overrides was not found.
*
* @see {@link AerospikeError#code}

@@ -760,0 +583,0 @@ */
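For orientation, a minimal sketch of how these status codes are typically consumed in application code; it assumes a connected `client` and an existing `key` (both placeholders) and uses only the `Aerospike.status` constants documented above:

const Aerospike = require('aerospike')
const status = Aerospike.status

// Distinguish "record not found" from genuine failures when reading a record.
client.get(key, function (error, record, metadata) {
  if (error) {
    if (error.code === status.AEROSPIKE_ERR_RECORD_NOT_FOUND) {
      // the record does not exist - often an expected condition
    } else {
      throw error // any other status code signals a real problem
    }
  } else {
    // record was read successfully
  }
})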

@@ -19,11 +19,11 @@ // *****************************************************************************

const AerospikeError = require('./aerospike_error')
const Config = require('./config')
const IndexJob = require('./index_job')
const LargeList = require('./llist')
const Query = require('./query')
const Scan = require('./scan')
const operator = require('./operator')
const asEventLoop = require('./event_loop')
const operations = require('./operations')
const utils = require('./utils')
const IndexJob = require('./index_job')
const asEventLoop = require('./event_loop')
// number of client instances currently connected to any Aerospike cluster

@@ -38,3 +38,3 @@ var _connectedClients = 0

*
* @param {Client~Config} config - Configuration used to initialize the client.
* @param {Config} config - Configuration used to initialize the client.
*/

@@ -45,7 +45,2 @@ function Client (config) {

}
config = config || {}
config.hosts = config.hosts || process.env.AEROSPIKE_HOSTS || 'localhost'
if (typeof config.hosts === 'string') {
config.hosts = utils.parseHostsString(config.hosts)
}

@@ -57,8 +52,8 @@ /**

*
* @type {Client~Config}
* @type {Config}
*/
this.config = config
this.config = new Config(config)
/** @private */
this.as_client = as.client(config)
this.as_client = as.client(this.config)

@@ -81,5 +76,5 @@ /** @private */

var error = (err instanceof AerospikeError) ? err : AerospikeError.fromASError(err)
callback(error)
return callback(error)
} else {
callback(null, arg1, arg2, arg3)
return callback(null, arg1, arg2, arg3)
}

@@ -390,3 +385,10 @@ }

*
* ###### Usage Notes:
* Once the client is connected to at least one server node, it will start
* polling each cluster node regularly to discover the current cluster status.
* As new nodes are added to the cluster, or existing nodes are removed, the
* client will establish or close down connections to these nodes. If the
* client gets disconnected from the cluster, it will keep polling the last
* known server endpoints, and will reconnect automatically if the connection
* is reestablished.
*
* It is recommended that you use the {@link module:aerospike.connect} method

@@ -401,7 +403,13 @@ * to connect to the cluster. You will receive the client instance in the

*
* @see {@link Config#connTimeoutMs} - Initial host connection timeout in milliseconds.
* @see {@link Config#tenderInterval} - Polling interval in milliseconds for cluster tender.
*
* @example
*
* client.connect(function (error, client) {
* const Aerospike = require('aerospike')
*
* Aerospike.connect((error, client) => {
* if (error) {
* // handle failure
* console.error('Failed to connect to cluster: %s', error.message)
* process.exit()
* } else {

@@ -1022,3 +1030,3 @@ * // client is ready to accept commands

* const Aerospike = require('aerospike')
* const op = Aerospike.operator
* const op = Aerospike.operations
*

@@ -1066,3 +1074,3 @@ * var key = new Aerospike.Key('test', 'demo', 'mykey1')

*
* @summary Shortcut for applying the {@link module:aerospike/operator.append} operation to one or more record bins.
* @summary Shortcut for applying the {@link module:aerospike/operations.append} operation to one or more record bins.
*

@@ -1076,3 +1084,3 @@ * @param {Key} key - The key of the record.

* @see {@link Client#operate}
* @see {@link module:aerospike/operator.append}
* @see {@link module:aerospike/operations.append}
*/

@@ -1083,3 +1091,3 @@

*
* @summary Shortcut for applying the {@link module:aerospike/operator.prepend} operation to one or more record bins.
* @summary Shortcut for applying the {@link module:aerospike/operations.prepend} operation to one or more record bins.
*

@@ -1093,3 +1101,3 @@ * @param {Key} key - The key of the record.

* @see {@link Client#operate}
* @see {@link module:aerospike/operator.prepend}
* @see {@link module:aerospike/operations.prepend}
*/

@@ -1100,3 +1108,3 @@

*
* @summary Shortcut for applying the {@link module:aerospike/operator.incr} operation to one or more record bins.
* @summary Shortcut for applying the {@link module:aerospike/operations.incr} operation to one or more record bins.
*

@@ -1112,10 +1120,10 @@ * @param {Key} key - The key of the record.

* @see {@link Client#operate}
* @see {@link module:aerospike/operator.incr}
* @see {@link module:aerospike/operations.incr}
*/
// Shortcuts for some operators
// Shortcuts for some operations
;['append', 'prepend', 'incr'].forEach(function (op) {
Client.prototype[op] = function (key, bins, metadata, policy, callback) {
var ops = Object.keys(bins).map(function (bin) {
return operator[op](bin, bins[bin])
return operations[op](bin, bins[bin])
})
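A hedged usage sketch of these generated shortcuts (the `key` and the bin names `count` and `greeting` are illustrative; each call is equivalent to `client.operate` with the corresponding `operations` function):

// Increment the 'count' bin by 3, then append '!' to the 'greeting' bin.
client.incr(key, { count: 3 }, function (error) {
  if (error) throw error
})
client.append(key, { greeting: '!' }, function (error) {
  if (error) throw error
})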

@@ -1352,24 +1360,23 @@ if (!this.isConnected(false)) {

*
* @summary Registers an UDF to the database cluster.
* @summary Registers a UDF module with the database cluster.
*
* @description
* To verify that UDF is present in all the nodes
* refer {@link Client#udfRegisterWait}.
* @description This method loads a Lua script from the local filesystem into
* the Aerospike database cluster and registers it for use as a UDF module. The
* client uploads the module to a single cluster node. It then gets distributed
* within the whole cluster automatically. The callback function is called once
* the initial upload into the cluster has completed (or if an error occurred
* during the upload). To verify that the UDF module has been registered on
* every cluster node you can use the {@link Client#udfRegisterWait} method.
*
* @param {string} udfModule - The filename of the UDF module.
* @param {number} [udfType] - UDF type - only Lua is supported at the moment.
* @param {string} path - The file path to the Lua script to load into the server.
* @param {number} [udfType] - Language of the UDF script. Lua is the default
* and only supported scripting language for UDF modules at the moment; ref.
* {@link module:aerospike.language}.
* @param {Client~InfoPolicy} [policy] - The Info Policy to use for this operation.
* @param {Client~doneCallback} callback - The function to call when the operation completes with the result of the operation.
* @param {Client~doneCallback} callback - The function to call when the
* operation completes with the result of the operation.
*
* @example
*
* client.udfRegister('./udf/my_module.lua', function (error) {
* if (error) {
* // handle failure
* } else {
* // handle success
* }
* })
* @see {@link Client#udfRegisterWait} for an example.
*/
Client.prototype.udfRegister = function (udfModule, udfType, policy, callback) {
Client.prototype.udfRegister = function (path, udfType, policy, callback) {
if (typeof udfType === 'function') {

@@ -1391,3 +1398,3 @@ callback = udfType

var self = this
this.as_client.udfRegister(udfModule, udfType, policy, function udfRegisterCb (err) {
this.as_client.udfRegister(path, udfType, policy, function udfRegisterCb (err) {
self.callbackHandler(callback, err)

@@ -1400,25 +1407,40 @@ })

*
* @summary Wait until the UDF registration succeeds in aerospike cluster. This
* function returns only when the UDF registered is available with all the nodes in aerospike cluster.
* @summary Waits until a UDF module has been successfully registered on all
* cluster nodes.
*
* @param {string} udfModule - The filename of the UDF module.
* @param {number} pollInterval - Poll interval used to check the status of the UDF module registration in milliseconds.
* @description This function periodically polls the cluster nodes to check for
* the presence of a previously registered UDF module. It calls the provided
* callback function once all nodes have successfully registered the module.
*
* @param {string} udfModule - The name of the UDF module; this is the basename
* of the UDF file registered with {@link Client#udfRegister}, i.e. the
* filename, optionally including the file extension, but without the directory
* name.
* @param {number} pollInterval - Poll interval in milliseconds used to check
* the presence of the UDF module on the cluster nodes.
* @param {Client~InfoPolicy} [policy] - The Info Policy to use for this operation.
* @param {Client~doneCallback} callback - The function to call when the operation completes with the result of the operation.
* @param {Client~doneCallback} callback - The function to call when the
* operation completes with the result of the operation.
*
* @example
*
* var module = './udf/my_module.lua'
* client.udfRegister(module, function (error) {
* if (error) {
* // handle failure
* } else {
* client.udfRegisterWait(module, 1000, function (error) {
* if (error) {
* // handle failure
* } else {
* // UDF module was successfully registered on all cluster nodes
* }
* const Aerospike = require('aerospike')
* const path = require('path')
*
* Aerospike.connect((error, client) => {
* if (error) throw error
*
* var scriptPath = './udf/my_module.lua'
* client.udfRegister(scriptPath, (error) => {
* if (error) throw error
*
* var module = path.basename(scriptPath)
* client.udfRegisterWait(module, 100, (error) => {
* if (error) throw error
*
* // UDF module was successfully registered on all cluster nodes
*
* client.close()
* })
* }
* })
* })

@@ -1480,80 +1502,2 @@ */

/**
* @typedef {Object} Client~Config
*
* @summary Configuration for an Aerospike client instance.
*
* @property {string} [user] - The user name to use when authenticating to the
* cluster. Leave empty for clusters running without access management.
* (Security features are available in the Aerospike Database Enterprise
* Edition.)
*
* @property {string} [password] - The password to use when authenticating to the cluster.
*
* @property {(Object[]|string)} [hosts=process.env.AEROSPIKE_HOSTS] - List of
* hosts with which the client should attempt to connect.
*
* @property {number} [connTimeoutMs=1000] - Initial host connection timeout in
* milliseconds. The client observes this timeout when opening a connection to
* the cluster for the first time.
*
* @property {number} [tenderInterval=1000] - Polling interval in milliseconds
* for cluster tender.
*
* @property {Object} [policies] - Global policies for the client. A policy is
* a set of values which modify the behavior of an operation, like timeouts or
* how an operation handles data. The policies defined in the configuration are
* used as global defaults, which can be overridden by individual operations as
* needed.
*
* @property {Object} [log] - Configuration for logging done by the client.
*
* @property {Object} [modlua] - Configuration values for the mod-lua system and user paths.
*
* @example
*
* const Aerospike = require('aerospike')
*
* var config = {
* // username and password must be passed only to cluster with security feature enabled.
* // security feature is available only in Enterprise edition of Aerospike.
* user: 'username',
* password: 'password',
* hosts: [
* // add three nodes in the cluster.
* { addr: '192.168.0.1', port: 3000 },
* { addr: '192.168.0.2', port: 3000 },
* { addr: '192.168.0.3', port: 3000 }
* ],
* policies: {
* // default timeout for all operations is 100ms
* timeout: 100
* },
* connTimeoutMs: 1000, // initial connection timeout
* tenderInterval: 1000, // tender interval
* log : {
* level: Aerospike.log.INFO,
* file: fd // fd opened by the application using fs.open()
* },
* modlua: {
* systemPath: 'path to system UDF files',
* userPath: 'path to user UDF files'
* }
* }
*
* Aerospike.connect(config, function (error, client) {
* if (error) {
* // handle error
* } else {
* // client is ready to accept commands
* }
* })
*
* @example <caption>Setting <code>hosts</code> using a String</caption>
*
* var config = {
* hosts: '192.168.0.1:3000,192.168.0.2:3000'
* }
*/
/**
* @typedef {Object} Client~ApplyPolicy

@@ -1560,0 +1504,0 @@ *

@@ -49,3 +49,3 @@ // *****************************************************************************

* // double automatically, e.g. 1.0
* client.operate(key, [Aerospike.operator.incr('d', new Double(value))], (error) => {
* client.operate(key, [Aerospike.operations.incr('d', new Double(value))], (error) => {
* if (error) throw error

@@ -52,0 +52,0 @@ * })

@@ -36,5 +36,31 @@ // *****************************************************************************

function print (err, results) {
if (err) {
console.error(err.message)
} else {
results = Array.from(arguments)
.slice(1)
.filter(function (elem) {
return typeof elem !== 'undefined'
})
console.info.apply(null, ['Response: '].concat(results))
}
}
function kvlistToMap (kvList, MapConstructor) {
MapConstructor = MapConstructor || Map
var map = new MapConstructor()
for (var i = 0; i < kvList.length; i = i + 2) {
var key = kvList[i]
var value = kvList[i + 1]
map.set(key, value)
}
return map
}
module.exports = {
parseHostString: parseHostString,
parseHostsString: parseHostsString
parseHostsString: parseHostsString,
print: print,
kvlistToMap: kvlistToMap
}
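A brief usage sketch for the new `utils` helpers; the relative require path is illustrative and depends on where the module lives inside the package:

const utils = require('./lib/utils')

// Convert a flat [key1, value1, key2, value2, ...] list into a Map.
var map = utils.kvlistToMap(['a', 1, 'b', 2])
// => Map { 'a' => 1, 'b' => 2 }

// print can be passed directly as a generic command callback; it logs the
// error message on failure, or the defined result values on success
// (assuming a connected client and a key, both placeholders):
client.get(key, utils.print)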
{
"name": "aerospike",
"version": "2.0.4",
"version": "2.1.0",
"description": "Aerospike Client Library",

@@ -39,3 +39,3 @@ "tags": [

"yargs": "1.2.1",
"standard": "^5.4.1",
"standard": "^7.0",
"deasync": "^0.1.4",

@@ -42,0 +42,0 @@ "jsdoc": "^3.4.0",

@@ -36,3 +36,5 @@ # Aerospike Node.js Client [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url]

const Aerospike = require('aerospike')
const op = Aerospike.operator
const op = Aerospike.operations
const lists = Aerospike.lists
const maps = Aerospike.maps
const Key = Aerospike.Key

@@ -55,3 +57,4 @@ const Double = Aerospike.Double

g: new GeoJSON({type: 'Point', coordinates: [103.913, 1.308]}),
c: [1, 'a', {x: 'y'}]
l: [1, 'a', {x: 'y'}],
m: {foo: 4, bar: 7}
}

@@ -66,4 +69,5 @@ var meta = { ttl: 10000 }

op.incr('i', 1),
op.listAppend('c', 'z'),
op.read('i')
op.read('i'),
lists.append('l', 'z'),
maps.removeByKey('m', 'bar')
]

@@ -82,3 +86,4 @@

// g: '{"type":"Point","coordinates":[103.913,1.308]}',
// c: [ 1, 'a', { x: 'y' }, 'z' ] }
// l: [ 1, 'a', { x: 'y' }, 'z' ] },
// m: { foo: 4 }
client.close()

@@ -85,0 +90,0 @@ })

@@ -58,3 +58,3 @@ // *****************************************************************************

var applypolicy = {timeout: 1500}
var udfArgs = {module: 'udf', funcname: 'withArguments', args: [[1, 2, 3]]}
var udfArgs = { module: 'udf', funcname: 'withArguments', args: [[1, 2, 3]] }
client.apply(key, udfArgs, applypolicy, function (error, result) {

@@ -61,0 +61,0 @@ if (error) throw error

@@ -45,3 +45,3 @@ // *****************************************************************************

it('returns the status whether each key was found or not', function (done) {
var batch_records = [
var batchRecords = [
{key: new Key(helper.namespace, helper.set, 'test/batch_read/1')},

@@ -54,3 +54,3 @@ {key: new Key(helper.namespace, helper.set, 'test/batch_read/3')},

client.batchRead(batch_records, function (err, results) {
client.batchRead(batchRecords, function (err, results) {
expect(err).not.to.be.ok()

@@ -60,4 +60,4 @@ expect(results.length).to.be(5)

expect(found.length).to.be(3)
var not_found = results.filter(function (result) { return result.status === status.AEROSPIKE_ERR_RECORD_NOT_FOUND })
expect(not_found.length).to.be(2)
var notFound = results.filter(function (result) { return result.status === status.AEROSPIKE_ERR_RECORD_NOT_FOUND })
expect(notFound.length).to.be(2)
done()

@@ -68,3 +68,3 @@ })

it('returns only meta data if no bins are selected', function (done) {
var batch_records = [
var batchRecords = [
{key: new Key(helper.namespace, helper.set, 'test/batch_read/1')},

@@ -75,3 +75,3 @@ {key: new Key(helper.namespace, helper.set, 'test/batch_read/3')},

client.batchRead(batch_records, function (err, results) {
client.batchRead(batchRecords, function (err, results) {
expect(err).not.to.be.ok()

@@ -89,3 +89,3 @@ expect(results.length).to.be(3)

it('returns just the selected bins', function (done) {
var batch_records = [
var batchRecords = [
{key: new Key(helper.namespace, helper.set, 'test/batch_read/1'), bins: ['i']},

@@ -96,3 +96,3 @@ {key: new Key(helper.namespace, helper.set, 'test/batch_read/3'), bins: ['i']},

client.batchRead(batch_records, function (err, results) {
client.batchRead(batchRecords, function (err, results) {
expect(err).not.to.be.ok()

@@ -110,3 +110,3 @@ expect(results.length).to.be(3)

it('returns the entire record', function (done) {
var batch_records = [
var batchRecords = [
{key: new Key(helper.namespace, helper.set, 'test/batch_read/1'), read_all_bins: true},

@@ -117,3 +117,3 @@ {key: new Key(helper.namespace, helper.set, 'test/batch_read/3'), read_all_bins: true},

client.batchRead(batch_records, function (err, results) {
client.batchRead(batchRecords, function (err, results) {
expect(err).not.to.be.ok()

@@ -131,3 +131,3 @@ expect(results.length).to.be(3)

it('returns selected bins for each key', function (done) {
var batch_records = [
var batchRecords = [
{key: new Key(helper.namespace, helper.set, 'test/batch_read/1'), read_all_bins: true},

@@ -138,3 +138,3 @@ {key: new Key(helper.namespace, helper.set, 'test/batch_read/3'), read_all_bins: false, bins: ['i']},

client.batchRead(batch_records, function (err, results) {
client.batchRead(batchRecords, function (err, results) {
expect(err).not.to.be.ok()

@@ -141,0 +141,0 @@ expect(results.length).to.be(3)
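Outside the test suite, a hedged sketch of the batch record spec format exercised above (namespace, set, and key names are placeholders):

var batchRecords = [
  // fetch only bin 'i' for the first key
  { key: new Aerospike.Key('test', 'demo', 'key1'), bins: ['i'] },
  // fetch all bins for the second key
  { key: new Aerospike.Key('test', 'demo', 'key2'), read_all_bins: true },
  // fetch only the metadata (no bins) for the third key
  { key: new Aerospike.Key('test', 'demo', 'key3') }
]
client.batchRead(batchRecords, function (error, results) {
  if (error) throw error
  results.forEach(function (result) {
    // result.status holds a per-key status code, e.g. AEROSPIKE_OK or
    // AEROSPIKE_ERR_RECORD_NOT_FOUND, as asserted in the tests above
  })
})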

@@ -30,3 +30,3 @@ // *****************************************************************************

if (record) {
callback(record.key, record. bins, record.meta)
callback(record.key, record.bins, record.meta)
inFlight--

@@ -33,0 +33,0 @@ }

@@ -23,3 +23,2 @@ // *****************************************************************************

const Double = Aerospike.Double
const GeoJSON = Aerospike.GeoJSON

@@ -36,15 +35,2 @@ // Returns a random integer between min (included) and max (excluded)

// Returns a random point in a circle of radius r (in meters) around the
// geographical coordiates lat, lng
// Source: http://gis.stackexchange.com/a/25883/10736
function randomPoint (lat, lng, r) {
r = r / 111300 // radius in degrees
var w = r * Math.sqrt(Math.random())
var t = 2 * Math.PI * Math.random()
var x = w * Math.cos(t)
var y = w * Math.sin(t)
x = x / Math.cos(lng) // adjust for shrinking of east-west distances
return [lat + x, lng + y]
}
function merge (o1, o2) {

@@ -162,16 +148,2 @@ var o3 = {}

function geojsonPoint (options) {
var opt = merge(geojsonPoint.defaults, options)
return function () {
var coords = (opt.random === true) ? randomPoint(opt.lat, opt.lng, opt.r) : [opt.lat, opt.lng]
return new GeoJSON({type: 'Point', coordinates: coords.reverse()})
}
}
geojsonPoint.defaults = {
random: true,
lat: 37.4214209,
lng: -122.1008744,
r: 1000
}
function array (options) {

@@ -197,20 +169,2 @@ var opt = merge(array.defaults, options)

function array_of_array () {
return function () {
var arr = array()
var obj = map()
var list = [ arr(), obj() ]
return list
}
}
function map_of_map () {
return function () {
var arr = array()
var obj = map()
var map_of_list = {inner_list: arr(), inner_map: obj()}
return map_of_list
}
}
module.exports = {

@@ -222,7 +176,4 @@ bytes: bytes,

double: double,
geojsonPoint: geojsonPoint,
array: array,
map: map,
array_of_array: array_of_array,
map_of_map: map_of_map
map: map
}

@@ -30,5 +30,5 @@ // *****************************************************************************

it('should fetch object count from specific cluster node', function (done) {
client.info('objects', host, function (err, response, responding_host) {
client.info('objects', host, function (err, response, respondingHost) {
expect(err).not.to.be.ok()
expect(responding_host).to.eql(host)
expect(respondingHost).to.eql(host)
expect(info.parseInfo(response)).to.have.property('objects')

@@ -40,6 +40,6 @@ done()

it('should accept a string with the host address', function (done) {
var host_str = host.addr + ':' + host.port
client.info('objects', host_str, function (err, response, responding_host) {
var hostAddress = host.addr + ':' + host.port
client.info('objects', hostAddress, function (err, response, respondingHost) {
expect(err).not.to.be.ok()
expect(responding_host).to.eql(host)
expect(respondingHost).to.eql(host)
expect(info.parseInfo(response)).to.have.property('objects')

@@ -61,7 +61,7 @@ done()

it('should call the done callback after the info callback', function (done) {
var info_cb_called = 0
var infoCbCalled = 0
client.info(function () {
info_cb_called += 1
infoCbCalled += 1
}, function () {
expect(info_cb_called).to.not.eql(0)
expect(infoCbCalled).to.not.eql(0)
done()

@@ -68,0 +68,0 @@ })

@@ -28,3 +28,3 @@ // *****************************************************************************

const status = Aerospike.status
const op = Aerospike.operator
const op = Aerospike.operations

@@ -34,3 +34,3 @@ describe('client.operate()', function () {

describe('operator.incr()', function () {
describe('operations.incr()', function () {
it('should increment bin with integer value', function (done) {

@@ -45,3 +45,3 @@ var key = keygen.string(helper.namespace, helper.set, {prefix: 'test/incr/int'})()

client.operate(key, ops, function (err) {
client.operate(key, ops, function (err, result) {
expect(err).to.not.be.ok()

@@ -212,3 +212,3 @@

if (err) throw err
var ttl_diff = metadata3.ttl - meta.ttl
var ttlDiff = metadata3.ttl - meta.ttl

@@ -222,4 +222,4 @@ client.operate(key, ops, function (err, record1, metadata1, key1) {

expect(record['s']).to.equal(record2['s'])
expect(2592000 + ttl_diff + 10).to.be.above(metadata2.ttl)
expect(2592000 + ttl_diff - 10).to.be.below(metadata2.ttl)
expect(2592000 + ttlDiff + 10).to.be.above(metadata2.ttl)
expect(2592000 + ttlDiff - 10).to.be.below(metadata2.ttl)

@@ -226,0 +226,0 @@ client.remove(key2, function (err, key) {

@@ -219,2 +219,14 @@ // *****************************************************************************

})
it('writes bin with Map value as map and reads it back', function (done) {
this.skip('pending support for Map values')
var record = { map: new Map([['a', 1], ['b', 'foo'], ['c', 1.23],
['d', new Double(3.14)], ['e', new Buffer('bar')], ['f', GeoJSON.Point(103.8, 1.283)],
['g', [1, 2, 3]], ['h', { a: 1, b: 2 }]])
}
var expected = { map: { a: 1, b: 'foo', c: 1.23, d: 3.14, e: new Buffer('bar'),
f: '{"type":"Point","coordinates":[103.8,1.283]}', g: [1, 2, 3], h: { a: 1, b: 2 } }
}
putGetVerify(record, expected, done)
})
})

@@ -221,0 +233,0 @@

@@ -181,5 +181,5 @@ // *****************************************************************************

it('returns the key if it was stored on the server', function (done) {
var unique_key = 'test/query/record_with_stored_key'
var key = new Aerospike.Key(helper.namespace, testSet, unique_key)
var record = { name: unique_key }
var uniqueKey = 'test/query/record_with_stored_key'
var key = new Aerospike.Key(helper.namespace, testSet, uniqueKey)
var record = { name: uniqueKey }
var meta = { ttl: 300 }

@@ -190,3 +190,3 @@ var policy = { key: Aerospike.policy.key.SEND }

var query = client.query(helper.namespace, testSet)
query.where(Aerospike.filter.equal('name', unique_key))
query.where(Aerospike.filter.equal('name', uniqueKey))
var stream = query.foreach()

@@ -197,3 +197,3 @@ var count = 0

expect(key).to.be.a(Key)
expect(key.key).to.equal(unique_key)
expect(key.key).to.equal(uniqueKey)
})

@@ -200,0 +200,0 @@ stream.on('end', done)

@@ -104,7 +104,7 @@ // *****************************************************************************

var remove_policy = {
var removePolicy = {
gen: Aerospike.policy.gen.EQ,
generation: 2
}
client.remove(key, remove_policy, function (err) {
client.remove(key, removePolicy, function (err) {
expect(err.code).to.be(status.AEROSPIKE_ERR_RECORD_GENERATION)

@@ -111,0 +111,0 @@
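For context, a minimal sketch of the generation check used in this test, outside the test suite (it assumes a record that has been written exactly once, so its generation is 1):

// Remove the record only if it has not been modified since we last read it.
var removePolicy = {
  gen: Aerospike.policy.gen.EQ,
  generation: 1
}
client.remove(key, removePolicy, function (error) {
  if (error && error.code === Aerospike.status.AEROSPIKE_ERR_RECORD_GENERATION) {
    // the record was updated concurrently - the remove was rejected
  }
})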

@@ -74,5 +74,5 @@ // *****************************************************************************

if (err) throw err
var select_key = {ns: helper.namespace, set: helper.set}
var selectKey = {ns: helper.namespace, set: helper.set}
client.select(select_key, bins, function (err, _record) {
client.select(selectKey, bins, function (err, _record) {
expect(err.code).to.equal(status.AEROSPIKE_ERR_PARAM)

@@ -79,0 +79,0 @@

@@ -143,9 +143,9 @@ // *****************************************************************************

var udf_helper = new UDFHelper(client)
var index_helper = new IndexHelper(client)
var server_info_helper = new ServerInfoHelper(client)
var udfHelper = new UDFHelper(client)
var indexHelper = new IndexHelper(client)
var serverInfoHelper = new ServerInfoHelper(client)
exports.udf = udf_helper
exports.index = index_helper
exports.cluster = server_info_helper
exports.udf = udfHelper
exports.index = indexHelper
exports.cluster = serverInfoHelper

@@ -163,4 +163,4 @@ exports.fail = function fail (message) {

if (err) throw err
server_info_helper.fetch_info()
server_info_helper.fetch_namespace_config(options.namespace)
serverInfoHelper.fetch_info()
serverInfoHelper.fetch_namespace_config(options.namespace)
done()

@@ -167,0 +167,0 @@ })

@@ -21,2 +21,3 @@ // *****************************************************************************

const helper = require('./test_helper')
const path = require('path')

@@ -30,3 +31,3 @@ const status = Aerospike.status

var script = 'udf.lua'
var filename = __dirname + '/' + script
var filename = path.join(__dirname, script)
var infopolicy = { timeout: 1000, send_as_is: true, check_bounds: false }

@@ -33,0 +34,0 @@ client.udfRegister(filename, infopolicy, function (err) {
