@cloudant/couchbackup - npm Package Compare versions

Comparing version 2.9.16 to 2.9.17-SNAPSHOT.185

Each hunk below shows the old-side (2.9.16) file content followed by the new-side (2.9.17-SNAPSHOT.185) file content.

.scannerwork/scanner-report/changesets-23.pb


.scannerwork/report-task.txt

@@ -6,3 +6,3 @@ projectKey=couchbackup
 dashboardUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/dashboard?id=couchbackup&branch=main
-ceTaskId=AYz3uUdQyB21OaKNKmTI
-ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYz3uUdQyB21OaKNKmTI
+ceTaskId=AYz4ZSUnyB21OaKNKmTU
+ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYz4ZSUnyB21OaKNKmTU

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2017 IBM Corp. All rights reserved.
 //

@@ -14,20 +14,29 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Old (2.9.16):

// limitations under the License.
/* global describe it */
'use strict';

const u = require('./citestutils.js');

describe('Encryption tests', function() {
  // Note CLI only to use openssl command
  const p = { useApi: false, encryption: true };

  it('should backup and restore animaldb via an encrypted file', async function() {
    // Allow up to 60 s for backup and restore of animaldb
    u.setTimeout(this, 60);
    const encryptedBackup = `./${this.fileName}`;
    return u.testBackupAndRestoreViaFile(p, 'animaldb', encryptedBackup, this.dbName).then(() => {
      return u.assertEncryptedFile(encryptedBackup);
    });
  });
});

New (2.9.17-SNAPSHOT.185):

// limitations under the License.
'use strict';

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function(onChange) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    let obj = null;

    // one change per line - remove the trailing comma
    line = line.trim().replace(/,$/, '');

    // extract the last_seq at the end of the changes feed
    if (line.match(/^"last_seq":/)) {
      line = '{' + line;
    }

    try {
      obj = JSON.parse(line);
    } catch (e) {
    }
    onChange(obj);
    done();
  };

  return change;
};
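For orientation, here is a minimal sketch of driving the new per-line change parser; the require path and the sample _changes lines are assumptions for illustration, not part of the diff:

'use strict';

const { Readable } = require('stream');
const makeChangeParser = require('./includes/change.js'); // assumed path to the module above

// Collect parsed change objects; unparseable lines arrive as null.
const parsed = [];
const parser = makeChangeParser(function(obj) {
  if (obj) parsed.push(obj);
});

// Simulate lines from a _changes response body: one change per line,
// with the trailing last_seq line missing its opening brace.
Readable.from([
  '{"seq":"1-abc","id":"doc1","changes":[{"rev":"1-x"}]},',
  '"last_seq":"1-abc","pending":0}'
]).pipe(parser).on('finish', () => {
  console.log(parsed);
});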

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2017 IBM Corp. All rights reserved.
 //

@@ -14,201 +14,34 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Old (2.9.16):

// limitations under the License.
/* global describe it beforeEach */
'use strict';

const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const error = require('../includes/error.js');

const url = 'http://localhost:7777/testdb';
const db = request.client(url, { parallelism: 1 });
const timeoutDb = request.client(url, { parallelism: 1, requestTimeout: 500 });
const longTestTimeout = 3000;

beforeEach('Clean nock', function() {
  nock.cleanAll();
});

describe('#unit Check request headers', function() {
  it('should have a couchbackup user-agent', async function() {
    const couch = nock(url)
      .matchHeader('user-agent', /couchbackup-cloudant\/\d+\.\d+\.\d+(?:-SNAPSHOT)? \(Node.js v\d+\.\d+\.\d+\)/)
      .head('/good')
      .reply(200);

    return db.service.headDocument({ db: db.db, docId: 'good' }).then(() => {
      assert.ok(couch.isDone());
    });
  });
});

describe('#unit Check request response error callback', function() {
  it('should not callback with error for 200 response', async function() {
    const couch = nock(url)
      .get('/good')
      .reply(200, { ok: true });

    return db.service.getDocument({ db: db.db, docId: 'good' }).then(response => {
      assert.ok(response.result);
      assert.ok(couch.isDone());
    });
  });

  it('should callback with error after 3 500 responses', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .reply(500, function(uri, requestBody) {
        this.req.response.statusMessage = 'Internal Server Error';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `500 Internal Server Error: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with error after 3 POST 503 responses', async function() {
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .times(3)
      .reply(503, function(uri, requestBody) {
        this.req.response.statusMessage = 'Service Unavailable';
        return { error: 'service_unavailable', reason: 'Service unavailable' };
      });

    return assert.rejects(
      db.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `503 Service Unavailable: post ${url}/_bulk_get - Error: service_unavailable, Reason: Service unavailable`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with error after 3 429 responses', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .reply(429, function(uri, requestBody) {
        this.req.response.statusMessage = 'Too Many Requests';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `429 Too Many Requests: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with fatal error for 404 response', async function() {
    const couch = nock(url)
      .get('/bad')
      .reply(404, function(uri, requestBody) {
        this.req.response.statusMessage = 'Not Found';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `404 Not Found: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  });

  it('should callback with same error for no status code error response', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .replyWithError('testing badness');

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        const err2 = error.convertResponseError(err);
        assert.strictEqual(err, err2);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should retry request if HTTP request gets timed out', async function() {
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .delay(1000)
      .reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }] } })
      .post('/_bulk_get')
      .query(true)
      .reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }, { id: '2', ok: { _id: '2' } }] } });

    return timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then((response) => {
      assert.ok(response);
      assert.ok(response.result);
      assert.ok(response.result.results);
      assert.ok(response.result.results.docs);
      assert.strictEqual(response.result.results.docs.length, 2);
      assert.ok(couch.isDone());
    });
  });

  it('should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out', async function() {
    // Increase the timeout for this test to allow for the delays
    this.timeout(3000);
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .delay(1000)
      .times(3)
      .reply(200, { ok: true });

    return assert.rejects(
      timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
      (err) => {
        err = error.convertResponseError(err);
        // Note axios returns ECONNABORTED rather than ESOCKETTIMEDOUT
        // See https://github.com/axios/axios/issues/2710 via https://github.com/axios/axios/issues/1543
        assert.strictEqual(err.statusText, 'ECONNABORTED');
        assert.strictEqual(err.message, `timeout of 500ms exceeded: post ${url}/_bulk_get ECONNABORTED`);
        assert.ok(couch.isDone());
        return true;
      });
  });

  describe('#unit Check credentials', async function() {
    it('should properly decode username and password', async function() {
      const username = 'user%123';
      const password = 'colon:at@321';
      const url = `http://${encodeURIComponent(username)}:${encodeURIComponent(password)}@localhost:7777/testdb`;
      const sessionUrl = 'http://localhost:7777';
      const couch = nock(sessionUrl)
        .post('/_session', { username: username, password: password })
        .reply(200, { ok: true }, { 'Set-Cookie': 'AuthSession=ABC123DEF4356;' })
        .get('/')
        .reply(200);
      const db = request.client(url, { parallelism: 1 });
      return db.service.getServerInformation().then(response => {
        assert.ok(response);
        assert.ok(couch.isDone());
      });
    });
  });
});

New (2.9.17-SNAPSHOT.185):

// limitations under the License.
'use strict';

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function() {
  const liner = new stream.Transform({ objectMode: true });

  liner._transform = function(chunk, encoding, done) {
    let data = chunk.toString();
    if (this._lastLineData) {
      data = this._lastLineData + data;
    }

    const lines = data.split('\n');
    this._lastLineData = lines.splice(lines.length - 1, 1)[0];

    for (const i in lines) {
      this.push(lines[i]);
    }
    done();
  };

  liner._flush = function(done) {
    if (this._lastLineData) {
      this.push(this._lastLineData);
    }
    this._lastLineData = null;
    done();
  };

  return liner;
};
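A quick usage sketch for the line-splitting transform (require path and input file are assumed):

'use strict';

const fs = require('fs');
const liner = require('./includes/liner.js'); // assumed path to the module above

// Re-emit a backup file one line at a time.
fs.createReadStream('./backup.txt') // assumed input file
  .pipe(liner())
  .on('data', (line) => console.log('line:', line))
  .on('end', () => console.log('done'));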

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2017, 2018 IBM Corp. All rights reserved.
 //

@@ -14,89 +14,19 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Old (2.9.16):

// limitations under the License.
/* global describe it */
'use strict';

const assert = require('assert');
const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
  describe(u.scenario('Resume tests', params), function() {
    it('should create a log file', async function() {
      // Allow up to 90 s for this test
      u.setTimeout(this, 60);

      const actualBackup = `./${this.fileName}`;
      const logFile = `./${this.fileName}` + '.log';
      const p = u.p(params, { opts: { log: logFile } });
      return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
        assert.ok(fs.existsSync(logFile), 'The log file should exist.');
      });
    });

    it('should restore corrupted animaldb to a database correctly', async function() {
      // Allow up to 60 s to restore and compare (again it should be faster)!
      u.setTimeout(this, 60);

      const input = fs.createReadStream('./test/fixtures/animaldb_corrupted.json');
      const dbName = this.dbName;
      const p = u.p(params, { expectedRestoreErrorRecoverable: { name: 'BackupFileJsonError' } });
      return once(input, 'open')
        .then(() => {
          return u.testRestore(p, input, dbName);
        }).then(() => {
          return u.dbCompare('animaldb', dbName);
        });
    });

    it('should restore resumed animaldb with blank line to a database correctly', async function() {
      // Allow up to 60 s to restore and compare (again it should be faster)!
      u.setTimeout(this, 60);

      const input = fs.createReadStream('./test/fixtures/animaldb_resumed_blank.json');
      const dbName = this.dbName;
      return once(input, 'open')
        .then(() => {
          return u.testRestore(params, input, dbName);
        }).then(() => {
          return u.dbCompare('animaldb', dbName);
        });
    });
  });
});

describe('Resume tests', function() {
  // Currently cannot abort API backups, when we do this test should be run for
  // both API and CLI
  it('should correctly backup and restore backup10m', async function() {
    // Allow up to 90 s for this test
    u.setTimeout(this, 90);

    const actualBackup = `./${this.fileName}`;
    const logFile = `./${this.fileName}` + '.log';
    // Use abort parameter to terminate the backup
    const p = u.p(params, { abort: true }, { opts: { log: logFile } });
    const restoreDb = this.dbName;
    // Set the database doc count as fewer than this should be written during
    // resumed backup.
    p.exclusiveMaxExpected = 5096;

    return u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
  });

  // Note --output is only valid for CLI usage, this test should only run for CLI
  const params = { useApi: false };
  it('should correctly backup and restore backup10m using --output', async function() {
    // Allow up to 90 s for this test
    u.setTimeout(this, 90);

    const actualBackup = `./${this.fileName}`;
    const logFile = `./${this.fileName}` + '.log';
    // Use abort parameter to terminate the backup
    const p = u.p(params, { abort: true }, { opts: { output: actualBackup, log: logFile } });
    const restoreDb = this.dbName;
    // Set the database doc count as fewer than this should be written during
    // resumed backup.
    p.exclusiveMaxExpected = 5096;

    return await u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
  });
});

New (2.9.17-SNAPSHOT.185):

// limitations under the License.
'use strict';

module.exports = function(db, options, readstream, ee, callback) {
  const liner = require('../includes/liner.js')();
  const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);

  // pipe the input to the output, via transformation functions
  readstream
    .pipe(liner) // transform the input stream into per-line
    .on('error', function(err) {
      // Forward the error to the writer event emitter where we already have
      // listeners on for handling errors
      writer.emit('error', err);
    })
    .pipe(writer); // transform the data

  callback(null, writer);
};
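A hedged sketch of invoking this restore pipeline; the module path, URL, option values, and the writer's 'finished' event are assumptions:

'use strict';

const fs = require('fs');
const events = require('events');
const request = require('./includes/request.js'); // assumed path
const restorePipeline = require('./restore.js'); // assumed path to the module above

const db = request.client('http://localhost:5984/targetdb', { parallelism: 5 });
const ee = new events.EventEmitter();
ee.on('error', (err) => console.error('restore error:', err));

restorePipeline(db, { bufferSize: 500, parallelism: 5 }, fs.createReadStream('./backup.txt'), ee, function(err, writer) {
  if (!err) {
    // 'finished' is assumed to fire when the writer has flushed all batches.
    writer.on('finished', (data) => console.log('restore complete:', data));
  }
});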

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
 //

@@ -14,118 +14,117 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Old (2.9.16):

// limitations under the License.
/* global describe it before after */
'use strict';

const assert = require('assert');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;

describe('#unit Configuration', function() {
  let processEnvCopy;

  before('Save env', function() {
    // Copy env so we can reset it after the tests
    processEnvCopy = JSON.parse(JSON.stringify(process.env));
  });

  after('Reset env', function() {
    process.env = processEnvCopy;
  });

  it('respects the COUCH_URL env variable', function() {
    process.env.COUCH_URL = 'http://user:pass@myurl.com';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.url, 'string');
    assert.strictEqual(config.url, process.env.COUCH_URL);
  });

  it('respects the COUCH_DATABASE env variable', function() {
    process.env.COUCH_DATABASE = 'mydb';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.db, 'string');
    assert.strictEqual(config.db, process.env.COUCH_DATABASE);
  });

  it('respects the COUCH_BUFFER_SIZE env variable', function() {
    process.env.COUCH_BUFFER_SIZE = '1000';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.bufferSize, 'number');
    assert.strictEqual(config.bufferSize, 1000);
  });

  it('respects the COUCH_PARALLELISM env variable', function() {
    process.env.COUCH_PARALLELISM = '20';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.parallelism, 'number');
    assert.strictEqual(config.parallelism, 20);
  });

  it('respects the COUCH_REQUEST_TIMEOUT env variable', function() {
    process.env.COUCH_REQUEST_TIMEOUT = '10000';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.requestTimeout, 'number');
    assert.strictEqual(config.requestTimeout, 10000);
  });

  it('respects the CLOUDANT_IAM_API_KEY env variable', function() {
    const key = 'ABC123-ZYX987_cba789-xyz321';
    process.env.CLOUDANT_IAM_API_KEY = key;
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.iamApiKey, 'string');
    assert.strictEqual(config.iamApiKey, key);
  });

  it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function() {
    const u = 'https://testhost.example:1234/identity/token';
    process.env.CLOUDANT_IAM_TOKEN_URL = u;
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.iamTokenUrl, 'string');
    assert.strictEqual(config.iamTokenUrl, u);
  });

  it('respects the COUCH_LOG env variable', function() {
    process.env.COUCH_LOG = 'my.log';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.log, 'string');
    assert.strictEqual(config.log, process.env.COUCH_LOG);
  });

  it('respects the COUCH_RESUME env variable', function() {
    process.env.COUCH_RESUME = 'true';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.resume, 'boolean');
    assert.strictEqual(config.resume, true);
  });

  it('respects the COUCH_OUTPUT env variable', function() {
    process.env.COUCH_OUTPUT = 'myfile.txt';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.output, 'string');
    assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
  });

  it('respects the COUCH_MODE env variable', function() {
    process.env.COUCH_MODE = 'shallow';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.mode, 'string');
    assert.strictEqual(config.mode, 'shallow');
  });

  it('respects the COUCH_QUIET env variable', function() {
    process.env.COUCH_QUIET = 'true';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.quiet, 'boolean');
    assert.strictEqual(config.quiet, true);
  });
});

New (2.9.17-SNAPSHOT.185):

// limitations under the License.
'use strict';

const cliutils = require('./cliutils.js');
const config = require('./config.js');
const error = require('./error.js');
const path = require('path');
const pkg = require('../package.json');

function parseBackupArgs() {
  const program = require('commander');

  // Option CLI defaults
  const defaults = config.cliDefaults();

  // Options set by environment variables
  const envVarOptions = {};
  config.applyEnvironmentVariables(envVarOptions);

  program
    .version(pkg.version)
    .description('Backup a CouchDB/Cloudant database to a backup text file.')
    .usage('[options...]')
    .option('-b, --buffer-size <n>',
      cliutils.getUsage('number of documents fetched at once', defaults.bufferSize),
      Number)
    .option('-d, --db <db>',
      cliutils.getUsage('name of the database to backup', defaults.db))
    .option('-k, --iam-api-key <API key>',
      cliutils.getUsage('IAM API key to access the Cloudant server'))
    .option('-l, --log <file>',
      cliutils.getUsage('file to store logging information during backup; invalid in "shallow" mode', 'a temporary file'),
      path.normalize)
    .option('-m, --mode <mode>',
      cliutils.getUsage('"shallow" if only a superficial backup is done (ignoring conflicts and revision tokens), else "full" for complete backup', defaults.mode),
      (mode) => { return mode.toLowerCase(); })
    .option('-o, --output <file>',
      cliutils.getUsage('file name to store the backup data', 'stdout'),
      path.normalize)
    .option('-p, --parallelism <n>',
      cliutils.getUsage('number of HTTP requests to perform in parallel when performing a backup; ignored in "shallow" mode', defaults.parallelism),
      Number)
    .option('-q, --quiet',
      cliutils.getUsage('suppress batch messages', defaults.quiet))
    .option('-r, --resume',
      cliutils.getUsage('continue a previous backup from its last known position; invalid in "shallow" mode', defaults.resume))
    .option('-t, --request-timeout <n>',
      cliutils.getUsage('milliseconds to wait for a response to a HTTP request before retrying the request', defaults.requestTimeout),
      Number)
    .option('-u, --url <url>',
      cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
    .parse(process.argv);

  // Remove defaults that don't apply when using shallow mode
  if (program.opts().mode === 'shallow' || envVarOptions.mode === 'shallow') {
    delete defaults.parallelism;
    delete defaults.log;
    delete defaults.resume;
  }

  // Apply the options in order so that the CLI overrides env vars and env variables
  // override defaults.
  const opts = Object.assign({}, defaults, envVarOptions, program.opts());

  if (opts.resume && (opts.log === defaults.log)) {
    // If resuming and the log file arg is the newly generated tmp name from defaults then we know that --log wasn't specified.
    // We have to do this check here for the CLI case because of the default.
    error.terminationCallback(new error.BackupError('NoLogFileName', 'To resume a backup, a log file must be specified'));
  }

  return opts;
}

function parseRestoreArgs() {
  const program = require('commander');

  // Option CLI defaults
  const defaults = config.cliDefaults();

  // Options set by environment variables
  const envVarOptions = {};
  config.applyEnvironmentVariables(envVarOptions);

  program
    .version(pkg.version)
    .description('Restore a CouchDB/Cloudant database from a backup text file.')
    .usage('[options...]')
    .option('-b, --buffer-size <n>',
      cliutils.getUsage('number of documents restored at once', defaults.bufferSize),
      Number)
    .option('-d, --db <db>',
      cliutils.getUsage('name of the new, existing database to restore to', defaults.db))
    .option('-k, --iam-api-key <API key>',
      cliutils.getUsage('IAM API key to access the Cloudant server'))
    .option('-p, --parallelism <n>',
      cliutils.getUsage('number of HTTP requests to perform in parallel when restoring a backup', defaults.parallelism),
      Number)
    .option('-q, --quiet',
      cliutils.getUsage('suppress batch messages', defaults.quiet))
    .option('-t, --request-timeout <n>',
      cliutils.getUsage('milliseconds to wait for a response to a HTTP request before retrying the request', defaults.requestTimeout),
      Number)
    .option('-u, --url <url>',
      cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
    .parse(process.argv);

  // Apply the options in order so that the CLI overrides env vars and env variables
  // override defaults.
  const opts = Object.assign({}, defaults, envVarOptions, program.opts());

  return opts;
}

module.exports = {
  parseBackupArgs: parseBackupArgs,
  parseRestoreArgs: parseRestoreArgs
};
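Both parsers rest on plain Object.assign precedence (later sources win), so CLI options beat environment variables, which beat built-in defaults. A tiny self-contained illustration:

'use strict';

// Later arguments to Object.assign override earlier ones.
const defaults = { parallelism: 5, quiet: false };
const envVarOptions = { parallelism: 10 };
const cliOptions = { quiet: true };

const opts = Object.assign({}, defaults, envVarOptions, cliOptions);
console.log(opts); // { parallelism: 10, quiet: true }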

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2017 IBM Corp. All rights reserved.
 //

@@ -14,437 +14,80 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Old (2.9.16):

// limitations under the License.
/* global */
'use strict';

const assert = require('node:assert');
const { once } = require('node:events');
const fs = require('node:fs');
const { PassThrough } = require('node:stream');
const { pipeline } = require('node:stream/promises');
const { createGzip, createGunzip } = require('node:zlib');
const debug = require('debug');
const { Tail } = require('tail');
const app = require('../app.js');
const dbUrl = require('../includes/cliutils.js').databaseUrl;
const compare = require('./compare.js');
const request = require('../includes/request.js');
const { cliBackup, cliDecrypt, cliEncrypt, cliGzip, cliGunzip, cliRestore } = require('./test_process.js');
const testLogger = debug('couchbackup:test:utils');

function scenario(test, params) {
  return `${test} ${(params.useApi) ? 'using API' : 'using CLI'}`;
}

function params() {
  const p = {};
  for (let i = 0; i < arguments.length; i++) {
    Object.assign(p, arguments[i]);
  }
  return p;
}

// Returns the event emitter for API calls, or the child process for CLI calls
async function testBackup(params, databaseName, outputStream) {
  const pipelineStreams = [];
  const promises = [];

  // Configure API key if needed
  augmentParamsWithApiKey(params);

  let backup;
  let backupStream;
  let backupPromise;
  let tail;
  if (params.useApi) {
    if (params.useStdOut) {
      backupStream = outputStream;
    } else {
      backupStream = new PassThrough();
    }
    const backupCallbackPromise = new Promise((resolve, reject) => {
      backup = app.backup(
        dbUrl(process.env.COUCH_URL, databaseName),
        backupStream,
        params.opts,
        (err, data) => {
          if (err) {
            testLogger(`API backup callback with ${JSON.stringify(err)}, will reject.`);
            reject(err);
          } else {
            testLogger(`API backup callback with ${JSON.stringify(data)}, will resolve.`);
            resolve(data);
          }
        });
    });
    const backupFinishedPromise = once(backup, 'finished')
      .then((summary) => {
        testLogger(`Resolving API backup event promise with ${JSON.stringify(summary)}`);
        if (params.resume) {
          assertWrittenFewerThan(summary.total, params.exclusiveMaxExpected);
        }
      })
      .catch((err) => {
        testLogger(`Rejecting API backup event promise with error ${JSON.stringify(err)}`);
        throw err;
      });
    backupPromise = Promise.all([backupCallbackPromise, backupFinishedPromise])
      .then(() => {
        testLogger('Both API backup promises resolved.');
      });
  } else {
    backup = cliBackup(databaseName, params);
    backupStream = backup.stream;
    backupPromise = backup.childProcessPromise;
    if (params.abort) {
      // Create the log file for abort tests so we can tail it, other tests assert
      // the log file is usually created normally by the backup process.
      const f = fs.openSync(params.opts.log, 'w');
      fs.closeSync(f);

      // Use tail to watch the log file for a batch to be completed then abort
      tail = new Tail(params.opts.log, { useWatchFile: true, fsWatchOptions: { interval: 500 }, follow: false });
      tail.on('line', function(data) {
        const matches = data.match(/:d batch\d+/);
        if (matches !== null) {
          // Turn off the tail.
          tail.unwatch();
          // Abort the backup
          backup.childProcess.kill();
        }
      });
    }
    if (params.resume) {
      const listenerPromise = new Promise((resolve, reject) => {
        const listener = function(data) {
          const matches = data.toString().match(/.*Finished - Total document revisions written: (\d+).*/);
          if (matches !== null) {
            try {
              assertWrittenFewerThan(matches[1], params.exclusiveMaxExpected);
              resolve();
            } catch (err) {
              reject(err);
            }
            process.stderr.removeListener('data', listener);
          }
        };
        backup.childProcess.stderr.on('data', listener);
      });
      promises.push(listenerPromise);
    }
  }
  promises.push(backupPromise);

  if (!params.useStdOut) {
    pipelineStreams.push(backupStream);
  }

  if (params.compression) {
    if (params.useApi) {
      pipelineStreams.push(createGzip());
    } else {
      const gzipProcess = cliGzip();
      pipelineStreams.push(gzipProcess.stream);
      promises.push(gzipProcess.childProcessPromise);
    }
  }

  // Pipe via encryption if requested
  if (params.encryption) {
    if (params.useApi) {
      // Currently only CLI support for testing encryption
      return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
    } else {
      const encryptProcess = cliEncrypt();
      pipelineStreams.push(encryptProcess.stream);
      promises.push(encryptProcess.childProcessPromise);
    }
  }

  if (!params.useStdOut) {
    // Finally add the outputStream to the list we want to pipeline
    pipelineStreams.push(outputStream);

    // Create the promisified pipeline and add it to the array of promises we'll wait for
    promises.unshift(pipeline(pipelineStreams));
  }

  // Wait for the promises and then assert
  return Promise.all(promises)
    .then(() => testLogger('All backup promises resolved.'))
    .then(() => {
      if (params.expectedBackupError) {
        return Promise.reject(new Error('Backup passed when it should have failed.'));
      }
    })
    .catch((err) => {
      if (params.expectedBackupError || params.abort) {
        if (params.useApi) {
          assert.strictEqual(err.name, params.expectedBackupError.name, 'The backup should receive the expected error.');
        } else {
          if (params.abort) {
            // The tail should be stopped when we match a line and abort, but if
            // something didn't work we need to make sure the tail is stopped
            tail.unwatch();
            // Assert that the process was aborted as expected
            assert.strictEqual(err.signal, 'SIGTERM', `The backup should have terminated with SIGTERM, but was ${err.signal}.`);
          } else if (params.expectedBackupError) {
            assert.strictEqual(err.code, params.expectedBackupError.code, `The backup exited with unexpected code ${err.code} and signal ${err.signal}.`);
          }
        }
      } else {
        return Promise.reject(err);
      }
    });
}

async function testRestore(params, inputStream, databaseName) {
  const pipelineStreams = [inputStream];
  const promises = [];

  // Configure API key if needed
  augmentParamsWithApiKey(params);

  let restore;
  let restoreStream;
  let restorePromise;

  if (params.useApi) {
    restoreStream = new PassThrough();

    const restoreCallbackPromise = new Promise((resolve, reject) => {
      restore = app.restore(
        restoreStream,
        dbUrl(process.env.COUCH_URL, databaseName),
        params.opts,
        (err, data) => {
          if (err) {
            testLogger(`API restore callback with ${err}, will reject.`);
            reject(err);
          } else {
            resolve(data);
          }
        });
    });
    const restoreFinishedPromise = once(restore, 'finished')
      .then((summary) => {
        testLogger(`Resolving API restore promise with ${summary}`);
      })
      .catch((err) => {
        testLogger(`Handling API restore error event ${JSON.stringify(err)}`);
        if (params.expectedRestoreErrorRecoverable) {
          testLogger(`Expecting restore error ${params.expectedRestoreErrorRecoverable.name}`);
          assert.strictEqual(err.name, params.expectedRestoreErrorRecoverable.name, 'The restore should receive the expected recoverable error.');
        } else {
          testLogger(`API restore will reject by throwing error event ${JSON.stringify(err)}`);
          return Promise.reject(err);
        }
      });
    restorePromise = Promise.all([restoreCallbackPromise, restoreFinishedPromise]);
  } else {
    restore = cliRestore(databaseName, params);
    restoreStream = restore.stream;
    restorePromise = restore.childProcessPromise;
  }
  promises.push(restorePromise);

  // Pipe via decompression if requested
  if (params.compression) {
    if (params.useApi) {
      pipelineStreams.push(createGunzip());
    } else {
      const gunzipProcess = cliGunzip();
      pipelineStreams.push(gunzipProcess.stream);
      promises.push(gunzipProcess.childProcessPromise);
    }
  }

  // Pipe via decryption if requested
  if (params.encryption) {
    if (params.useApi) {
      // Currently only CLI support for testing encryption
      return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
    } else {
      const decryptProcess = cliDecrypt();
      pipelineStreams.push(decryptProcess.stream);
      promises.push(decryptProcess.childProcessPromise);
    }
  }

  // pipeline everything into the restoreStream
  pipelineStreams.push(restoreStream);

  // Create the promisified pipeline and add it to the array of promises we'll wait for
  promises.unshift(pipeline(pipelineStreams));

  // Wait for all the promises to settle and then assert based on the process promise
  return Promise.allSettled(promises)
    .then(() => { return restorePromise; })
    .then((summary) => {
      testLogger(`Restore promise resolved with ${summary}.`);
      if (params.expectedRestoreError) {
        return Promise.reject(new Error('Restore passed when it should have failed.'));
      }
    })
    .catch((err) => {
      testLogger(`Restore promise rejected with ${err}.`);
      if (params.expectedRestoreError) {
        if (params.useApi) {
          assert.strictEqual(err.name, params.expectedRestoreError.name, 'The restore should receive the expected error.');
        } else {
          assert.strictEqual(err.code, params.expectedRestoreError.code, `The restore exited with unexpected code ${err.code} and signal ${err.signal}.`);
        }
      } else {
        return Promise.reject(err);
      }
    });
}

// Serial backup and restore via a file on disk
async function testBackupAndRestoreViaFile(params, srcDb, backupFile, targetDb) {
  return testBackupToFile(params, srcDb, backupFile).then(() => {
    return testRestoreFromFile(params, backupFile, targetDb);
  });
}

async function testBackupToFile(params, srcDb, backupFile) {
  // Open the file for appending if this is a resume
  const output = fs.createWriteStream(backupFile, { flags: (params.opts && params.opts.resume) ? 'a' : 'w' });
  return once(output, 'open')
    .then(() => {
      return testBackup(params, srcDb, output);
    });
}

async function testRestoreFromFile(params, backupFile, targetDb) {
  const input = fs.createReadStream(backupFile);
  return once(input, 'open')
    .then(() => {
      return testRestore(params, input, targetDb);
    });
}

async function testDirectBackupAndRestore(params, srcDb, targetDb) {
  // Allow a 64 MB highWaterMark for the passthrough during testing
  const passthrough = new PassThrough({ highWaterMark: 67108864 });
  const backup = testBackup(params, srcDb, passthrough);
  const restore = testRestore(params, passthrough, targetDb);
  return Promise.all([backup, restore]).then(() => {
    return dbCompare(srcDb, targetDb);
  });
}

async function testBackupAbortResumeRestore(params, srcDb, backupFile, targetDb) {
  return Promise.resolve()
    .then(() => {
      // First backup with an abort
      if (params.opts && params.opts.output) {
        return testBackup(params, srcDb, new PassThrough());
      } else {
        return testBackupToFile(params, srcDb, backupFile);
      }
    }).then(() => {
      // Remove the abort parameter and add the resume parameter
      delete params.abort;
      params.opts.resume = true;

      // Resume the backup
      if (params.opts && params.opts.output) {
        return testBackup(params, srcDb, new PassThrough());
      } else {
        return testBackupToFile(params, srcDb, backupFile);
      }
    }).then(() => {
      // Restore the backup
      return testRestoreFromFile(params, backupFile, targetDb);
    }).then(() => {
      // Now compare the restored to the original for validation
      return dbCompare(srcDb, targetDb);
    });
}

async function dbCompare(db1Name, db2Name) {
  const client = request.client(process.env.COUCH_BACKEND_URL, {});
  return compare.compare(db1Name, db2Name, client.service)
    .then(result => {
      return assert.strictEqual(result, true, 'The database comparison should succeed, but failed');
    });
}

function sortByIdThenRev(o1, o2) {
  if (o1._id < o2._id) return -1;
  if (o1._id > o2._id) return 1;
  if (o1._rev < o2._rev) return -1;
  if (o1._rev > o2._rev) return 1;
  return 0;
}

function readSortAndDeepEqual(actualContentPath, expectedContentPath) {
  const backupContent = JSON.parse(fs.readFileSync(actualContentPath, 'utf8'));
  const expectedContent = JSON.parse(fs.readFileSync(expectedContentPath, 'utf8'));
  // Array order of the docs is important for equality, but not for backup
  backupContent.sort(sortByIdThenRev);
  expectedContent.sort(sortByIdThenRev);
  // Assert that the backup matches the expected
  assert.deepStrictEqual(backupContent, expectedContent);
}

function setTimeout(context, timeout) {
  // Increase timeout using TEST_TIMEOUT_MULTIPLIER
  const multiplier = (typeof process.env.TEST_TIMEOUT_MULTIPLIER !== 'undefined') ? parseInt(process.env.TEST_TIMEOUT_MULTIPLIER) : 1;
  timeout *= multiplier;
  // Set the mocha timeout
  context.timeout(timeout * 1000);
}

function assertGzipFile(path) {
  // 1f 8b is the gzip magic number
  const expectedBytes = Buffer.from([0x1f, 0x8b]);
  const buffer = Buffer.alloc(2);
  const fd = fs.openSync(path, 'r');
  // Read the first two bytes
  fs.readSync(fd, buffer, 0, 2, 0);
  fs.closeSync(fd);
  // Assert the magic number corresponds to gz extension
  assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be gz compressed.');
}

function assertEncryptedFile(path) {
  // Openssl encrypted files start with Salted
  const expectedBytes = Buffer.from('Salted');
  const buffer = Buffer.alloc(6);
  const fd = fs.openSync(path, 'r');
  // Read the first six bytes
  fs.readSync(fd, buffer, 0, 6, 0);
  fs.closeSync(fd);
  // Assert first 6 characters of the file are "Salted"
  assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be encrypted.');
}

function assertWrittenFewerThan(total, number) {
  assert(total < number && total > 0, `Saw ${total} but expected between 1 and ${number - 1} documents for the resumed backup.`);
}

function augmentParamsWithApiKey(params) {
  if (process.env.COUCHBACKUP_TEST_IAM_API_KEY) {
    if (!params.opts) {
      params.opts = {};
    }
    params.opts.iamApiKey = process.env.COUCHBACKUP_TEST_IAM_API_KEY;
    params.opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
  }
}

module.exports = {
  scenario,
  p: params,
  setTimeout,
  dbCompare,
  readSortAndDeepEqual,
  assertGzipFile,
  assertEncryptedFile,
  testBackup,
  testRestore,
  testDirectBackupAndRestore,
  testBackupToFile,
  testRestoreFromFile,
  testBackupAndRestoreViaFile,
  testBackupAbortResumeRestore
};

New (2.9.17-SNAPSHOT.185):

// limitations under the License.
'use strict';

const fs = require('fs');
// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');
const liner = require('./liner.js');

const onLine = function(onCommand, getDocs) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    if (line && line[0] === ':') {
      const obj = {
        command: null,
        batch: null,
        docs: []
      };

      let matches;

      // extract command
      matches = line.match(/^:([a-z_]+) ?/);
      if (matches) {
        obj.command = matches[1];
      }

      // extract batch
      matches = line.match(/ batch([0-9]+)/);
      if (matches) {
        obj.batch = parseInt(matches[1]);
      }

      // extract doc ids
      if (getDocs && obj.command === 't') {
        const json = line.replace(/^.* batch[0-9]+ /, '').trim();
        obj.docs = JSON.parse(json);
      }
      onCommand(obj);
    }
    done();
  };
  return change;
};

/**
 * Generate a list of remaining batches from a download file.
 *
 * @param {string} log - log file name
 * @param {function} callback - callback with err, {changesComplete: N, batches: N}.
 *  changesComplete signifies whether the log file appeared to
 *  have completed reading the changes feed (contains :changes_complete).
 *  batches are remaining batch IDs for download.
 */
module.exports = function(log, callback) {
  // our sense of state
  const state = {
  };
  let changesComplete = false;

  // called with each line from the log file
  const onCommand = function(obj) {
    if (obj.command === 't') {
      state[obj.batch] = true;
    } else if (obj.command === 'd') {
      delete state[obj.batch];
    } else if (obj.command === 'changes_complete') {
      changesComplete = true;
    }
  };

  // stream through the previous log file
  fs.createReadStream(log)
    .pipe(liner())
    .pipe(onLine(onCommand, false))
    .on('finish', function() {
      const obj = { changesComplete: changesComplete, batches: state };
      callback(null, obj);
    });
};
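To make the log-file state machine concrete, a hedged worked example; the log line format is inferred from the parsing regexes above (todo batches logged with `:t`, completed ones with `:d`, `:changes_complete` marking the end of the changes feed) and the module path is assumed:

'use strict';

const fs = require('fs');
const os = require('os');
const path = require('path');
const logfilesummary = require('./includes/logfilesummary.js'); // assumed path to the module above

// A log where batch0 completed but batch1 did not.
const log = path.join(os.tmpdir(), 'example.log');
fs.writeFileSync(log, [
  ':t batch0 [{"id":"doc1"}]',
  ':t batch1 [{"id":"doc2"}]',
  ':changes_complete',
  ':d batch0'
].join('\n') + '\n');

logfilesummary(log, function(err, summary) {
  // Expect: summary.changesComplete === true, summary.batches === { '1': true }
  console.log(err, summary);
});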

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2017, 2022 IBM Corp. All rights reserved.
 //

@@ -14,281 +14,68 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Old (2.9.16):

// limitations under the License.
/* global describe it before after beforeEach */
'use strict';

const assert = require('assert');
const fs = require('fs');
const u = require('./citestutils.js');
const mockServerPort = +process.env.COUCHBACKUP_MOCK_SERVER_PORT || 7777;
const { once } = require('node:events');
const url = `http://localhost:${mockServerPort}`;
const nock = require('nock');
const httpProxy = require('http-proxy');
const Readable = require('stream').Readable;

// Create an infinite stream to read.
// It just keeps sending a backup line, useful for testing cases of
// termination while a stream has content remaining (the animaldb backup
// is too small for that).
class InfiniteBackupStream extends Readable {
  constructor(opt) {
    super(opt);
    this.contents = Buffer.from('[{"_id":"giraffe","_rev":"3-7665c3e66315ff40616cceef62886bd8","min_weight":830,"min_length":5,"max_weight":1600,"max_length":6,"wiki_page":"http://en.wikipedia.org/wiki/Giraffe","class":"mammal","diet":"herbivore","_revisions":{"start":3,"ids":["7665c3e66315ff40616cceef62886bd8","aaaf10d5a68cdf22d95a5482a0e95549","967a00dff5e02add41819138abb3284d"]}}]\n', 'utf8');
  }

  _read() {
    let proceed;
    do {
      proceed = this.push(this.contents);
    } while (proceed);
  }
}

function assertNock() {
  try {
    assert.ok(nock.isDone());
  } catch (err) {
    console.error('pending mocks: %j', nock.pendingMocks());
    throw err;
  }
}

function testPromiseWithAssertNock(testPromise) {
  return testPromise.finally(() => {
    assertNock();
  });
}

async function backupHttpError(opts, errorName, errorCode) {
  const p = u.p(opts, { expectedBackupError: { name: errorName, code: errorCode } });

  // Create a file and attempt a backup to it
  const output = fs.createWriteStream('/dev/null');
  return once(output, 'open')
    .then(() => {
      return testPromiseWithAssertNock(u.testBackup(p, 'fakenockdb', output));
    });
}

async function restoreHttpError(opts, errorName, errorCode) {
  const q = u.p(opts, { expectedRestoreError: { name: errorName, code: errorCode } });
  return testPromiseWithAssertNock(u.testRestoreFromFile(q, './test/fixtures/animaldb_expected.json', 'fakenockdb'));
}

[{ useApi: true }, { useApi: false }].forEach(function(params) {
  describe(u.scenario('#unit Fatal errors', params), function() {
    // These tests do real requests with mocks and if they run slowly
    // the 2 second default mocha timeout can be insufficient, use 10 s
    this.timeout(10000);

    let processEnvCopy;
    let proxy;

    before('Set process data for test', function() {
      const proxyPort = mockServerPort + 1000;
      // Copy env and argv so we can reset them after the tests
      processEnvCopy = JSON.parse(JSON.stringify(process.env));

      // Set up a proxy to point to our nock server because the nock override
      // isn't visible to the spawned CLI process
      if (!params.useApi) {
        proxy = httpProxy.createProxyServer({ target: url }).listen(proxyPort, 'localhost');
        proxy.on('error', (err, req, res) => {
          console.log(`Proxy received error ${err}`);
          res.writeHead(400, {
            'Content-Type': 'application/json'
          });
          res.end(JSON.stringify(err));
        });
      }

      // setup environment variables
      process.env.COUCH_URL = (params.useApi) ? url : `http://localhost:${proxyPort}`;

      nock.emitter.on('no match', (req, opts) => {
        console.error(`Unmatched nock request ${opts.method} ${opts.protocol}${opts.host}${opts.path}`);
      });
    });

    after('Reset process data', function(done) {
      process.env = processEnvCopy;
      nock.emitter.removeAllListeners();
      if (!params.useApi) {
        proxy.close(done);
      } else {
        done();
      }
    });

    beforeEach('Reset nocks', function() {
      nock.cleanAll();
    });

    describe('for backup', function() {
      it('should terminate when DB does not exist', function() {
        // Simulate existence check
        nock(url).head('/fakenockdb').reply(404, { error: 'not_found', reason: 'missing' });
        return backupHttpError(params, 'DatabaseNotFound', 10);
      });

      it('should terminate on BulkGetError', function() {
        // Simulate existence check
        const n = nock(url).head('/fakenockdb').reply(200);
        // Simulate _bulk_get not available
        n.post('/fakenockdb/_bulk_get').reply(404, { error: 'not_found', reason: 'missing' });
        return backupHttpError(params, 'BulkGetError', 50);
      });

      it('should terminate on Unauthorized existence check', function() {
        // Simulate a 401
        nock(url).head('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
        return backupHttpError(params, 'Unauthorized', 11);
      });

      it('should terminate on Forbidden no _reader', function() {
        // Simulate a 403
        nock(url).head('/fakenockdb').reply(403, { error: 'forbidden', reason: '_reader access is required for this request' });
        return backupHttpError(params, 'Forbidden', 12);
      });

      it('should terminate on _bulk_get HTTPFatalError', function() {
        // Provide a mock complete changes log to allow a resume to skip ahead
        const p = u.p(params, { opts: { resume: true, log: './test/fixtures/test.log' } });
        // Allow the existence and _bulk_get checks to pass
        const n = nock(url).head('/fakenockdb').reply(200);
        n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
        // Simulate a fatal HTTP error when trying to fetch docs
        // Note: 2 outstanding batches, so 2 responses, 1 mock is optional because we can't guarantee timing
        n.post('/fakenockdb/_bulk_get').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
        n.post('/fakenockdb/_bulk_get').query(true).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
        return backupHttpError(p, 'HTTPFatalError', 40);
      });

      it('should terminate on NoLogFileName', function() {
        // Don't supply a log file name with resume
        const p = u.p(params, { opts: { resume: true } });
        return backupHttpError(p, 'NoLogFileName', 20);
      });

      it('should terminate on LogDoesNotExist', function() {
        // Use a non-existent log file
        const p = u.p(params, { opts: { resume: true, log: './test/fixtures/doesnotexist.log' } });
        return backupHttpError(p, 'LogDoesNotExist', 21);
      });

      it('should terminate on IncompleteChangesInLogFile', function() {
        // Use an incomplete changes log file
        const p = u.p(params, { opts: { resume: true, log: './test/fixtures/incomplete_changes.log' } });
        // Allow the existence and _bulk_get checks to pass
        const n = nock(url).head('/fakenockdb').reply(200);
        n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
        // Should fail when it reads the incomplete changes
        return backupHttpError(p, 'IncompleteChangesInLogFile', 22);
      });

      it('should terminate on _changes HTTPFatalError', function() {
        // Allow the existence and _bulk_get checks to pass
        const n = nock(url).head('/fakenockdb').reply(200);
        n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
        // Simulate a fatal HTTP error when trying to fetch docs (note 2 outstanding batches)
        n.post('/fakenockdb/_changes').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
        return backupHttpError(params, 'HTTPFatalError', 40);
      });

      it('should terminate on SpoolChangesError', function() {
        // Allow the existence and _bulk_get checks to pass
        const n = nock(url).head('/fakenockdb').reply(200);
        n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
        // Simulate a changes without a last_seq
        n.post('/fakenockdb/_changes').query(true).reply(200,
          {
            results: [{
              seq: '2-g1AAAAEbeJzLYWBgYMlgTmFQSElKzi9KdUhJstTLTS3KLElMT9VLzskvTUnMK9HLSy3JAapkSmRIsv___39WBnMiUy5QgN3MzDIxOdEMWb85dv0gSxThigyN8diS5AAkk-pBFiUyoOkzxKMvjwVIMjQAKaDW_Zh6TQnqPQDRC7I3CwDPDV1k',
              id: 'badger',
              changes: [{ rev: '4-51aa94e4b0ef37271082033bba52b850' }]
            }]
          });
        return backupHttpError(params, 'SpoolChangesError', 30);
      });
    });

    describe('for restore', function() {
      it('should terminate on Unauthorized db existence check', function() {
        // Simulate a 401
        nock(url).get('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
        return restoreHttpError(params, 'Unauthorized', 11);
      });

      it('should terminate on Forbidden no _writer', function() {
        // Simulate the DB exists (i.e. you can read it)
        const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
        // Simulate a 403 trying to write
        n.post('/fakenockdb/_bulk_docs').reply(403, { error: 'forbidden', reason: '_writer access is required for this request' });
        return restoreHttpError(params, 'Forbidden', 12);
      });

      it('should terminate on RestoreDatabaseNotFound', function() {
        // Simulate the DB does not exist
        nock(url).get('/fakenockdb').reply(404, { error: 'not_found', reason: 'Database does not exist.' });
        return restoreHttpError(params, 'DatabaseNotFound', 10);
      });

      it('should terminate on notEmptyDBErr when database is not empty', function() {
        // Simulate the DB that does exist and not empty
        nock(url).get('/fakenockdb').reply(200, { doc_count: 10, doc_del_count: 0 });
        return restoreHttpError(params, 'DatabaseNotEmpty', 13);
      });

      it('should terminate on notEmptyDBErr when database is not new', function() {
        // Simulate the DB that does exist and not new
        nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 10 });
        return restoreHttpError(params, 'DatabaseNotEmpty', 13);
      });

      it('should terminate on _bulk_docs HTTPFatalError', function() {
        // Simulate the DB exists
        const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
        // Use a parallelism of one and mock one response
        const p = u.p(params, { opts: { parallelism: 1 } });
        // Simulate a 400 trying to write
        n.post('/fakenockdb/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
        return restoreHttpError(p, 'HTTPFatalError', 40);
      });

      it('should terminate on _bulk_docs HTTPFatalError from system database', function() {
        // Simulate that target database exists and is _not_ empty.
        // This should pass validator as we exclude system databases from the check.
        const n = nock(url).get('/_replicator').reply(200, { doc_count: 1, doc_del_count: 0 });
        // Simulate a 400 trying to write
        n.post('/_replicator/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
        // Use a parallelism of one and mock one response
        const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
        return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), '_replicator'));
      });

      it('should terminate on _bulk_docs HTTPFatalError large stream', function() {
        // Simulate the DB exists
        const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
        // Simulate a 400 trying to write
        // Provide a body function to handle the stream, but allow any body
        n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
        // Use only parallelism 1 so we don't have to mock up loads of responses
        const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
        return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), 'fakenockdb'));
      });

      it('should terminate on multiple _bulk_docs HTTPFatalError', function() {
        // Simulate the DB exists
        const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
        // Simulate a 400 trying to write docs, 5 times because of default parallelism
        // Provide a body function to handle the stream, but allow any body
        // Four of the mocks are optional because of parallelism 5 we can't guarantee that the exit will happen
        // after all 5 requests, but we must get at least one of them
        n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
        n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).times(4).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
        const q = u.p(params, { opts: { bufferSize: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
        return restoreHttpError(q, 'HTTPFatalError', 40);
      });
    });
  });
});

New (2.9.17-SNAPSHOT.185):

// limitations under the License.
'use strict';

const async = require('async');
const error = require('./error.js');
const events = require('events');

module.exports = function(db, options) {
  const ee = new events.EventEmitter();
  const start = new Date().getTime();
  let batch = 0;
  let hasErrored = false;
  let startKey = null;
  let total = 0;

  async.doUntil(
    function(callback) {
      // Note, include_docs: true is set automatically when using the
      // fetch function.
      const opts = { db: db.db, limit: options.bufferSize, includeDocs: true };

      // To avoid double fetching a document solely for the purposes of getting
      // the next ID to use as a startKey for the next page we instead use the
      // last ID of the current page and append the lowest unicode sort
      // character.
      if (startKey) opts.startKey = `${startKey}\0`;
      db.service.postAllDocs(opts).then(response => {
        const body = response.result;
        if (!body.rows) {
          ee.emit('error', new error.BackupError(
            'AllDocsError', 'ERROR: Invalid all docs response'));
          callback();
        } else {
          if (body.rows.length < opts.limit) {
            startKey = null; // last batch
          } else {
            startKey = body.rows[opts.limit - 1].id;
          }
          const docs = [];
          body.rows.forEach(function(doc) {
            docs.push(doc.doc);
          });
          if (docs.length > 0) {
            ee.emit('received', {
              batch: batch++,
              data: docs,
              length: docs.length,
              time: (new Date().getTime() - start) / 1000,
              total: total += docs.length
            });
          }
          callback();
        }
      }).catch(err => {
        err = error.convertResponseError(err);
        ee.emit('error', err);
        hasErrored = true;
        callback();
      });
    },
    function(callback) { callback(null, hasErrored || startKey == null); },
    function() { ee.emit('finished', { total: total }); }
  );

  return ee;
};
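A short consumption sketch for the added all-docs backup generator; the client construction, module path, and option values are assumptions:

'use strict';

const request = require('./includes/request.js'); // assumed path
const allDocsBackup = require('./includes/alldocsbackup.js'); // assumed path to the module above

const db = request.client('http://localhost:5984/sourcedb', { parallelism: 1 });
allDocsBackup(db, { bufferSize: 500 })
  .on('received', (block) => console.log(`batch ${block.batch}: ${block.length} docs, ${block.total} total`))
  .on('error', (err) => console.error('backup error:', err))
  .on('finished', (summary) => console.log('finished:', summary.total));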

@@ -1,2 +1,2 @@
-// Copyright © 2023 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
 //

@@ -14,88 +14,269 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Old (2.9.16):

// limitations under the License.
'use strict';

const chunk = require('lodash/chunk');
const difference = require('lodash/difference');
const forOwn = require('lodash/forOwn');
const isEmpty = require('lodash/isEmpty');
const union = require('lodash/union');

const compare = async function(database1, database2, client) {
  // check docs same in both dbs
  const allDocs1 = await getAllDocs(client, database1);
  const allDocs2 = await getAllDocs(client, database2);

  const onlyInDb1 = (difference(allDocs1, allDocs2));
  const onlyInDb2 = (difference(allDocs2, allDocs1));

  let databasesSame = isEmpty(onlyInDb1) && isEmpty(onlyInDb2);

  if (!databasesSame) {
    console.log(onlyInDb1.length + ' documents only in db 1.');
    console.log('Document IDs only in db 1: ' + onlyInDb1);
    console.log(onlyInDb2.length + ' documents only in db 2.');
    console.log('Document IDs only in db 2: ' + onlyInDb2);
  }

  // check revs same in docs common to both dbs
  const partitionSize = 500;
  const batches = chunk(union(allDocs1, allDocs2), partitionSize);

  const missingRevsInDb2 = await getMissingRevs(client, database1, database2, batches);
  const missingRevsInDb1 = await getMissingRevs(client, database2, database1, batches);

  databasesSame = databasesSame && isEmpty(missingRevsInDb1) && isEmpty(missingRevsInDb2);

  if (!databasesSame) {
    console.log('Missing revs in db 1:' + JSON.stringify(missingRevsInDb1));
    console.log('Missing revs in db 2:' + JSON.stringify(missingRevsInDb2));
  }

  return databasesSame;
};

const getMissingRevs = async(client, databaseName1, databaseName2, batcheses) => {
  const fakeRevisionId = '9999-a';

  const missing = {};

  // look in db1 - use a fake revision ID to fetch all leaf revisions
  for (const batches of batcheses) {
    const documentRevisions = {};
    batches.forEach(id => (documentRevisions[id] = [fakeRevisionId]));

    const result1 = await client.postRevsDiff({ db: databaseName1, documentRevisions });
    const revsDiffRequestDb2 = {};
    forOwn(result1.result, (v, k) => (revsDiffRequestDb2[k] = v.possible_ancestors));

    // look in db2
    const result2 = await client.postRevsDiff({ db: databaseName2, documentRevisions: revsDiffRequestDb2 });
    forOwn(result2.result, (v, k) => {
      if ('missing' in v) {
        missing[k] = v.missing;
      }
    });
  }
  return missing;
};

const getAllDocs = async function(client, database) {
  let allDocIds = [];
…

New (2.9.17-SNAPSHOT.185):

// limitations under the License.
'use strict';

const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');

/**
 * Read documents from a database to be backed up.
 *
 * @param {string} db - `@cloudant/cloudant` DB object for source database.
 * @param {number} blocksize - number of documents to download in single request
 * @param {number} parallelism - number of concurrent downloads
 * @param {string} log - path to log file to use
 * @param {boolean} resume - whether to resume from an existing log file
 * @returns EventEmitter with following events:
 *  - `received` - called with a block of documents to write to backup
 *  - `error` - on error
 *  - `finished` - when backup process is finished (either complete or errored)
 */
module.exports = function(db, options) {
  const ee = new events.EventEmitter();
  const start = new Date().getTime(); // backup start time
  const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)

  function proceedWithBackup() {
    if (options.resume) {
      // pick up from existing log file from previous run
      downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
    } else {
      // create new log file and process
      spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
        if (err) {
          ee.emit('error', err);
        } else {
          downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
        }
      });
    }
  }

  validateBulkGetSupport(db, function(err) {
    if (err) {
      return ee.emit('error', err);
    } else {
      proceedWithBackup();
    }
  });

  return ee;
};

/**
 * Validate /_bulk_get support for a specified database.
 *
 * @param {string} db - nodejs-cloudant db
 * @param {function} callback - called on completion with signature (err)
 */
function validateBulkGetSupport(db, callback) {
  db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
    err = error.convertResponseError(err, function(err) {
      switch (err.status) {
        case undefined:
          // There was no status code on the error
          return err;
        case 404:
          return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
        default:
          return new error.HTTPError(err);
      }
    });
    callback(err);
  });
}

/**
 * Download remaining batches in a log file, splitting batches into sets
 * to avoid enqueueing too many in one go.
 *
 * @param {string} log - log file name to maintain download state
 * @param {string} db - nodejs-cloudant db
 * @param {events.EventEmitter} ee - event emitter to emit received events on
 * @param {time} startTime - start time for backup process
 * @param {number} batchesPerDownloadSession - max batches to enqueue for
 *  download at a time. As batches contain many doc IDs, this helps avoid
 *  exhausting memory.
 * @param {number} parallelism - number of concurrent downloads
 * @returns function to call to download remaining batches with signature
 *  (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
 */
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
  let total = 0; // running total of documents downloaded so far
  let noRemainingBatches = false;

  // Generate a set of batches (up to batchesPerDownloadSession) to download from the
  // log file and download them. Set noRemainingBatches to `true` for last batch.
  function downloadSingleBatchSet(done) {
    // Fetch the doc IDs for the batches in the current set to
    // download them.
    function batchSetComplete(err, data) {
      if (!err) {
        total = data.total;
      }
      done(err);
    }
    function processRetrievedBatches(err, batches) {
      if (!err) {
        // process them in parallelised queue
        processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
      } else {
        batchSetComplete(err);
      }
    }

    readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
      if (err) {
        ee.emit('error', err);
        // Stop processing changes file for fatal errors
        noRemainingBatches = true;
        done();
      } else {
        if (batchSetIds.length === 0) {
          noRemainingBatches = true;
          return done();
        }
        logfilegetbatches(log, batchSetIds, processRetrievedBatches);
      }
    });
  }

  // Return true if all batches in log file have been downloaded
  function isFinished(callback) { callback(null, noRemainingBatches); }

  function onComplete() {
    ee.emit('finished', { total: total });
  }

  async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}

/**
 * Return a set of uncompleted download batch IDs from the log file.
 *
 * @param {string} log - log file path
 * @param {number} batchesPerDownloadSession - maximum IDs to return
 * @param {function} callback - signature (err, batchSetIds array)
 */
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
  logfilesummary(log, function processSummary(err, summary) {
    if (!err) {
      if (!summary.changesComplete) {
        callback(new error.BackupError('IncompleteChangesInLogFile',
          'WARNING: Changes did not finish spooling'));
        return;
      }
      if (Object.keys(summary.batches).length === 0) {
        return callback(null, []);
      }
      // batch IDs are the property names of summary.batches
      const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
      callback(null, batchSetIds);
    } else {
      callback(err);
    }
  });
}

/**
 * Download a set of batches retrieved from a log file. When a download is
 * complete, add a line to the logfile indicating such.
 *
 * @param {any} db - nodejs-cloudant database
 * @param {any} parallelism - number of concurrent requests to make
 * @param {any} log - log file to drive downloads from
 * @param {any} batches - batches to download
 * @param {any} ee - event emitter for progress. This function emits
 *  received and error events.
 * @param {any} start - time backup started, to report deltas
 * @param {any} grandtotal - count of documents downloaded prior to this set
 *  of batches
 * @param {any} callback - completion callback, (err, {total: number}).
 */
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
  let hasErrored = false;
  let total = grandtotal;

  // queue to process the fetch requests in an orderly fashion using _bulk_get
  const q = async.queue(function(payload, done) {
    const output = [];
    const thisBatch = payload.batch;
    delete payload.batch;
    delete payload.command;

    function logCompletedBatch(batch) {
      if (log) {
        fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
      } else {
        done();
      }
    }

    // do the /db/_bulk_get request
    db.service.postBulkGet({
      db: db.db,
      revs: true,
      docs: payload.docs
    }).then(response => {
      // create an output array with the docs returned
      response.result.results.forEach(function(d) {
        if (d.docs) {
          d.docs.forEach(function(doc) {
            if (doc.ok) {
              output.push(doc.ok);
            }
          });
        }
      });
      total += output.length;
      const t = (new Date().getTime() - start) / 1000;

      ee.emit('received', {
        batch: thisBatch,
        data: output,
        length: output.length,
        time: t,
        total: total
      }, q, logCompletedBatch);
    }).catch(err => {
      if (!hasErrored) {
        hasErrored = true;
        err = error.convertResponseError(err);
        // Kill the queue for fatal errors
        q.kill();
        ee.emit('error', err);
      }
      done();
    });
  }, parallelism);

  for (const i in batches) {
    q.push(batches[i]);
  }
…
const limit = 2000;
let startKey = '\u0000';
do {
const pageOfDocIds = (await client.postAllDocs({ db: database, startKey, limit })).result.rows.map(r => r.id);
allDocIds = allDocIds.concat(pageOfDocIds);
if (pageOfDocIds.length < limit) {
startKey = null;
} else {
startKey = pageOfDocIds[limit - 1] + '\u0000';
}
} while (startKey != null);
return allDocIds;
};
q.drain(function() {
callback(null, { total: total });
});
}
module.exports = {
compare
};
/**
* Returns first N properties on an object.
*
* @param {object} obj - object with properties
* @param {number} count - number of properties to return
*/
function getPropertyNames(obj, count) {
// decide which batch numbers to deal with
const batchestofetch = [];
let j = 0;
for (const i in obj) {
batchestofetch.push(parseInt(i));
j++;
if (j >= count) break;
}
return batchestofetch;
}
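
The helpers above implement the log-file driven download: batch IDs are read back with readBatchSetIdsFromLogFile, processBatchSet fetches each batch via /_bulk_get, and a `:d batch<n>` line is appended as each batch completes so a resumed backup can skip finished work. A minimal sketch of reaching this flow through the package's documented public API follows; the URL, file names, and option values are placeholders, not prescriptions.

// Sketch only: drive a backup through the public API. The log file enables
// resume because completed batches are recorded as ':d batch<n>' lines.
const couchbackup = require('@cloudant/couchbackup');
const fs = require('fs');

couchbackup.backup(
  'http://localhost:5984/animaldb', // placeholder source database URL
  fs.createWriteStream('animaldb_backup.json'),
  { parallelism: 2, log: 'animaldb_backup.log' },
  function(err, data) {
    if (err) {
      console.error('Backup failed', err);
    } else {
      console.log('Backup complete', data);
    }
  });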

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,94 +14,106 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
  describe(u.scenario('Basic backup and restore', params), function() {
    it('should backup animaldb to a file correctly', async function() {
      // Allow up to 40 s to backup and compare (it should be much faster)!
      u.setTimeout(this, 40);
      const actualBackup = `./${this.fileName}`;
      // Create a file and backup to it
      const output = fs.createWriteStream(actualBackup);
      return once(output, 'open')
        .then(() => {
          return u.testBackup(params, 'animaldb', output);
        }).then(() => {
          return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected.json');
        });
    });
    it('should restore animaldb to a database correctly', async function() {
      // Allow up to 60 s to restore and compare (again it should be faster)!
      u.setTimeout(this, 60);
      const input = fs.createReadStream('./test/fixtures/animaldb_expected.json');
      const dbName = this.dbName;
      return once(input, 'open').then(() => {
        return u.testRestore(params, input, dbName);
      }).then(() => {
        return u.dbCompare('animaldb', dbName);
      });
    });
    it('should execute a shallow mode backup successfully', async function() {
      // Allow 30 s
      u.setTimeout(this, 30);
      const actualBackup = `./${this.fileName}`;
      const output = fs.createWriteStream(actualBackup);
      // Add the shallow mode option
      const p = u.p(params, { opts: { mode: 'shallow' } });
      return once(output, 'open')
        .then(() => {
          return u.testBackup(p, 'animaldb', output);
        }).then(() => {
          return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected_shallow.json');
        });
    });
    describe(u.scenario('Buffer size tests', params), function() {
      it('should backup/restore animaldb with the same buffer size', async function() {
        // Allow up to 60 s for backup and restore of animaldb
        u.setTimeout(this, 60);
        const actualBackup = `./${this.fileName}`;
        const logFile = `./${this.fileName}` + '.log';
        const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } });
        return u.testBackupAndRestoreViaFile(p, 'animaldb', actualBackup, this.dbName);
      });
      it('should backup/restore animaldb with backup buffer > restore buffer', async function() {
        // Allow up to 60 s for backup and restore of animaldb
        u.setTimeout(this, 60);
        const actualBackup = `./${this.fileName}`;
        const logFile = `./${this.fileName}` + '.log';
        const dbName = this.dbName;
        const p = u.p(params, { opts: { log: logFile, bufferSize: 2 } }); // backup
        const q = u.p(params, { opts: { bufferSize: 1 } }); // restore
        return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
          return u.testRestoreFromFile(q, actualBackup, dbName);
        }).then(() => {
          return u.dbCompare('animaldb', dbName);
        });
      });
      it('should backup/restore animaldb with backup buffer < restore buffer', async function() {
        // Allow up to 60 s for backup and restore of animaldb
        u.setTimeout(this, 60);
        const actualBackup = `./${this.fileName}`;
        const logFile = `./${this.fileName}` + '.log';
        const dbName = this.dbName;
        const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } }); // backup
        const q = u.p(params, { opts: { bufferSize: 2 } }); // restore
        return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
          return u.testRestoreFromFile(q, actualBackup, dbName);
        }).then(() => {
          return u.dbCompare('animaldb', dbName);
        });
      });
    });
  });
});

const path = require('path');
const tmp = require('tmp');

/**
  Return API default settings.
*/
function apiDefaults() {
  return {
    parallelism: 5,
    bufferSize: 500,
    requestTimeout: 120000,
    log: tmp.tmpNameSync(),
    resume: false,
    mode: 'full'
  };
}

/**
  Return CLI default settings.
*/
function cliDefaults() {
  const defaults = apiDefaults();
  // add additional legacy settings
  defaults.db = 'test';
  defaults.url = 'http://localhost:5984';
  // add CLI only option
  defaults.quiet = false;
  return defaults;
}
/**
Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
// if we have a custom CouchDB url
if (typeof process.env.COUCH_URL !== 'undefined') {
opts.url = process.env.COUCH_URL;
}
// if we have a specified database
if (typeof process.env.COUCH_DATABASE !== 'undefined') {
opts.db = process.env.COUCH_DATABASE;
}
// if we have a specified buffer size
if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
}
// if we have a specified parallelism
if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
}
// if we have a specified request timeout
if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
}
// if we have a specified log file
if (typeof process.env.COUCH_LOG !== 'undefined') {
opts.log = path.normalize(process.env.COUCH_LOG);
}
// if we are instructed to resume
if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
opts.resume = true;
}
// if we are given an output filename
if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
opts.output = path.normalize(process.env.COUCH_OUTPUT);
}
// if we only want a shallow copy
if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
opts.mode = 'shallow';
}
// if we are instructed to be quiet
if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
opts.quiet = true;
}
// if we have a specified API key
if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
}
// if we have a specified IAM token endpoint
if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
module.exports = {
apiDefaults: apiDefaults,
cliDefaults: cliDefaults,
applyEnvironmentVariables: applyEnvironmentVariables
};
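
A short sketch of how these pieces combine at startup: defaults come from cliDefaults() and environment variables then override them in place. The require path below is an assumption for illustration.

// Illustrative only; './includes/config.js' is an assumed path to the module above.
process.env.COUCH_BUFFER_SIZE = '250';
process.env.COUCH_MODE = 'shallow';
const config = require('./includes/config.js');

const opts = config.cliDefaults();
config.applyEnvironmentVariables(opts);
console.log(opts.bufferSize, opts.mode); // 250 'shallow'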

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -14,122 +14,47 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it beforeEach */
'use strict';
const assert = require('assert');
const fs = require('fs');
const nock = require('nock');
const request = require('../includes/request.js');
const writer = require('../includes/writer.js');
const noopEmitter = new (require('events')).EventEmitter();
const liner = require('../includes/liner.js');
const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');
const longTestTimeout = 3000;

describe('#unit Check database restore writer', function() {
  const dbUrl = 'http://localhost:5984/animaldb';
  const db = request.client(dbUrl, { parallelism: 1 });
  beforeEach('Reset nocks', function() {
    nock.cleanAll();
  });
  it('should complete successfully', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, []); // success
    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 15);
        assert.ok(nock.isDone());
      })]);
  });
  it('should terminate on a fatal error', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(401, { error: 'Unauthorized' }); // fatal error
    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'Unauthorized');
        assert.strictEqual(err.message, 'Access is denied due to invalid credentials.');
        assert.ok(nock.isDone());
        return true;
      }
    );
  });
  it('should retry on transient errors', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(429, { error: 'Too Many Requests' }) // transient error
      .post('/_bulk_docs')
      .reply(500, { error: 'Internal Server Error' }) // transient error
      .post('/_bulk_docs')
      .reply(200, { ok: true }); // third time lucky success
    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 15);
        assert.ok(nock.isDone());
      })]);
  }).timeout(longTestTimeout);
  it('should fail after 3 transient errors', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(429, { error: 'Too Many Requests' }) // transient error
      .post('/_bulk_docs')
      .reply(500, { error: 'Internal Server Error' }) // transient error
      .post('/_bulk_docs')
      .reply(503, { error: 'Service Unavailable' }); // Final transient error
    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `503 : post ${dbUrl}/_bulk_docs - Error: Service Unavailable`);
        assert.ok(nock.isDone());
        return true;
      }
    );
  }).timeout(longTestTimeout);
  it('should restore shallow backups without rev info successfully', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, [{ ok: true, id: 'foo', rev: '1-abc' }]); // success
    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_old_shallow.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 11);
        assert.ok(nock.isDone());
      })]);
  });
  it('should get a batch error for non-empty array response with new_edits false', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, [{ id: 'foo', error: 'foo', reason: 'bar' }]);
    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'Error');
        assert.strictEqual(err.message, 'Error writing batch with new_edits:false and 1 items');
        assert.ok(nock.isDone());
        return true;
      }
    );
  });
});

/**
 * Utility methods for the command line interface.
 * @module cliutils
 * @see module:cliutils
 */
const url = require('url');
const error = require('./error.js');

module.exports = {
  /**
   * Combine a base URL and a database name, ensuring at least single slash
   * between root and database name. This allows users to have Couch behind
   * proxies that mount Couch's / endpoint at some other mount point.
   * @param {string} root - root URL
   * @param {string} databaseName - database name
   * @return concatenated URL.
   *
   * @private
   */
  databaseUrl: function databaseUrl(root, databaseName) {
    if (!root.endsWith('/')) {
      root = root + '/';
    }
    try {
      return new url.URL(encodeURIComponent(databaseName), root).toString();
    } catch (err) {
      throw error.wrapPossibleInvalidUrlError(err);
    }
  },
  /**
   * Generate CLI argument usage text.
   *
   * @param {string} description - argument description.
   * @param {string} defaultValue - default argument value.
   *
   * @private
   */
  getUsage: function getUsage(description, defaultValue) {
    return `${description} ${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
  }
};
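
For reference, a hedged sketch of databaseUrl's behaviour (the require path is an assumption): the database name is percent-encoded before being resolved against the root, so names containing '/' remain a single path segment and proxy mount points are preserved.

const cliutils = require('./includes/cliutils.js'); // assumed path

console.log(cliutils.databaseUrl('http://localhost:5984', 'animaldb'));
// http://localhost:5984/animaldb
console.log(cliutils.databaseUrl('http://example.com/couch/', 'my/db'));
// http://example.com/couch/my%2Fdb (the slash in the name is encoded)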

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,53 +14,101 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global beforeEach afterEach */
'use strict';
const { CloudantV1 } = require('@ibm-cloud/cloudant');
const url = new URL((process.env.COUCH_BACKEND_URL) ? process.env.COUCH_BACKEND_URL : 'https://no-couch-backend-url-set.test');
const { BasicAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const authenticator = (url.username) ? new BasicAuthenticator({ username: url.username, password: decodeURIComponent(url.password) }) : new NoAuthAuthenticator();
const serviceOpts = {
  authenticator: authenticator
};
const cloudant = new CloudantV1(serviceOpts);
// Remove auth from URL before using for service
cloudant.setServiceUrl(new URL(url.pathname, url.origin).toString());
const uuid = require('uuid').v4;
const fs = require('fs');

// Mocha hooks that will be at the root context so run for all tests
beforeEach('Create test database', async function() {
  // Don't run hook for unit tests, just for CI
  if (!this.currentTest.fullTitle().includes('#unit')) {
    // Allow 10 seconds to create the DB
    this.timeout(10 * 1000);
    const unique = uuid();
    this.fileName = `${unique}`;
    this.dbName = 'couchbackup_test_' + unique;
    return cloudant.putDatabase({ db: this.dbName });
  }
});

afterEach('Delete test database', async function() {
  // Don't run hook for unit tests, just for CI
  if (!this.currentTest.fullTitle().includes('#unit')) {
    // Allow 10 seconds to delete the DB
    this.timeout(10 * 1000);
    deleteIfExists(this.fileName);
    deleteIfExists(`${this.fileName}.log`);
    return cloudant.deleteDatabase({ db: this.dbName });
  }
});

function deleteIfExists(fileName) {
  fs.unlink(fileName, function(err) {
    if (err) {
      if (err.code !== 'ENOENT') {
        console.error(`${err.code} ${err.message}`);
      }
    }
  });
}

// fatal errors
const codes = {
  Error: 1,
  InvalidOption: 2,
  DatabaseNotFound: 10,
  Unauthorized: 11,
  Forbidden: 12,
  DatabaseNotEmpty: 13,
  NoLogFileName: 20,
  LogDoesNotExist: 21,
  IncompleteChangesInLogFile: 22,
  SpoolChangesError: 30,
  HTTPFatalError: 40,
  BulkGetError: 50
};

class BackupError extends Error {
  constructor(name, message) {
    super(message);
    this.name = name;
  }
}

class HTTPError extends BackupError {
  constructor(responseError, name) {
    // Special case some names for more useful error messages
    switch (responseError.status) {
      case 401:
        name = 'Unauthorized';
        break;
      case 403:
        name = 'Forbidden';
        break;
      default:
        name = name || 'HTTPFatalError';
    }
    super(name, responseError.message);
  }
}

// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
  if (err) {
    // Construct an HTTPError if there is request information on the error
    // Codes < 400 are considered OK
    if (err.status >= 400) {
      return new HTTPError(err);
    } else {
      // Send it back again if there was no status code, e.g. a cxn error
      return augmentMessage(err);
    }
  }
}

function convertResponseError(responseError, errorFactory) {
  if (!errorFactory) {
    errorFactory = checkResponse;
  }
  return errorFactory(responseError);
}

function augmentMessage(err) {
  // For errors that don't have a status code, we are likely looking at a cxn
  // error.
  // Try to augment the message with more detail (core puts the code in statusText)
  if (err && err.statusText) {
    err.message = `${err.message} ${err.statusText}`;
  }
  if (err && err.description) {
    err.message = `${err.message} ${err.description}`;
  }
  return err;
}

function wrapPossibleInvalidUrlError(err) {
  if (err.code === 'ERR_INVALID_URL') {
    // Wrap ERR_INVALID_URL in our own InvalidOption
    return new BackupError('InvalidOption', err.message);
  }
  return err;
}

module.exports = {
  BackupError,
  HTTPError,
  wrapPossibleInvalidUrlError,
  convertResponseError,
  terminationCallback: function terminationCallback(err, data) {
    if (err) {
      console.error(`ERROR: ${err.message}`);
      process.exitCode = codes[err.name] || 1;
      process.exit();
    }
  }
};
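
To make the flow above concrete, a small sketch (the require path and error message are assumptions): convertResponseError applies checkResponse by default, so a 403 response becomes a named Forbidden error, which terminationCallback would map to exit code 12 via the codes table.

const error = require('./includes/error.js'); // assumed path

const err = error.convertResponseError({ status: 403, message: 'user lacks _reader role' });
console.log(err instanceof error.HTTPError); // true
console.log(err.name); // 'Forbidden' -> exit code 12 in terminationCallback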

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,28 +14,152 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const logfilesummary = require('../includes/logfilesummary.js');

describe('#unit Fetching summary from the log file', function() {
  it('should fetch a summary correctly', function() {
    return new Promise((resolve, reject) => {
      logfilesummary('./test/fixtures/test.log', function(err, data) {
        try {
          assert.ok(!err);
          assert.ok(data);
          assert.strictEqual(data.changesComplete, true);
          assert.strictEqual(typeof data.batches, 'object');
          assert.strictEqual(Object.keys(data.batches).length, 2);
          assert.deepStrictEqual(data.batches['1'], true);
          assert.deepStrictEqual(data.batches['4'], true);
          resolve();
        } catch (err) {
          reject(err);
        }
      });
    });
  });
});

const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');

module.exports = function(db, bufferSize, parallelism, ee) {
  const writer = new stream.Transform({ objectMode: true });
  let buffer = [];
  let written = 0;
  let linenumber = 0;
  // this is the queue of chunks that are written to the database
  // the queue's payload will be an array of documents to be written,
  // the size of the array will be bufferSize. The variable parallelism
  // determines how many HTTP requests will occur at any one time.
  const q = async.queue(function(payload, cb) {
    // if we are restoring known revisions, we need to supply new_edits=false
    if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
      payload.new_edits = false;
      debug('Using new_edits false mode.');
    }
    if (!didError) {
      db.service.postBulkDocs({
        db: db.db,
        bulkDocs: payload
      }).then(response => {
        if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
          throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
            ` and ${response.result ? response.result.length : 'unavailable'} items`);
        }
        written += payload.docs.length;
        writer.emit('restored', { documents: payload.docs.length, total: written });
        cb();
      }).catch(err => {
        err = error.convertResponseError(err);
        debug(`Error writing docs ${err.name} ${err.message}`);
        cb(err, payload);
      });
    }
  }, parallelism);
  let didError = false;
// write the contents of the buffer to CouchDB in blocks of bufferSize
function processBuffer(flush, callback) {
function taskCallback(err, payload) {
if (err && !didError) {
debug(`Queue task failed with error ${err.name}`);
didError = true;
q.kill();
writer.emit('error', err);
}
}
if (flush || buffer.length >= bufferSize) {
// work through the buffer to break off bufferSize chunks
// and feed the chunks to the queue
do {
// split the buffer into bufferSize chunks
const toSend = buffer.splice(0, bufferSize);
// and add the chunk to the queue
debug(`Adding ${toSend.length} to the write queue.`);
q.push({ docs: toSend }, taskCallback);
} while (buffer.length >= bufferSize);
// send any leftover documents to the queue
if (flush && buffer.length > 0) {
debug(`Adding remaining ${buffer.length} to the write queue.`);
q.push({ docs: buffer }, taskCallback);
}
// wait until the queue size falls to a reasonable level
async.until(
// wait until the queue length drops to twice the paralellism
// or until empty on the last write
function(callback) {
// if we encountered an error, stop this until loop
if (didError) {
return callback(null, true);
}
if (flush) {
callback(null, q.idle() && q.length() === 0);
} else {
callback(null, q.length() <= parallelism * 2);
}
},
function(cb) {
setTimeout(cb, 20);
},
function() {
if (flush && !didError) {
writer.emit('finished', { total: written });
}
// callback when we're happy with the queue size
callback();
});
} else {
callback();
}
}
// take an object
writer._transform = function(obj, encoding, done) {
// each obj that arrives here is a line from the backup file
// it should contain an array of objects. The length of the array
// depends on the bufferSize at backup time.
linenumber++;
if (!didError && obj !== '') {
// see if it parses as JSON
try {
const arr = JSON.parse(obj);
// if it's an array with a length
if (typeof arr === 'object' && arr.length > 0) {
// push each document into a buffer
buffer = buffer.concat(arr);
// pause the stream
// it's likely that the speed with which data can be read from disk
// may exceed the rate it can be written to CouchDB. To prevent
// the whole file being buffered in memory, we pause the stream here.
// it is resumed, when processBuffer calls back and we call done()
this.pause();
// break the buffer in to bufferSize chunks to be written to the database
processBuffer(false, done);
} else {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
done();
}
} catch (e) {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
// Could be an incomplete write that was subsequently resumed
done();
}
} else {
done();
}
};
// called when we need to flush everything
writer._flush = function(done) {
processBuffer(true, done);
};
return writer;
};
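
A minimal restore pipeline sketch mirroring how the unit tests wire this writer (the require paths, target URL, and file name are assumptions): each backup-file line parsed by liner is a JSON array of docs, buffered and flushed to /_bulk_docs in bufferSize chunks.

const fs = require('fs');
const { pipeline } = require('node:stream/promises');
const events = require('events');
const request = require('./includes/request.js'); // assumed paths
const liner = require('./includes/liner.js');
const writer = require('./includes/writer.js');

const db = request.client('http://localhost:5984/targetdb', { parallelism: 1 });
const w = writer(db, 500, 1, new events.EventEmitter());
w.on('restored', p => console.log(`restored ${p.total} docs so far`));

pipeline(fs.createReadStream('animaldb_backup.json'), liner(), w)
  .then(() => console.log('restore complete'))
  .catch(console.error);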

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,29 +14,166 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const logfilegetbatches = require('../includes/logfilegetbatches.js');

describe('#unit Fetching batches from a log file', function() {
  it('should fetch multiple batches correctly', async function() {
    return new Promise((resolve, reject) => {
      logfilegetbatches('./test/fixtures/test.log', [1, 4], function(err, data) {
        try {
          assert.ok(!err);
          assert.ok(data);
          assert.strictEqual(typeof data, 'object');
          assert.strictEqual(Object.keys(data).length, 2);
          assert.deepStrictEqual(data['1'].docs, [{ id: '6' }, { id: '7' }, { id: '8' }, { id: '9' }, { id: '10' }]);
          assert.strictEqual(data['1'].batch, 1);
          assert.deepStrictEqual(data['4'].docs, [{ id: '21' }, { id: '22' }]);
          assert.strictEqual(data['4'].batch, 4);
          resolve();
        } catch (err) {
          reject(err);
        }
      });
    });
  });
});

const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');

const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
  process.version + ')';
// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
constructor(options) {
super(options);
this.data = [];
}
_write(chunk, encoding, callback) {
this.data.push(chunk);
callback();
}
stringBody() {
return Buffer.concat(this.data).toString();
}
}
// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
let method;
let requestUrl;
if (err.response) {
if (err.response.config.url) {
requestUrl = err.response.config.url;
method = err.response.config.method;
}
// Override the status text with an improved message
let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
`${method} ${requestUrl}`;
if (err.response.data) {
// Check if we have a JSON response and try to get the error/reason
if (err.response.headers['content-type'] === 'application/json') {
if (!err.response.data.error && err.response.data.pipe) {
// If we didn't find a JSON object with `error` then we might have a stream response.
// Detect the stream by the presence of `pipe` and use it to get the body and parse
// the error information.
const p = new Promise((resolve, reject) => {
const errorBody = new ResponseWriteable();
err.response.data.pipe(errorBody)
.on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
.on('error', () => { reject(err); });
});
// Replace the stream on the response with the parsed object
err.response.data = await p;
}
// Append the error/reason if available
if (err.response.data.error) {
// Override the status text with our more complete message
errorMsg += ` - Error: ${err.response.data.error}`;
if (err.response.data.reason) {
errorMsg += `, Reason: ${err.response.data.reason}`;
}
}
} else {
errorMsg += err.response.data;
}
// Set a new message for use by the node-sdk-core
// We use the errors array because it gets processed
// ahead of all other service errors.
err.response.data.errors = [{ message: errorMsg }];
}
} else if (err.request) {
if (!err.message.includes(err.config.url)) {
// Augment the message with the URL and method
// but don't do it again if we already have the URL.
err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
}
}
return Promise.reject(err);
};
module.exports = {
client: function(rawUrl, opts) {
const url = new URL(rawUrl);
// Split the URL to separate service from database
// Use origin as the "base" to remove auth elements
const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
let authenticator;
// Default to cookieauth unless an IAM key is provided
if (opts.iamApiKey) {
const iamAuthOpts = { apikey: opts.iamApiKey };
if (opts.iamTokenUrl) {
iamAuthOpts.url = opts.iamTokenUrl;
}
authenticator = new IamAuthenticator(iamAuthOpts);
} else if (url.username) {
authenticator = new CouchdbSessionAuthenticator({
username: decodeURIComponent(url.username),
password: decodeURIComponent(url.password)
});
} else {
authenticator = new NoAuthAuthenticator();
}
const serviceOpts = {
authenticator: authenticator,
timeout: opts.requestTimeout,
// Axios performance options
maxContentLength: -1
};
const service = new CloudantV1(serviceOpts);
// Configure retries
const maxRetries = 2; // for 3 total attempts
service.getHttpClient().defaults.raxConfig = {
// retries for status codes
retry: maxRetries,
// retries for non-response e.g. ETIMEDOUT
noResponseRetries: maxRetries,
backoffType: 'exponential',
httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
statusCodesToRetry: [
[429, 429],
[500, 599]
],
shouldRetry: err => {
const cfg = retryPlugin.getConfig(err);
// cap at max retries regardless of response/non-response type
if (cfg.currentRetryAttempt >= maxRetries) {
return false;
} else {
return retryPlugin.shouldRetryRequest(err);
}
},
instance: service.getHttpClient()
};
retryPlugin.attach(service.getHttpClient());
service.setServiceUrl(actUrl.toString());
if (authenticator instanceof CouchdbSessionAuthenticator) {
// Awkward workaround for known Couch issue with compression on _session requests
// It is not feasible to disable compression on all requests with the amount of
// data this lib needs to move, so override the property in the tokenManager instance.
authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
}
if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
}
// Add error interceptors to put URLs in error messages
service.getHttpClient().interceptors.response.use(null, errorHelper);
// Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
service.getHttpClient().interceptors.request.use(function(requestConfig) {
requestConfig.headers['User-Agent'] = userAgent;
return requestConfig;
}, null);
return { service: service, db: dbName, url: actUrl.toString() };
}
};
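
Usage sketch for the client factory above (URL, credentials, and require path are placeholders): the returned object separates the CloudantV1 service from the database name, with auth stripped from the stored URL and session authentication handled by the SDK on first request.

const request = require('./includes/request.js'); // assumed path

const db = request.client('http://admin:pass@localhost:5984/animaldb',
  { parallelism: 5, requestTimeout: 120000 });
console.log(db.url); // http://localhost:5984/ (auth removed)
console.log(db.db); // animaldb

db.service.getDatabaseInformation({ db: db.db })
  .then(r => console.log(r.result.doc_count))
  .catch(console.error);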
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:16" time="0.086">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.001">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:08" time="0.093">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.002">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.022">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.025">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0.001">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>

@@ -23,3 +23,3 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.005">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.006">
</testcase>

@@ -32,5 +32,5 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0.001">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.004">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.005">
</testcase>

@@ -47,3 +47,3 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.003">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.004">
</testcase>

@@ -54,15 +54,15 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0.001">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.004">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.007">
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.004">
</testcase>

@@ -74,134 +74,134 @@ <testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.004">

</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:16" time="4.499">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.958">
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:08" time="4.506">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.937">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.792">
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.827">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.631">
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.63">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:20" time="10.334">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.543">
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:12" time="10.394">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.628">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.47">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.451">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.536">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.523">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:31" time="5.907">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.604">
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:23" time="5.52">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.354">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.341">
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.301">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.165">
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.059">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:37" time="13.613">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.668">
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:28" time="12.954">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.417">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.521">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.336">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.629">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.406">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:50" time="5.381">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.9">
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:41" time="5.411">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.902">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.535">
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.552">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.155">
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.163">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:56" time="8.294">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.926">
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:47" time="7.289">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.43">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.774">
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.395">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.811">
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.668">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:37:04" time="300.302">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.178">
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:54" time="305.581">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.199">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="296.984">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="302.587">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:42:04" time="479.558">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="3.589">
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:50:00" time="465.717">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.718">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="475.211">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="462.235">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:04" time="2.946">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.68">
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:45" time="2.618">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.358">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:07" time="0.274">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.011">
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:48" time="0.257">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.007">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:07" time="2.32">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.891">
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:48" time="2.247">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.856">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.89">
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.878">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:09" time="5.319">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.891">
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:50" time="5.207">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.868">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.837">
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.766">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.792">
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.793">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:15" time="6.88">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.446">
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:56" time="6.669">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.328">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.331">
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.298">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.295">
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.258">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:22" time="33.576">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.563">
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:02" time="32.6">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.319">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.45">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="15.754">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:55" time="0.008">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0.001">
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:35" time="0.009">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0.001">
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0">
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0.001">
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:55" time="0.086">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.011">
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:35" time="0.085">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.008">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.007">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.009">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.014">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.015">
</testcase>

@@ -214,81 +214,81 @@ <testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">

</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.012">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.015">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.012">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.015">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:55" time="0.108">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.006">
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:35" time="0.12">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.007">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.011">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.004">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.003">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.006">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.003">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.03">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.035">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.025">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.026">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.01">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.009">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:55" time="5.33">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.978">
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:35" time="3.425">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.32">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.476">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.349">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.462">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.378">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.522">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.316">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.511">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.374">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.44">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.293">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.417">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.285">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.481">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.37">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.497">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.371">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.533">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.359">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:01" time="4.559">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.474">
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:38" time="3.292">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.351">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.525">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.382">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.485">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.333">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.47">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.318">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.467">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.39">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.491">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.367">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.517">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.384">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.592">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.38">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.523">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.376">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:05" time="0.003">
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:42" time="0.003">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:05" time="0.002">
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:42" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:05" time="0.036">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.013">
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:42" time="0.039">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.015">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.001">
</testcase>

@@ -303,9 +303,9 @@ <testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">

</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>

@@ -316,7 +316,7 @@ <testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">

</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0">
</testcase>

@@ -327,5 +327,5 @@ <testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0">

</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0.001">
</testcase>

@@ -341,3 +341,3 @@ <testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">

</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:05" time="0.012">
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:42" time="0.013">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0.001">

@@ -347,3 +347,3 @@ </testcase>

</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0">
</testcase>

@@ -354,3 +354,3 @@ <testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0">

</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0">
</testcase>

@@ -361,5 +361,5 @@ <testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">

</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0.001">
</testcase>

@@ -370,3 +370,3 @@ <testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">

</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0">
</testcase>

@@ -376,20 +376,20 @@ <testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0">

</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:05" time="0.004">
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:42" time="0.004">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:05" time="10.602">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:42" time="10.593">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.004">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.015">
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.019">
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.018">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.015">
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.005">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.015">
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.509">
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.507">
</testcase>

@@ -399,38 +399,38 @@ <testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.01">

</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:16" time="0.013">
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:52" time="0.012">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.012">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:16" time="0.564">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.021">
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:52" time="0.562">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.02">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.526">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.524">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.016">
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.014">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:17" time="14.933">
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:53" time="15.269">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.014">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.017">
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.019">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.845">
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.848">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.055">
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.357">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:31" time="35.469">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="35.191">
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:59:08" time="42.618">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="42.354">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:52:07" time="4.097">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.023">
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:59:51" time="4.116">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.025">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.008">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.017">
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.034">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.015">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.023">
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.022">
</testcase>

@@ -437,0 +437,0 @@ <testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,34 +14,317 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe afterEach before after it */
'use strict';
// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');
const assert = require('assert');
const parser = require('../includes/parser.js');
module.exports = function() {
const liner = new stream.Transform({ objectMode: true });
describe('#unit Default parameters', function() {
let processEnvCopy;
let processArgvCopy;
liner._transform = function(chunk, encoding, done) {
let data = chunk.toString();
if (this._lastLineData) {
data = this._lastLineData + data;
}
before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
processArgvCopy = JSON.parse(JSON.stringify(process.argv));
const lines = data.split('\n');
this._lastLineData = lines.splice(lines.length - 1, 1)[0];
// setup environment variables
process.env.COUCH_URL = 'http://user:pass@myurl.com';
process.env.COUCH_DATABASE = 'mydb';
process.env.COUCH_BUFFER_SIZE = '1000';
process.env.COUCH_PARALLELISM = '20';
process.env.COUCH_REQUEST_TIMEOUT = '20000';
process.env.COUCH_LOG = 'my.log';
process.env.COUCH_RESUME = 'true';
process.env.COUCH_OUTPUT = 'myfile.txt';
process.env.COUCH_MODE = 'shallow';
process.env.CLOUDANT_IAM_API_KEY = 'ABC123-ZYX987_cba789-xyz321';
process.env.COUCH_QUIET = 'true';
});
for (const i in lines) {
this.push(lines[i]);
}
done();
};
after('Reset process data', function() {
process.env = processEnvCopy;
process.argv = processArgvCopy;
});
liner._flush = function(done) {
if (this._lastLineData) {
this.push(this._lastLineData);
}
this._lastLineData = null;
done();
};
afterEach(function() {
delete require.cache[require.resolve('commander')];
});
return liner;
};
describe('Backup command-line', function() {
it('respects the COUCH_URL env variable if the --url backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
it('respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('respects the COUCH_LOG env variable if the --log backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, process.env.COUCH_LOG);
});
it('respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
it('respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, process.env.COUCH_OUTPUT);
});
it('respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, process.env.COUCH_MODE);
});
it('respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the backup --url command-line parameter', function() {
const url = 'http://user:pass@myurl2.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the backup --db command-line parameter', function() {
const db = 'mydb2';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the backup --buffer-size command-line parameter', function() {
const bufferSize = 500;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the backup --parallelism command-line parameter', function() {
const parallelism = 10;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the backup --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the backup --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the backup --log command-line parameter', function() {
const filename = 'my2.log';
process.argv = ['node', 'test', '--log', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, filename);
});
it('respects the backup --resume command-line parameter', function() {
process.argv = ['node', 'test', '--resume'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
it('respects the backup --output command-line parameter', function() {
const filename = 'myfile2.txt';
process.argv = ['node', 'test', '--output', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, filename);
});
it('respects the backup --mode full command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'full'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'full');
});
it('respects the backup --mode shallow command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'shallow'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'shallow');
});
it('respects the backup --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
describe('Restore command-line', function() {
it('respects the COUCH_URL env variable if the --url restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
it('respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('respects the COUCH_QUIET env variable if the --quiet restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the restore --url command-line parameter', function() {
const url = 'https://a:b@myurl3.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the restore --db command-line parameter', function() {
const db = 'mydb3';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the restore --buffer-size command-line parameter', function() {
const bufferSize = 250;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the restore --parallelism command-line parameter', function() {
const parallelism = 5;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the restore --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the restore --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the restore --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
});
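A minimal usage sketch for the parser exercised above (module path and argv values are illustrative): a command-line flag takes precedence over the matching COUCH_* environment variable.
'use strict';
const parser = require('../includes/parser.js');
process.env.COUCH_BUFFER_SIZE = '1000';
process.argv = ['node', 'couchbackup', '--buffer-size', '500'];
// The flag wins over the env variable, so this logs 500.
console.log(parser.parseBackupArgs().bufferSize);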

@@ -1,2 +0,2 @@

// Copyright © 2017, 2022 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,68 +14,28 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const async = require('async');
const error = require('./error.js');
const events = require('events');
const assert = require('assert');
const logfilesummary = require('../includes/logfilesummary.js');
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime();
let batch = 0;
let hasErrored = false;
let startKey = null;
let total = 0;
async.doUntil(
function(callback) {
// Note, include_docs: true is set automatically when using the
// fetch function.
const opts = { db: db.db, limit: options.bufferSize, includeDocs: true };
// To avoid fetching a document twice solely to obtain the next page's
// startKey, we instead take the last ID of the current page and append
// the lowest Unicode sort character (see the sketch after this hunk).
if (startKey) opts.startKey = `${startKey}\0`;
db.service.postAllDocs(opts).then(response => {
const body = response.result;
if (!body.rows) {
ee.emit('error', new error.BackupError(
'AllDocsError', 'ERROR: Invalid all docs response'));
callback();
} else {
if (body.rows.length < opts.limit) {
startKey = null; // last batch
} else {
startKey = body.rows[opts.limit - 1].id;
}
const docs = [];
body.rows.forEach(function(doc) {
docs.push(doc.doc);
});
if (docs.length > 0) {
ee.emit('received', {
batch: batch++,
data: docs,
length: docs.length,
time: (new Date().getTime() - start) / 1000,
total: total += docs.length
});
}
callback();
describe('#unit Fetching summary from the log file', function() {
it('should fetch a summary correctly', function() {
return new Promise((resolve, reject) => {
logfilesummary('./test/fixtures/test.log', function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(data.changesComplete, true);
assert.strictEqual(typeof data.batches, 'object');
assert.strictEqual(Object.keys(data.batches).length, 2);
assert.deepStrictEqual(data.batches['1'], true);
assert.deepStrictEqual(data.batches['4'], true);
resolve();
} catch (err) {
reject(err);
}
}).catch(err => {
err = error.convertResponseError(err);
ee.emit('error', err);
hasErrored = true;
callback();
});
},
function(callback) { callback(null, hasErrored || startKey == null); },
function() { ee.emit('finished', { total: total }); }
);
return ee;
};
});
});
});
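A hedged sketch of the keyset paging used by the backup loop above: instead of re-fetching the first document of the next page, the last ID of the current page gets the lowest Unicode sort character appended. Here fetchPage is a stand-in for db.service.postAllDocs, an assumption rather than part of the real module.
'use strict';
// fetchPage(opts) -> Promise<Array<{id, doc}>> stands in for postAllDocs.
async function* allDocsPages(fetchPage, limit) {
let startKey = null;
while (true) {
const opts = { limit, includeDocs: true };
if (startKey) opts.startKey = startKey;
const rows = await fetchPage(opts);
if (rows.length > 0) yield rows;
if (rows.length < limit) return; // a short page is the last page
// Start the next page just after the last ID seen.
startKey = `${rows[rows.length - 1].id}\0`;
}
}
// Usage: for await (const page of allDocsPages(fetchPage, 500)) { ... }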

@@ -1,2 +0,2 @@

// Copyright © 2017, 2018 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,47 +14,171 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it beforeEach */
'use strict';
/**
* Utility methods for the command line interface.
* @module cliutils
* @see module:cliutils
*/
const assert = require('assert');
const backup = require('../includes/shallowbackup.js');
const request = require('../includes/request.js');
const fs = require('fs');
const nock = require('nock');
const url = require('url');
const error = require('./error.js');
// Function to create a DB object and call the shallow backup function
// This is normally done by app.js
function shallowBackup(dbUrl, opts) {
const db = request.client(dbUrl, opts);
// Disable compression to make body assertions easier
db.service.setEnableGzipCompression(false);
return backup(db, opts);
}
module.exports = {
// Note all these tests include both a body parameter and a query string
// of include_docs because of a quirk of nano: the fetch method always
// adds the include_docs query string.
describe('#unit Perform backup using shallow backup', function() {
const dbUrl = 'http://localhost:5984/animaldb';
// Query string keys are stringified by Nano
const badgerKey = 'badger\0';
const kookaburraKey = 'kookaburra\0';
const snipeKey = 'snipe\0';
/**
* Combine a base URL and a database name, ensuring at least a single slash
* between root and database name. This allows users to have Couch behind
* proxies that mount Couch's / endpoint at some other mount point.
* @param {string} root - root URL
* @param {string} databaseName - database name
* @return {string} the concatenated URL.
*
* @private
*/
databaseUrl: function databaseUrl(root, databaseName) {
if (!root.endsWith('/')) {
root = root + '/';
}
try {
return new url.URL(encodeURIComponent(databaseName), root).toString();
} catch (err) {
throw error.wrapPossibleInvalidUrlError(err);
}
},
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
/**
* Generate CLI argument usage text.
*
* @param {string} description - argument description.
* @param {string} defaultValue - default argument value.
*
* @private
*/
getUsage: function getUsage(description, defaultValue) {
return `${description}${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
}
};
it('should perform a shallow backup', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
reject(err);
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('should perform a shallow backup with transient error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - transient error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(500, { error: 'Internal Server Error' })
// batch 3 - retry
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
assert.strictEqual(err.name, 'HTTPError');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('should fail to perform a shallow backup on fatal error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - fatal error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(401, { error: 'Unauthorized' });
let errCount = 0;
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
errCount++;
assert.strictEqual(err.name, 'Unauthorized');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
assert.strictEqual(data.length, 3);
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 6);
assert.ok(couch.isDone());
assert.strictEqual(errCount, 1);
resolve();
} catch (err) {
reject(err);
}
});
});
});
});
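A quick usage sketch for the databaseUrl helper above (host and database name are illustrative); because the name is URL-encoded, a database name containing '/' survives proxy mount points.
'use strict';
const cliutils = require('../includes/cliutils.js'); // path assumed
// Logs https://example.com/couch/my%2Fdb
console.log(cliutils.databaseUrl('https://example.com/couch', 'my/db'));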

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,166 +14,25 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');
delete require.cache[require.resolve('./citestutils.js')];
const u = require('./citestutils.js');
const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
process.version + ')';
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('End to end backup and restore', params), function() {
it('should backup and restore animaldb', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
return u.testDirectBackupAndRestore(params, 'animaldb', this.dbName);
});
// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
constructor(options) {
super(options);
this.data = [];
}
_write(chunk, encoding, callback) {
this.data.push(chunk);
callback();
}
stringBody() {
return Buffer.concat(this.data).toString();
}
}
// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
let method;
let requestUrl;
if (err.response) {
if (err.response.config.url) {
requestUrl = err.response.config.url;
method = err.response.config.method;
}
// Override the status text with an improved message
let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
`${method} ${requestUrl}`;
if (err.response.data) {
// Check if we have a JSON response and try to get the error/reason
if (err.response.headers['content-type'] === 'application/json') {
if (!err.response.data.error && err.response.data.pipe) {
// If we didn't find a JSON object with `error` then we might have a stream response.
// Detect the stream by the presence of `pipe` and use it to get the body and parse
// the error information.
const p = new Promise((resolve, reject) => {
const errorBody = new ResponseWriteable();
err.response.data.pipe(errorBody)
.on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
.on('error', () => { reject(err); });
});
// Replace the stream on the response with the parsed object
err.response.data = await p;
}
// Append the error/reason if available
if (err.response.data.error) {
// Override the status text with our more complete message
errorMsg += ` - Error: ${err.response.data.error}`;
if (err.response.data.reason) {
errorMsg += `, Reason: ${err.response.data.reason}`;
}
}
} else {
errorMsg += err.response.data;
}
// Set a new message for use by the node-sdk-core
// We use the errors array because it gets processed
// ahead of all other service errors.
err.response.data.errors = [{ message: errorMsg }];
}
} else if (err.request) {
if (!err.message.includes(err.config.url)) {
// Augment the message with the URL and method
// but don't do it again if we already have the URL.
err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
}
}
return Promise.reject(err);
};
module.exports = {
client: function(rawUrl, opts) {
const url = new URL(rawUrl);
// Split the URL to separate service from database
// Use origin as the "base" to remove auth elements
const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
let authenticator;
// Default to cookieauth unless an IAM key is provided
if (opts.iamApiKey) {
const iamAuthOpts = { apikey: opts.iamApiKey };
if (opts.iamTokenUrl) {
iamAuthOpts.url = opts.iamTokenUrl;
}
authenticator = new IamAuthenticator(iamAuthOpts);
} else if (url.username) {
authenticator = new CouchdbSessionAuthenticator({
username: decodeURIComponent(url.username),
password: decodeURIComponent(url.password)
});
} else {
authenticator = new NoAuthAuthenticator();
}
const serviceOpts = {
authenticator: authenticator,
timeout: opts.requestTimeout,
// Axios performance options
maxContentLength: -1
};
const service = new CloudantV1(serviceOpts);
// Configure retries
const maxRetries = 2; // for 3 total attempts
service.getHttpClient().defaults.raxConfig = {
// retries for status codes
retry: maxRetries,
// retries for non-response e.g. ETIMEDOUT
noResponseRetries: maxRetries,
backoffType: 'exponential',
httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
statusCodesToRetry: [
[429, 429],
[500, 599]
],
shouldRetry: err => {
const cfg = retryPlugin.getConfig(err);
// cap at max retries regardless of response/non-response type
if (cfg.currentRetryAttempt >= maxRetries) {
return false;
} else {
return retryPlugin.shouldRetryRequest(err);
}
},
instance: service.getHttpClient()
};
retryPlugin.attach(service.getHttpClient());
service.setServiceUrl(actUrl.toString());
if (authenticator instanceof CouchdbSessionAuthenticator) {
// Awkward workaround for known Couch issue with compression on _session requests
// It is not feasible to disable compression on all requests with the amount of
// data this lib needs to move, so override the property in the tokenManager instance.
authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
}
if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
}
// Add error interceptors to put URLs in error messages
service.getHttpClient().interceptors.response.use(null, errorHelper);
// Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
service.getHttpClient().interceptors.request.use(function(requestConfig) {
requestConfig.headers['User-Agent'] = userAgent;
return requestConfig;
}, null);
return { service: service, db: dbName, url: actUrl.toString() };
}
};
it('should backup and restore largedb1g #slow', async function() {
// Allow up to 30 m for backup and restore of largedb1g
// This is a long time but when many builds run in parallel it can take a
// while to get this done.
u.setTimeout(this, 30 * 60);
return u.testDirectBackupAndRestore(params, 'largedb1g', this.dbName);
});
});
});
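A hedged sketch of the error-interceptor pattern above, attached to a bare axios instance rather than the Cloudant SDK's HTTP client:
'use strict';
const axios = require('axios');
const client = axios.create();
// Mirror errorHelper in miniature: put status, method and URL in the message.
client.interceptors.response.use(null, function(err) {
if (err.response && err.config) {
err.message = `${err.response.status} ${err.response.statusText || ''}: ${err.config.method} ${err.config.url}`;
}
return Promise.reject(err);
});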

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,106 +14,118 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it before after */
'use strict';
const path = require('path');
const tmp = require('tmp');
const assert = require('assert');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;
/**
Return API default settings.
*/
function apiDefaults() {
return {
parallelism: 5,
bufferSize: 500,
requestTimeout: 120000,
log: tmp.tmpNameSync(),
resume: false,
mode: 'full'
};
}
describe('#unit Configuration', function() {
let processEnvCopy;
/**
Return CLI default settings.
*/
function cliDefaults() {
const defaults = apiDefaults();
before('Save env', function() {
// Copy env so we can reset it after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
});
// add additional legacy settings
defaults.db = 'test';
defaults.url = 'http://localhost:5984';
after('Reset env', function() {
process.env = processEnvCopy;
});
// add CLI only option
defaults.quiet = false;
it('respects the COUCH_URL env variable', function() {
process.env.COUCH_URL = 'http://user:pass@myurl.com';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.url, 'string');
assert.strictEqual(config.url, process.env.COUCH_URL);
});
return defaults;
}
it('respects the COUCH_DATABASE env variable', function() {
process.env.COUCH_DATABASE = 'mydb';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.db, 'string');
assert.strictEqual(config.db, process.env.COUCH_DATABASE);
});
/**
Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
// if we have a custom CouchDB url
if (typeof process.env.COUCH_URL !== 'undefined') {
opts.url = process.env.COUCH_URL;
}
it('respects the COUCH_BUFFER_SIZE env variable', function() {
process.env.COUCH_BUFFER_SIZE = '1000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.bufferSize, 'number');
assert.strictEqual(config.bufferSize, 1000);
});
// if we have a specified database
if (typeof process.env.COUCH_DATABASE !== 'undefined') {
opts.db = process.env.COUCH_DATABASE;
}
it('respects the COUCH_PARALLELISM env variable', function() {
process.env.COUCH_PARALLELISM = '20';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.parallelism, 'number');
assert.strictEqual(config.parallelism, 20);
});
// if we have a specified buffer size
if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
}
it('respects the COUCH_REQUEST_TIMEOUT env variable', function() {
process.env.COUCH_REQUEST_TIMEOUT = '10000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.requestTimeout, 'number');
assert.strictEqual(config.requestTimeout, 10000);
});
// if we have a specified parallelism
if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
}
it('respects the CLOUDANT_IAM_API_KEY env variable', function() {
const key = 'ABC123-ZYX987_cba789-xyz321';
process.env.CLOUDANT_IAM_API_KEY = key;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamApiKey, 'string');
assert.strictEqual(config.iamApiKey, key);
});
// if we have a specified request timeout
if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
}
it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function() {
const u = 'https://testhost.example:1234/identity/token';
process.env.CLOUDANT_IAM_TOKEN_URL = u;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamTokenUrl, 'string');
assert.strictEqual(config.iamTokenUrl, u);
});
// if we have a specified log file
if (typeof process.env.COUCH_LOG !== 'undefined') {
opts.log = path.normalize(process.env.COUCH_LOG);
}
it('respects the COUCH_LOG env variable', function() {
process.env.COUCH_LOG = 'my.log';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.log, 'string');
assert.strictEqual(config.log, process.env.COUCH_LOG);
});
// if we are instructed to resume
if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
opts.resume = true;
}
it('respects the COUCH_RESUME env variable', function() {
process.env.COUCH_RESUME = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.resume, 'boolean');
assert.strictEqual(config.resume, true);
});
// if we are given an output filename
if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
opts.output = path.normalize(process.env.COUCH_OUTPUT);
}
it('respects the COUCH_OUTPUT env variable', function() {
process.env.COUCH_OUTPUT = 'myfile.txt';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.output, 'string');
assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
});
// if we only want a shallow copy
if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
opts.mode = 'shallow';
}
it('respects the COUCH_MODE env variable', function() {
process.env.COUCH_MODE = 'shallow';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.mode, 'string');
assert.strictEqual(config.mode, 'shallow');
});
// if we are instructed to be quiet
if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
opts.quiet = true;
}
// if we have a specified API key
if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
}
// if we have a specified IAM token endpoint
if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
module.exports = {
apiDefaults: apiDefaults,
cliDefaults: cliDefaults,
applyEnvironmentVariables: applyEnvironmentVariables
};
it('respects the COUCH_QUIET env variable', function() {
process.env.COUCH_QUIET = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.quiet, 'boolean');
assert.strictEqual(config.quiet, true);
});
});
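A minimal usage sketch of the configuration module above, assuming it is available as ./includes/config.js (the env values here are illustrative):

// Sketch: combine CLI defaults with environment overrides.
const config = require('./includes/config.js');
process.env.COUCH_BUFFER_SIZE = '250';
process.env.COUCH_MODE = 'shallow';
const opts = config.cliDefaults();
config.applyEnvironmentVariables(opts);
console.log(opts.bufferSize); // 250
console.log(opts.mode); // 'shallow'
console.log(opts.quiet); // false (CLI-only default, no COUCH_QUIET set)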

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,29 +14,17 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
'use strict';
const stream = require('stream');

module.exports = function(onChange) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    let obj = null;

    // one change per line - remove the trailing comma
    line = line.trim().replace(/,$/, '');

    // extract the last_seq at the end of the changes feed
    if (line.match(/^"last_seq":/)) {
      line = '{' + line;
    }

    try {
      obj = JSON.parse(line);
    } catch (e) {
    }
    onChange(obj);
    done();
  };
  return change;
};

/* global describe it */
'use strict';
const u = require('./citestutils.js');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
  describe(u.scenario('#slowest End to end backup and restore', params), function() {
    // 10 GB is about the largest the CI can handle before getting very upset
    // about how long things are taking
    it('should backup and restore largedb10g', async function() {
      u.setTimeout(this, 350 * 60);
      return u.testDirectBackupAndRestore(params, 'largedb10g', this.dbName);
    });
  });
});
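A short sketch of driving the line-by-line change transform above; the module path is an assumption for illustration:

// Sketch: push newline-delimited changes-feed lines through the transform.
const makeChangeStream = require('./includes/change.js'); // assumed path
const changes = makeChangeStream(function(change) {
  // change is null for unparseable lines, otherwise a parsed changes row
  if (change && change.seq) {
    console.log(`doc ${change.id} changed at seq ${change.seq}`);
  }
});
changes.write('{"seq":"1-abc","id":"doc1","changes":[{"rev":"1-x"}]},');
changes.end();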

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,101 +14,50 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';

// fatal errors
const codes = {
  Error: 1,
  InvalidOption: 2,
  DatabaseNotFound: 10,
  Unauthorized: 11,
  Forbidden: 12,
  DatabaseNotEmpty: 13,
  NoLogFileName: 20,
  LogDoesNotExist: 21,
  IncompleteChangesInLogFile: 22,
  SpoolChangesError: 30,
  HTTPFatalError: 40,
  BulkGetError: 50
};

class BackupError extends Error {
  constructor(name, message) {
    super(message);
    this.name = name;
  }
}

class HTTPError extends BackupError {
  constructor(responseError, name) {
    // Special case some names for more useful error messages
    switch (responseError.status) {
      case 401:
        name = 'Unauthorized';
        break;
      case 403:
        name = 'Forbidden';
        break;
      default:
        name = name || 'HTTPFatalError';
    }
    super(name, responseError.message);
  }
}

// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
  if (err) {
    // Construct an HTTPError if there is request information on the error
    // Codes < 400 are considered OK
    if (err.status >= 400) {
      return new HTTPError(err);
    } else {
      // Send it back again if there was no status code, e.g. a cxn error
      return augmentMessage(err);
    }
  }
}

function convertResponseError(responseError, errorFactory) {
  if (!errorFactory) {
    errorFactory = checkResponse;
  }
  return errorFactory(responseError);
}

function augmentMessage(err) {
  // For errors that don't have a status code, we are likely looking at a cxn
  // error.
  // Try to augment the message with more detail (core puts the code in statusText)
  if (err && err.statusText) {
    err.message = `${err.message} ${err.statusText}`;
  }
  if (err && err.description) {
    err.message = `${err.message} ${err.description}`;
  }
  return err;
}

function wrapPossibleInvalidUrlError(err) {
  if (err.code === 'ERR_INVALID_URL') {
    // Wrap ERR_INVALID_URL in our own InvalidOption
    return new BackupError('InvalidOption', err.message);
  }
  return err;
}

module.exports = {
  BackupError,
  HTTPError,
  wrapPossibleInvalidUrlError,
  convertResponseError,
  terminationCallback: function terminationCallback(err, data) {
    if (err) {
      console.error(`ERROR: ${err.message}`);
      process.exitCode = codes[err.name] || 1;
      process.exit();
    }
  }
};

/* global describe it */
'use strict';
const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
  describe(u.scenario('Compression tests', params), function() {
    const p = u.p(params, { compression: true });

    it('should backup animaldb to a compressed file', async function() {
      // Allow up to 60 s for backup of animaldb
      u.setTimeout(this, 60);
      const compressedBackup = `./${this.fileName}`;
      const output = fs.createWriteStream(compressedBackup);
      return once(output, 'open')
        .then(() => {
          return u.testBackup(p, 'animaldb', output);
        }).then(() => {
          return u.assertGzipFile(compressedBackup);
        });
    });

    it('should backup and restore animaldb via a compressed file', async function() {
      // Allow up to 60 s for backup and restore of animaldb
      u.setTimeout(this, 60);
      const compressedBackup = `./${this.fileName}`;
      return u.testBackupAndRestoreViaFile(p, 'animaldb', compressedBackup, this.dbName).then(() => {
        return u.assertGzipFile(compressedBackup);
      });
    });

    it('should backup and restore animaldb via a compressed stream', async function() {
      // Allow up to 60 s for backup and restore of animaldb
      u.setTimeout(this, 60);
      return u.testDirectBackupAndRestore(p, 'animaldb', this.dbName);
    });

    it('should backup and restore largedb2g via a compressed file #slower', async function() {
      // Takes ~ 25 min using CLI, but sometimes over an hour with API
      u.setTimeout(this, 180 * 60);
      const compressedBackup = `./${this.fileName}`;
      params.compression = true;
      return u.testBackupAndRestoreViaFile(p, 'largedb2g', compressedBackup, this.dbName);
    });
  });
});
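A quick sketch of the error-mapping behaviour above; the require path is assumed for illustration:

// Sketch: convert a response-like error into a named CouchBackup error.
const { convertResponseError } = require('./includes/error.js'); // assumed path
const err = convertResponseError({ status: 401, message: 'unauthorized' });
console.log(err.name); // 'Unauthorized' (special-cased from the 401 status)
const fatal = convertResponseError({ status: 400, message: 'bad_request' });
console.log(fatal.name); // 'HTTPFatalError' (the default for other 4XX/5XX codes)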

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2018, 2023 IBM Corp. All rights reserved.
//

@@ -14,152 +14,63 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';
const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');

module.exports = function(db, bufferSize, parallelism, ee) {
  const writer = new stream.Transform({ objectMode: true });
  let buffer = [];
  let written = 0;
  let linenumber = 0;
  let didError = false;

  // this is the queue of chunks that are written to the database
  // the queue's payload will be an array of documents to be written,
  // the size of the array will be bufferSize. The variable parallelism
  // determines how many HTTP requests will occur at any one time.
  const q = async.queue(function(payload, cb) {
    // if we are restoring known revisions, we need to supply new_edits=false
    if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
      payload.new_edits = false;
      debug('Using new_edits false mode.');
    }

    if (!didError) {
      db.service.postBulkDocs({
        db: db.db,
        bulkDocs: payload
      }).then(response => {
        if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
          throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
            ` and ${response.result ? response.result.length : 'unavailable'} items`);
        }
        written += payload.docs.length;
        writer.emit('restored', { documents: payload.docs.length, total: written });
        cb();
      }).catch(err => {
        err = error.convertResponseError(err);
        debug(`Error writing docs ${err.name} ${err.message}`);
        cb(err, payload);
      });
    }
  }, parallelism);

  // write the contents of the buffer to CouchDB in blocks of bufferSize
  function processBuffer(flush, callback) {
    function taskCallback(err, payload) {
      if (err && !didError) {
        debug(`Queue task failed with error ${err.name}`);
        didError = true;
        q.kill();
        writer.emit('error', err);
      }
    }

    if (flush || buffer.length >= bufferSize) {
      // work through the buffer to break off bufferSize chunks
      // and feed the chunks to the queue
      do {
        // split the buffer into bufferSize chunks
        const toSend = buffer.splice(0, bufferSize);

        // and add the chunk to the queue
        debug(`Adding ${toSend.length} to the write queue.`);
        q.push({ docs: toSend }, taskCallback);
      } while (buffer.length >= bufferSize);

      // send any leftover documents to the queue
      if (flush && buffer.length > 0) {
        debug(`Adding remaining ${buffer.length} to the write queue.`);
        q.push({ docs: buffer }, taskCallback);
      }

      // wait until the queue size falls to a reasonable level
      async.until(
        // wait until the queue length drops to twice the parallelism
        // or until empty on the last write
        function(callback) {
          // if we encountered an error, stop this until loop
          if (didError) {
            return callback(null, true);
          }
          if (flush) {
            callback(null, q.idle() && q.length() === 0);
          } else {
            callback(null, q.length() <= parallelism * 2);
          }
        },
        function(cb) {
          setTimeout(cb, 20);
        },
        function() {
          if (flush && !didError) {
            writer.emit('finished', { total: written });
          }
          // callback when we're happy with the queue size
          callback();
        });
    } else {
      callback();
    }
  }

  // take an object
  writer._transform = function(obj, encoding, done) {
    // each obj that arrives here is a line from the backup file
    // it should contain an array of objects. The length of the array
    // depends on the bufferSize at backup time.
    linenumber++;
    if (!didError && obj !== '') {
      // see if it parses as JSON
      try {
        const arr = JSON.parse(obj);

        // if it's an array with a length
        if (typeof arr === 'object' && arr.length > 0) {
          // push each document into a buffer
          buffer = buffer.concat(arr);

          // pause the stream
          // it's likely that the speed with which data can be read from disk
          // may exceed the rate it can be written to CouchDB. To prevent
          // the whole file being buffered in memory, we pause the stream here.
          // it is resumed, when processBuffer calls back and we call done()
          this.pause();

          // break the buffer in to bufferSize chunks to be written to the database
          processBuffer(false, done);
        } else {
          ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
          done();
        }
      } catch (e) {
        ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
        // Could be an incomplete write that was subsequently resumed
        done();
      }
    } else {
      done();
    }
  };

  // called when we need to flush everything
  writer._flush = function(done) {
    processBuffer(true, done);
  };

  return writer;
};

/* global describe it */
'use strict';
const fs = require('fs');
const { once } = require('node:events');
const readline = require('readline');
const u = require('./citestutils.js');
const uuid = require('uuid').v4;

const params = { useApi: true };

describe(u.scenario('Concurrent database backups', params), function() {
  it('should run concurrent API database backups correctly #slower', async function() {
    // Allow up to 900 s to backup and compare (it should be much faster)!
    u.setTimeout(this, 900);

    const checkForEmptyBatches = async function(fileName) {
      let foundEmptyBatch = false;

      const rd = readline.createInterface({
        input: fs.createReadStream(fileName),
        output: fs.createWriteStream('/dev/null'),
        terminal: false
      });

      rd.on('line', function(line) {
        if (JSON.parse(line).length === 0) {
          // Note: Empty batch arrays indicate that the running backup is
          // incorrectly sharing a log file with another ongoing backup job.
          foundEmptyBatch = true;
        }
      });

      rd.on('close', function() {
        if (foundEmptyBatch) {
          return Promise.reject(new Error(`Log file '${fileName}' contains empty batches`));
        } else {
          return Promise.resolve();
        }
      });
    };

    const backupPromise = async function() {
      const actualBackup = `./${uuid()}`;
      const output = fs.createWriteStream(actualBackup);
      return once(output, 'open').then(() => {
        return u.testBackup(params, 'largedb1g', output);
      }).then(() => {
        return checkForEmptyBatches(actualBackup);
      });
    };

    // [1] Run 'largedb1g' database backup
    const backup1 = backupPromise();

    // [2] Run 'largedb1g' database backup
    const backup2 = backupPromise();

    return Promise.all([backup1, backup2]);
  });
});
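A minimal sketch of wiring the writer transform above into a restore pipeline; the module paths and target database are illustrative assumptions:

// Sketch: stream a backup file into a database via the writer transform.
const fs = require('fs');
const { EventEmitter } = require('events');
const request = require('./includes/request.js'); // assumed path
const liner = require('./includes/liner.js'); // assumed path
const writer = require('./includes/writer.js'); // the module above

const db = request.client('http://localhost:5984/targetdb', { parallelism: 2 });
const ee = new EventEmitter();
ee.on('error', (err) => console.error(`restore error: ${err.name}`));
const w = writer(db, 500, 2, ee);
w.on('restored', (progress) => console.log(`restored ${progress.total} docs so far`));
w.on('finished', (summary) => console.log(`done: ${summary.total} docs`));
fs.createReadStream('./backup.json').pipe(liner()).pipe(w);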

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,269 +14,281 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';
const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');

/**
 * Read documents from a database to be backed up.
 *
 * @param {string} db - `@cloudant/cloudant` DB object for source database.
 * @param {number} blocksize - number of documents to download in single request
 * @param {number} parallelism - number of concurrent downloads
 * @param {string} log - path to log file to use
 * @param {boolean} resume - whether to resume from an existing log file
 * @returns EventEmitter with following events:
 *  - `received` - called with a block of documents to write to backup
 *  - `error` - on error
 *  - `finished` - when backup process is finished (either complete or errored)
 */
module.exports = function(db, options) {
  const ee = new events.EventEmitter();
  const start = new Date().getTime(); // backup start time
  const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)

  function proceedWithBackup() {
    if (options.resume) {
      // pick up from existing log file from previous run
      downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
    } else {
      // create new log file and process
      spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
        if (err) {
          ee.emit('error', err);
        } else {
          downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
        }
      });
    }
  }

  validateBulkGetSupport(db, function(err) {
    if (err) {
      return ee.emit('error', err);
    } else {
      proceedWithBackup();
    }
  });

  return ee;
};

/**
 * Validate /_bulk_get support for a specified database.
 *
 * @param {string} db - nodejs-cloudant db
 * @param {function} callback - called on completion with signature (err)
 */
function validateBulkGetSupport(db, callback) {
  db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
    err = error.convertResponseError(err, function(err) {
      switch (err.status) {
        case undefined:
          // There was no status code on the error
          return err;
        case 404:
          return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
        default:
          return new error.HTTPError(err);
      }
    });
    callback(err);
  });
}

/**
 * Download remaining batches in a log file, splitting batches into sets
 * to avoid enqueueing too many in one go.
 *
 * @param {string} log - log file name to maintain download state
 * @param {string} db - nodejs-cloudant db
 * @param {events.EventEmitter} ee - event emitter to emit received events on
 * @param {time} startTime - start time for backup process
 * @param {number} batchesPerDownloadSession - max batches to enqueue for
 *  download at a time. As batches contain many doc IDs, this helps avoid
 *  exhausting memory.
 * @param {number} parallelism - number of concurrent downloads
 * @returns function to call to download remaining batches with signature
 *  (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
 */
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
  let total = 0; // running total of documents downloaded so far
  let noRemainingBatches = false;

  // Generate a set of batches (up to batchesPerDownloadSession) to download from the
  // log file and download them. Set noRemainingBatches to `true` for last batch.
  function downloadSingleBatchSet(done) {
    // Fetch the doc IDs for the batches in the current set to
    // download them.
    function batchSetComplete(err, data) {
      if (!err) {
        total = data.total;
      }
      done(err);
    }
    function processRetrievedBatches(err, batches) {
      if (!err) {
        // process them in parallelised queue
        processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
      } else {
        batchSetComplete(err);
      }
    }

    readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
      if (err) {
        ee.emit('error', err);
        // Stop processing changes file for fatal errors
        noRemainingBatches = true;
        done();
      } else {
        if (batchSetIds.length === 0) {
          noRemainingBatches = true;
          return done();
        }
        logfilegetbatches(log, batchSetIds, processRetrievedBatches);
      }
    });
  }

  // Return true if all batches in log file have been downloaded
  function isFinished(callback) { callback(null, noRemainingBatches); }

  function onComplete() {
    ee.emit('finished', { total: total });
  }

  async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}

/**
 * Return a set of uncompleted download batch IDs from the log file.
 *
 * @param {string} log - log file path
 * @param {number} batchesPerDownloadSession - maximum IDs to return
 * @param {function} callback - sign (err, batchSetIds array)
 */
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
  logfilesummary(log, function processSummary(err, summary) {
    if (!err) {
      if (!summary.changesComplete) {
        callback(new error.BackupError('IncompleteChangesInLogFile',
          'WARNING: Changes did not finish spooling'));
        return;
      }
      if (Object.keys(summary.batches).length === 0) {
        return callback(null, []);
      }

      // batch IDs are the property names of summary.batches
      const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
      callback(null, batchSetIds);
    } else {
      callback(err);
    }
  });
}

/**
 * Download a set of batches retrieved from a log file. When a download is
 * complete, add a line to the logfile indicating such.
 *
 * @param {any} db - nodejs-cloudant database
 * @param {any} parallelism - number of concurrent requests to make
 * @param {any} log - log file to drive downloads from
 * @param {any} batches - batches to download
 * @param {any} ee - event emitter for progress. This function emits
 *  received and error events.
 * @param {any} start - time backup started, to report deltas
 * @param {any} grandtotal - count of documents downloaded prior to this set
 *  of batches
 * @param {any} callback - completion callback, (err, {total: number}).
 */
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
  let hasErrored = false;
  let total = grandtotal;

  // queue to process the fetch requests in an orderly fashion using _bulk_get
  const q = async.queue(function(payload, done) {
    const output = [];
    const thisBatch = payload.batch;
    delete payload.batch;
    delete payload.command;

    function logCompletedBatch(batch) {
      if (log) {
        fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
      } else {
        done();
      }
    }

    // do the /db/_bulk_get request
    db.service.postBulkGet({
      db: db.db,
      revs: true,
      docs: payload.docs
    }).then(response => {
      // create an output array with the docs returned
      response.result.results.forEach(function(d) {
        if (d.docs) {
          d.docs.forEach(function(doc) {
            if (doc.ok) {
              output.push(doc.ok);
            }
          });
        }
      });
      total += output.length;
      const t = (new Date().getTime() - start) / 1000;
      ee.emit('received', {
        batch: thisBatch,
        data: output,
        length: output.length,
        time: t,
        total: total
      }, q, logCompletedBatch);
    }).catch(err => {
      if (!hasErrored) {
        hasErrored = true;
        err = error.convertResponseError(err);
        // Kill the queue for fatal errors
        q.kill();
        ee.emit('error', err);
      }
      done();
    });
  }, parallelism);

  for (const i in batches) {
    q.push(batches[i]);
  }

  q.drain(function() {
    callback(null, { total: total });
  });
}

/**
 * Returns first N properties on an object.
 *
 * @param {object} obj - object with properties
 * @param {number} count - number of properties to return
 */
function getPropertyNames(obj, count) {
  // decide which batch numbers to deal with
  const batchestofetch = [];
  let j = 0;
  for (const i in obj) {
    batchestofetch.push(parseInt(i));
    j++;
    if (j >= count) break;
  }
  return batchestofetch;
}

/* global describe it before after beforeEach */
'use strict';
const assert = require('assert');
const fs = require('fs');
const u = require('./citestutils.js');
const mockServerPort = +process.env.COUCHBACKUP_MOCK_SERVER_PORT || 7777;
const { once } = require('node:events');
const url = `http://localhost:${mockServerPort}`;
const nock = require('nock');
const httpProxy = require('http-proxy');
const Readable = require('stream').Readable;

// Create an infinite stream to read.
// It just keeps sending a backup line, useful for testing cases of
// termination while a stream has content remaining (the animaldb backup
// is too small for that).
class InfiniteBackupStream extends Readable {
  constructor(opt) {
    super(opt);
    this.contents = Buffer.from('[{"_id":"giraffe","_rev":"3-7665c3e66315ff40616cceef62886bd8","min_weight":830,"min_length":5,"max_weight":1600,"max_length":6,"wiki_page":"http://en.wikipedia.org/wiki/Giraffe","class":"mammal","diet":"herbivore","_revisions":{"start":3,"ids":["7665c3e66315ff40616cceef62886bd8","aaaf10d5a68cdf22d95a5482a0e95549","967a00dff5e02add41819138abb3284d"]}}]\n', 'utf8');
  }

  _read() {
    let proceed;
    do {
      proceed = this.push(this.contents);
    } while (proceed);
  }
}

function assertNock() {
  try {
    assert.ok(nock.isDone());
  } catch (err) {
    console.error('pending mocks: %j', nock.pendingMocks());
    throw err;
  }
}

function testPromiseWithAssertNock(testPromise) {
  return testPromise.finally(() => {
    assertNock();
  });
}

async function backupHttpError(opts, errorName, errorCode) {
  const p = u.p(opts, { expectedBackupError: { name: errorName, code: errorCode } });

  // Create a file and attempt a backup to it
  const output = fs.createWriteStream('/dev/null');
  return once(output, 'open')
    .then(() => {
      return testPromiseWithAssertNock(u.testBackup(p, 'fakenockdb', output));
    });
}

async function restoreHttpError(opts, errorName, errorCode) {
  const q = u.p(opts, { expectedRestoreError: { name: errorName, code: errorCode } });
  return testPromiseWithAssertNock(u.testRestoreFromFile(q, './test/fixtures/animaldb_expected.json', 'fakenockdb'));
}

[{ useApi: true }, { useApi: false }].forEach(function(params) {
  describe(u.scenario('#unit Fatal errors', params), function() {
    // These tests do real requests with mocks and if they run slowly
    // the 2 second default mocha timeout can be insufficient, use 10 s
    this.timeout(10000);

    let processEnvCopy;
    let proxy;

    before('Set process data for test', function() {
      const proxyPort = mockServerPort + 1000;
      // Copy env and argv so we can reset them after the tests
      processEnvCopy = JSON.parse(JSON.stringify(process.env));

      // Set up a proxy to point to our nock server because the nock override
      // isn't visible to the spawned CLI process
      if (!params.useApi) {
        proxy = httpProxy.createProxyServer({ target: url }).listen(proxyPort, 'localhost');
        proxy.on('error', (err, req, res) => {
          console.log(`Proxy received error ${err}`);
          res.writeHead(400, {
            'Content-Type': 'application/json'
          });
          res.end(JSON.stringify(err));
        });
      }

      // setup environment variables
      process.env.COUCH_URL = (params.useApi) ? url : `http://localhost:${proxyPort}`;
      nock.emitter.on('no match', (req, opts) => {
        console.error(`Unmatched nock request ${opts.method} ${opts.protocol}${opts.host}${opts.path}`);
      });
    });

    after('Reset process data', function(done) {
      process.env = processEnvCopy;
      nock.emitter.removeAllListeners();
      if (!params.useApi) {
        proxy.close(done);
      } else {
        done();
      }
    });

    beforeEach('Reset nocks', function() {
      nock.cleanAll();
    });

    describe('for backup', function() {
      it('should terminate when DB does not exist', function() {
        // Simulate existence check
        nock(url).head('/fakenockdb').reply(404, { error: 'not_found', reason: 'missing' });
        return backupHttpError(params, 'DatabaseNotFound', 10);
      });

      it('should terminate on BulkGetError', function() {
        // Simulate existence check
        const n = nock(url).head('/fakenockdb').reply(200);
        // Simulate _bulk_get not available
        n.post('/fakenockdb/_bulk_get').reply(404, { error: 'not_found', reason: 'missing' });
        return backupHttpError(params, 'BulkGetError', 50);
      });

      it('should terminate on Unauthorized existence check', function() {
        // Simulate a 401
        nock(url).head('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
        return backupHttpError(params, 'Unauthorized', 11);
      });

      it('should terminate on Forbidden no _reader', function() {
        // Simulate a 403
        nock(url).head('/fakenockdb').reply(403, { error: 'forbidden', reason: '_reader access is required for this request' });
        return backupHttpError(params, 'Forbidden', 12);
      });

      it('should terminate on _bulk_get HTTPFatalError', function() {
        // Provide a mock complete changes log to allow a resume to skip ahead
        const p = u.p(params, { opts: { resume: true, log: './test/fixtures/test.log' } });
        // Allow the existence and _bulk_get checks to pass
        const n = nock(url).head('/fakenockdb').reply(200);
        n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
        // Simulate a fatal HTTP error when trying to fetch docs
        // Note: 2 outstanding batches, so 2 responses, 1 mock is optional because we can't guarantee timing
        n.post('/fakenockdb/_bulk_get').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
        n.post('/fakenockdb/_bulk_get').query(true).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
        return backupHttpError(p, 'HTTPFatalError', 40);
      });

      it('should terminate on NoLogFileName', function() {
        // Don't supply a log file name with resume
        const p = u.p(params, { opts: { resume: true } });
        return backupHttpError(p, 'NoLogFileName', 20);
      });

      it('should terminate on LogDoesNotExist', function() {
        // Use a non-existent log file
        const p = u.p(params, { opts: { resume: true, log: './test/fixtures/doesnotexist.log' } });
        return backupHttpError(p, 'LogDoesNotExist', 21);
      });

      it('should terminate on IncompleteChangesInLogFile', function() {
        // Use an incomplete changes log file
        const p = u.p(params, { opts: { resume: true, log: './test/fixtures/incomplete_changes.log' } });
        // Allow the existence and _bulk_get checks to pass
        const n = nock(url).head('/fakenockdb').reply(200);
        n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
        // Should fail when it reads the incomplete changes
        return backupHttpError(p, 'IncompleteChangesInLogFile', 22);
      });

      it('should terminate on _changes HTTPFatalError', function() {
        // Allow the existence and _bulk_get checks to pass
        const n = nock(url).head('/fakenockdb').reply(200);
        n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
        // Simulate a fatal HTTP error when trying to fetch docs (note 2 outstanding batches)
        n.post('/fakenockdb/_changes').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
        return backupHttpError(params, 'HTTPFatalError', 40);
      });

      it('should terminate on SpoolChangesError', function() {
        // Allow the existence and _bulk_get checks to pass
        const n = nock(url).head('/fakenockdb').reply(200);
        n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
        // Simulate a changes without a last_seq
        n.post('/fakenockdb/_changes').query(true).reply(200,
          {
            results: [{
              seq: '2-g1AAAAEbeJzLYWBgYMlgTmFQSElKzi9KdUhJstTLTS3KLElMT9VLzskvTUnMK9HLSy3JAapkSmRIsv___39WBnMiUy5QgN3MzDIxOdEMWb85dv0gSxThigyN8diS5AAkk-pBFiUyoOkzxKMvjwVIMjQAKaDW_Zh6TQnqPQDRC7I3CwDPDV1k',
              id: 'badger',
              changes: [{ rev: '4-51aa94e4b0ef37271082033bba52b850' }]
            }]
          });
        return backupHttpError(params, 'SpoolChangesError', 30);
      });
    });

    describe('for restore', function() {
      it('should terminate on Unauthorized db existence check', function() {
        // Simulate a 401
        nock(url).get('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
        return restoreHttpError(params, 'Unauthorized', 11);
      });

      it('should terminate on Forbidden no _writer', function() {
        // Simulate the DB exists (i.e. you can read it)
        const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
        // Simulate a 403 trying to write
        n.post('/fakenockdb/_bulk_docs').reply(403, { error: 'forbidden', reason: '_writer access is required for this request' });
        return restoreHttpError(params, 'Forbidden', 12);
      });

      it('should terminate on RestoreDatabaseNotFound', function() {
        // Simulate the DB does not exist
        nock(url).get('/fakenockdb').reply(404, { error: 'not_found', reason: 'Database does not exist.' });
        return restoreHttpError(params, 'DatabaseNotFound', 10);
      });

      it('should terminate on notEmptyDBErr when database is not empty', function() {
        // Simulate the DB that does exist and not empty
        nock(url).get('/fakenockdb').reply(200, { doc_count: 10, doc_del_count: 0 });
        return restoreHttpError(params, 'DatabaseNotEmpty', 13);
      });

      it('should terminate on notEmptyDBErr when database is not new', function() {
        // Simulate the DB that does exist and not new
        nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 10 });
        return restoreHttpError(params, 'DatabaseNotEmpty', 13);
      });

      it('should terminate on _bulk_docs HTTPFatalError', function() {
        // Simulate the DB exists
        const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
        // Use a parallelism of one and mock one response
        const p = u.p(params, { opts: { parallelism: 1 } });
        // Simulate a 400 trying to write
        n.post('/fakenockdb/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
        return restoreHttpError(p, 'HTTPFatalError', 40);
      });

      it('should terminate on _bulk_docs HTTPFatalError from system database', function() {
        // Simulate that target database exists and is _not_ empty.
        // This should pass validator as we exclude system databases from the check.
        const n = nock(url).get('/_replicator').reply(200, { doc_count: 1, doc_del_count: 0 });
        // Simulate a 400 trying to write
        n.post('/_replicator/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
        // Use a parallelism of one and mock one response
        const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
        return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), '_replicator'));
      });

      it('should terminate on _bulk_docs HTTPFatalError large stream', function() {
        // Simulate the DB exists
        const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
        // Simulate a 400 trying to write
        // Provide a body function to handle the stream, but allow any body
        n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
        // Use only parallelism 1 so we don't have to mock up loads of responses
        const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
        return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), 'fakenockdb'));
      });

      it('should terminate on multiple _bulk_docs HTTPFatalError', function() {
        // Simulate the DB exists
        const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
        // Simulate a 400 trying to write docs, 5 times because of default parallelism
        // Provide a body function to handle the stream, but allow any body
        // Four of the mocks are optional because of parallelism 5 we can't guarantee that the exit will happen
        // after all 5 requests, but we must get at least one of them
        n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
        n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).times(4).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
        const q = u.p(params, { opts: { bufferSize: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
        return restoreHttpError(q, 'HTTPFatalError', 40);
      });
    });
  });
});
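A sketch of consuming the backup EventEmitter defined above; the client construction and paths are illustrative assumptions:

// Sketch: run a backup and react to its events.
const request = require('./includes/request.js'); // assumed path
const backupDb = require('./includes/backup.js'); // the module above
const db = request.client('http://localhost:5984/sourcedb', { parallelism: 5 });
const emitter = backupDb(db, { log: './backup.log', bufferSize: 500, parallelism: 5, resume: false });
emitter.on('received', (block) => {
  console.log(`batch ${block.batch}: ${block.length} docs in ${block.time}s (total ${block.total})`);
});
emitter.on('error', (err) => console.error(`${err.name}: ${err.message}`));
emitter.on('finished', (summary) => console.log(`backup finished with ${summary.total} docs`));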

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,80 +14,122 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';
const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');

const onLine = function(onCommand, getDocs) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    if (line && line[0] === ':') {
      const obj = {
        command: null,
        batch: null,
        docs: []
      };

      let matches;

      // extract command
      matches = line.match(/^:([a-z_]+) ?/);
      if (matches) {
        obj.command = matches[1];
      }

      // extract batch
      matches = line.match(/ batch([0-9]+)/);
      if (matches) {
        obj.batch = parseInt(matches[1]);
      }

      // extract doc ids
      if (getDocs && obj.command === 't') {
        const json = line.replace(/^.* batch[0-9]+ /, '').trim();
        obj.docs = JSON.parse(json);
      }
      onCommand(obj);
    }
    done();
  };
  return change;
};

/**
 * Generate a list of remaining batches from a download file.
 *
 * @param {string} log - log file name
 * @param {function} callback - callback with err, {changesComplete: N, batches: N}.
 *  changesComplete signifies whether the log file appeared to
 *  have completed reading the changes feed (contains :changes_complete).
 *  batches are remaining batch IDs for download.
 */
module.exports = function(log, callback) {
  // our sense of state
  const state = { };
  let changesComplete = false;

  // called with each line from the log file
  const onCommand = function(obj) {
    if (obj.command === 't') {
      state[obj.batch] = true;
    } else if (obj.command === 'd') {
      delete state[obj.batch];
    } else if (obj.command === 'changes_complete') {
      changesComplete = true;
    }
  };

  // stream through the previous log file
  fs.createReadStream(log)
    .pipe(liner())
    .pipe(onLine(onCommand, false))
    .on('finish', function() {
      const obj = { changesComplete: changesComplete, batches: state };
      callback(null, obj);
    });
};

/* global describe it beforeEach */
'use strict';
const assert = require('assert');
const fs = require('fs');
const nock = require('nock');
const request = require('../includes/request.js');
const writer = require('../includes/writer.js');
const noopEmitter = new (require('events')).EventEmitter();
const liner = require('../includes/liner.js');
const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');
const longTestTimeout = 3000;

describe('#unit Check database restore writer', function() {
  const dbUrl = 'http://localhost:5984/animaldb';
  const db = request.client(dbUrl, { parallelism: 1 });

  beforeEach('Reset nocks', function() {
    nock.cleanAll();
  });

  it('should complete successfully', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, []); // success

    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 15);
        assert.ok(nock.isDone());
      })]);
  });

  it('should terminate on a fatal error', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(401, { error: 'Unauthorized' }); // fatal error

    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'Unauthorized');
        assert.strictEqual(err.message, 'Access is denied due to invalid credentials.');
        assert.ok(nock.isDone());
        return true;
      }
    );
  });

  it('should retry on transient errors', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(429, { error: 'Too Many Requests' }) // transient error
      .post('/_bulk_docs')
      .reply(500, { error: 'Internal Server Error' }) // transient error
      .post('/_bulk_docs')
      .reply(200, { ok: true }); // third time lucky success

    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 15);
        assert.ok(nock.isDone());
      })]);
  }).timeout(longTestTimeout);

  it('should fail after 3 transient errors', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(429, { error: 'Too Many Requests' }) // transient error
      .post('/_bulk_docs')
      .reply(500, { error: 'Internal Server Error' }) // transient error
      .post('/_bulk_docs')
      .reply(503, { error: 'Service Unavailable' }); // Final transient error

    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `503 : post ${dbUrl}/_bulk_docs - Error: Service Unavailable`);
        assert.ok(nock.isDone());
        return true;
      }
    );
  }).timeout(longTestTimeout);

  it('should restore shallow backups without rev info successfully', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, [{ ok: true, id: 'foo', rev: '1-abc' }]); // success

    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_old_shallow.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 11);
        assert.ok(nock.isDone());
      })]);
  });

  it('should get a batch error for non-empty array response with new_edits false', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, [{ id: 'foo', error: 'foo', reason: 'bar' }]);

    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'Error');
        assert.strictEqual(err.message, 'Error writing batch with new_edits:false and 1 items');
        assert.ok(nock.isDone());
        return true;
      }
    );
  });
});
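For orientation, a sketch of calling the log file summary module above; the log path is illustrative:

// Sketch: summarize outstanding batches in a backup log file.
const logfilesummary = require('./includes/logfilesummary.js'); // the module above
logfilesummary('./backup.log', function(err, summary) {
  if (err) {
    console.error(err);
  } else {
    console.log(`changes complete: ${summary.changesComplete}`);
    console.log(`batches remaining: ${Object.keys(summary.batches).length}`);
  }
});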

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,63 +14,201 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';
const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');

const onLine = function(onCommand, batches) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    if (line && line[0] === ':') {
      const obj = {
        command: null,
        batch: null,
        docs: []
      };

      let matches;

      // extract command
      matches = line.match(/^:([a-z_]+) ?/);
      if (matches) {
        obj.command = matches[1];
      }

      // extract batch
      matches = line.match(/ batch([0-9]+)/);
      if (matches) {
        obj.batch = parseInt(matches[1]);
      }

      // if this is one we want
      if (obj.command === 't' && batches.indexOf(obj.batch) > -1) {
        const json = line.replace(/^.* batch[0-9]+ /, '').trim();
        obj.docs = JSON.parse(json);
        onCommand(obj);
      }
    }
    done();
  };
  return change;
};

module.exports = function(log, batches, callback) {
  // our sense of state
  const retval = { };

  // called with each line from the log file
  const onCommand = function(obj) {
    retval[obj.batch] = obj;
  };

  // stream through the previous log file
  fs.createReadStream(log)
    .pipe(liner())
    .pipe(onLine(onCommand, batches))
    .on('error', function(err) {
      callback(err);
    })
    .on('finish', function() {
      callback(null, retval);
    });
};

/* global describe it beforeEach */
'use strict';
const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const error = require('../includes/error.js');

const url = 'http://localhost:7777/testdb';
const db = request.client(url, { parallelism: 1 });
const timeoutDb = request.client(url, { parallelism: 1, requestTimeout: 500 });
const longTestTimeout = 3000;

beforeEach('Clean nock', function() {
  nock.cleanAll();
});

describe('#unit Check request headers', function() {
  it('should have a couchbackup user-agent', async function() {
    const couch = nock(url)
      .matchHeader('user-agent', /couchbackup-cloudant\/\d+\.\d+\.\d+(?:-SNAPSHOT)? \(Node.js v\d+\.\d+\.\d+\)/)
      .head('/good')
      .reply(200);

    return db.service.headDocument({ db: db.db, docId: 'good' }).then(() => {
      assert.ok(couch.isDone());
    });
  });
});

describe('#unit Check request response error callback', function() {
  it('should not callback with error for 200 response', async function() {
    const couch = nock(url)
      .get('/good')
      .reply(200, { ok: true });

    return db.service.getDocument({ db: db.db, docId: 'good' }).then(response => {
      assert.ok(response.result);
      assert.ok(couch.isDone());
    });
  });

  it('should callback with error after 3 500 responses', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .reply(500, function(uri, requestBody) {
        this.req.response.statusMessage = 'Internal Server Error';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `500 Internal Server Error: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with error after 3 POST 503 responses', async function() {
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .times(3)
      .reply(503, function(uri, requestBody) {
        this.req.response.statusMessage = 'Service Unavailable';
        return { error: 'service_unavailable', reason: 'Service unavailable' };
      });

    return assert.rejects(
      db.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `503 Service Unavailable: post ${url}/_bulk_get - Error: service_unavailable, Reason: Service unavailable`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with error after 3 429 responses', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .reply(429, function(uri, requestBody) {
        this.req.response.statusMessage = 'Too Many Requests';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `429 Too Many Requests: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with fatal error for 404 response', async function() {
    const couch = nock(url)
      .get('/bad')
      .reply(404, function(uri, requestBody) {
        this.req.response.statusMessage = 'Not Found';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `404 Not Found: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  });

  it('should callback with same error for no status code error response', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .replyWithError('testing badness');

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        const err2 = error.convertResponseError(err);
        assert.strictEqual(err, err2);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should retry request if HTTP request gets timed out', async function() {
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .delay(1000)
      .reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }] } })
      .post('/_bulk_get')
      .query(true)
      .reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }, { id: '2', ok: { _id: '2' } }] } });

    return timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then((response) => {
      assert.ok(response);
      assert.ok(response.result);
      assert.ok(response.result.results);
      assert.ok(response.result.results.docs);
      assert.strictEqual(response.result.results.docs.length, 2);
      assert.ok(couch.isDone());
    });
  });

  it('should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out', async function() {
    // Increase the timeout for this test to allow for the delays
    this.timeout(3000);
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .delay(1000)
      .times(3)
      .reply(200, { ok: true });

    return assert.rejects(
      timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
      (err) => {
        err = error.convertResponseError(err);
        // Note axios returns ECONNABORTED rather than ESOCKETTIMEDOUT
        // See https://github.com/axios/axios/issues/2710 via https://github.com/axios/axios/issues/1543
        assert.strictEqual(err.statusText, 'ECONNABORTED');
        assert.strictEqual(err.message, `timeout of 500ms exceeded: post ${url}/_bulk_get ECONNABORTED`);
        assert.ok(couch.isDone());
        return true;
      });
  });
});

describe('#unit Check credentials', async function() {
  it('should properly decode username and password', async function() {
    const username = 'user%123';
    const password = 'colon:at@321';
    const url = `http://${encodeURIComponent(username)}:${encodeURIComponent(password)}@localhost:7777/testdb`;
    const sessionUrl = 'http://localhost:7777';
    const couch = nock(sessionUrl)
      .post('/_session', { username: username, password: password })
      .reply(200, { ok: true }, { 'Set-Cookie': 'AuthSession=ABC123DEF4356;' })
      .get('/')
      .reply(200);

    const db = request.client(url, { parallelism: 1 });
    return db.service.getServerInformation().then(response => {
      assert.ok(response);
      assert.ok(couch.isDone());
    });
  });
});
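A sketch of the batch-retrieval module above; the log path and batch numbers are illustrative:

// Sketch: read the doc IDs recorded for batches 0 and 1 from a backup log.
const logfilegetbatches = require('./includes/logfilegetbatches.js'); // the module above
logfilegetbatches('./backup.log', [0, 1], function(err, batches) {
  if (!err) {
    // batches is keyed by batch number; each entry carries the parsed doc IDs
    console.log(batches[0].docs);
  }
});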

@@ -1,2 +0,2 @@

// Copyright © 2017, 2018 IBM Corp. All rights reserved.
// Copyright © 2023 IBM Corp. All rights reserved.
//

@@ -14,19 +14,139 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';
module.exports = function(db, options, readstream, ee, callback) {
  const liner = require('../includes/liner.js')();
  const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);

  // pipe the input to the output, via transformation functions
  readstream
    .pipe(liner) // transform the input stream into per-line
    .on('error', function(err) {
      // Forward the error to the writer event emitter where we already have
      // listeners on for handling errors
      writer.emit('error', err);
    })
    .pipe(writer); // transform the data

  callback(null, writer);
};

'use strict';
const { fork, spawn } = require('node:child_process');
const { once } = require('node:events');
const { Duplex } = require('node:stream');
const debug = require('debug');
const logProcess = debug('couchbackup:test:process');

class TestProcess {
  constructor(cmd, args, mode) {
    this.cmd = cmd;
    // Child process stdio [stdin, stdout, stderr, ...extra channels]
    const childProcessOptions = { stdio: [] };
    switch (mode) {
      case 'readable':
        // Readable only, no writing to stdin so ignore it
        childProcessOptions.stdio = ['ignore', 'pipe', 'inherit'];
        break;
      case 'writable':
        // Writable only, no reading from stdout so ignore it
        childProcessOptions.stdio = ['pipe', 'ignore', 'inherit'];
        break;
      default:
        // Default Duplex mode pipe both stdin and stdout
        childProcessOptions.stdio = ['pipe', 'pipe', 'inherit'];
        break;
    }
    if (cmd.endsWith('.js')) {
      // Add Node fork ipc channel
      childProcessOptions.stdio.push('ipc');
      logProcess(`Forking Node process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
      this.childProcess = fork(cmd, args, childProcessOptions);
    } else {
      logProcess(`Spawning process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
      this.childProcess = spawn(cmd, args, childProcessOptions);
    }

    this.childProcessPromise = once(this.childProcess, 'close').then(() => {
      const code = this.childProcess.exitCode;
      const signal = this.childProcess.signalCode;
      logProcess(`Test process ${cmd} closed with code ${code} and signal ${signal}`);
      if (code === 0) {
        logProcess(`Resolving process promise for ${cmd}`);
        return Promise.resolve(code);
      } else {
        const e = new Error(`Test child process ${cmd} exited with code ${code} and ${signal}. This may be normal for error case testing.`);
        e.code = code;
        e.signal = signal;
        logProcess(`Will reject process promise for ${cmd} with ${e}`);
        return Promise.reject(e);
      }
    });

    switch (mode) {
      case 'readable':
        this.duplexFrom = this.childProcess.stdout;
        break;
      case 'writable':
        this.duplexFrom = this.childProcess.stdin;
        break;
      default:
        // Default is duplex
        this.duplexFrom = { writable: this.childProcess.stdin, readable: this.childProcess.stdout };
    }

    this.stream = Duplex.from(this.duplexFrom);
  }
}

module.exports = {
  TestProcess,
  cliBackup: function(databaseName, params = {}) {
    const args = ['--db', databaseName];
    if (params.opts) {
      if (params.opts.mode) {
        args.push('--mode');
        args.push(params.opts.mode);
      }
      if (params.opts.output) {
        args.push('--output');
        args.push(params.opts.output);
      }
      if (params.opts.log) {
        args.push('--log');
        args.push(params.opts.log);
      }
      if (params.opts.resume) {
        args.push('--resume');
        args.push(params.opts.resume);
      }
      if (params.opts.bufferSize) {
        args.push('--buffer-size');
        args.push(params.opts.bufferSize);
      }
      if (params.opts.iamApiKey) {
        args.push('--iam-api-key');
        args.push(params.opts.iamApiKey);
      }
    }
    return new TestProcess('./bin/couchbackup.bin.js', args, 'readable');
  },
  cliRestore: function(databaseName, params) {
    const args = ['--db', databaseName];
    if (params.opts) {
      if (params.opts.bufferSize) {
        args.push('--buffer-size');
        args.push(params.opts.bufferSize);
      }
      if (params.opts.parallelism) {
        args.push('--parallelism');
        args.push(params.opts.parallelism);
      }
      if (params.opts.requestTimeout) {
        args.push('--request-timeout');
        args.push(params.opts.requestTimeout);
      }
      if (params.opts.iamApiKey) {
        args.push('--iam-api-key');
        args.push(params.opts.iamApiKey);
      }
    }
    return new TestProcess('./bin/couchrestore.bin.js', args, 'writable');
  },
  cliGzip: function() {
    return new TestProcess('gzip', []);
  },
  cliGunzip: function() {
    return new TestProcess('gunzip', []);
  },
  cliEncrypt: function() {
    return new TestProcess('openssl', ['aes-128-cbc', '-pass', 'pass:12345']);
  },
  cliDecrypt: function() {
    return new TestProcess('openssl', ['aes-128-cbc', '-d', '-pass', 'pass:12345']);
  }
};
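A sketch of chaining these helpers, piping a CLI backup through gzip; the module path is an assumption for illustration:

// Sketch: run couchbackup as a child process and gzip its stdout.
const fs = require('fs');
const { cliBackup, cliGzip } = require('./test/ci_e2e_helpers.js'); // assumed path for the module above
const backup = cliBackup('animaldb');
const zip = cliGzip();
backup.stream.pipe(zip.stream).pipe(fs.createWriteStream('./animaldb.json.gz'));
Promise.all([backup.childProcessPromise, zip.childProcessPromise])
  .then(() => console.log('backup written'))
  .catch((e) => console.error(`child process failed: code ${e.code}, signal ${e.signal}`));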

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -15,33 +15,88 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global after before describe */
'use strict';

// Import the common hooks
require('../test/hooks.js');

const poisons = [
  'normal',
  'bandwidth-limit',
  'latency',
  'slow-read',
  'rate-limit'
];

poisons.forEach(function(poison) {
  describe('unreliable network tests (using poison ' + poison + ')', function() {
    before('start server', function() {
      // **************************
      // Currently these tests do nothing
      // pending resolution of https://github.com/IBM/couchbackup/issues/360
      // to add a new toxic server
      // **************************
    });

    after('stop server', function() {
    });

    delete require.cache[require.resolve('../test/ci_e2e.js')];
    require('../test/ci_e2e.js');
  });
});

/* global describe it */
'use strict';
const assert = require('assert');
const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
  describe(u.scenario('Resume tests', params), function() {
    it('should create a log file', async function() {
      // Allow up to 60 s for this test
      u.setTimeout(this, 60);

      const actualBackup = `./${this.fileName}`;
      const logFile = `./${this.fileName}` + '.log';
      const p = u.p(params, { opts: { log: logFile } });
      return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
        assert.ok(fs.existsSync(logFile), 'The log file should exist.');
      });
    });

    it('should restore corrupted animaldb to a database correctly', async function() {
      // Allow up to 60 s to restore and compare (again it should be faster)!
      u.setTimeout(this, 60);
      const input = fs.createReadStream('./test/fixtures/animaldb_corrupted.json');
      const dbName = this.dbName;
      const p = u.p(params, { expectedRestoreErrorRecoverable: { name: 'BackupFileJsonError' } });
      return once(input, 'open')
        .then(() => {
          return u.testRestore(p, input, dbName);
        }).then(() => {
          return u.dbCompare('animaldb', dbName);
        });
    });

    it('should restore resumed animaldb with blank line to a database correctly', async function() {
      // Allow up to 60 s to restore and compare (again it should be faster)!
      u.setTimeout(this, 60);
      const input = fs.createReadStream('./test/fixtures/animaldb_resumed_blank.json');
      const dbName = this.dbName;
      return once(input, 'open')
        .then(() => {
          return u.testRestore(params, input, dbName);
        }).then(() => {
          return u.dbCompare('animaldb', dbName);
        });
    });
  });
});

describe('Resume tests', function() {
  // Currently cannot abort API backups, when we do this test should be run for
  // both API and CLI
  // Note --output is only valid for CLI usage, so these tests only run for CLI
  const params = { useApi: false };

  it('should correctly backup and restore backup10m', async function() {
    // Allow up to 90 s for this test
    u.setTimeout(this, 90);
    const actualBackup = `./${this.fileName}`;
    const logFile = `./${this.fileName}` + '.log';
    // Use abort parameter to terminate the backup
    const p = u.p(params, { abort: true }, { opts: { log: logFile } });
    const restoreDb = this.dbName;
    // Set the database doc count as fewer than this should be written during
    // resumed backup.
    p.exclusiveMaxExpected = 5096;
    return u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
  });

  it('should correctly backup and restore backup10m using --output', async function() {
    // Allow up to 90 s for this test
    u.setTimeout(this, 90);
    const actualBackup = `./${this.fileName}`;
    const logFile = `./${this.fileName}` + '.log';
    // Use abort parameter to terminate the backup
    const p = u.p(params, { abort: true }, { opts: { output: actualBackup, log: logFile } });
    const restoreDb = this.dbName;
    // Set the database doc count as fewer than this should be written during
    // resumed backup.
    p.exclusiveMaxExpected = 5096;
    return await u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
  });
});
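For reference, a sketch of the abort-and-resume flow these tests exercise, built with the TestProcess helpers shown earlier; the module path is an assumption:

// Sketch: start a resumable CLI backup with a log file, then resume it.
const { cliBackup } = require('./test/ci_e2e_helpers.js'); // assumed path
// Initial run writes batches and records progress in backup.log:
const first = cliBackup('backup10m', { opts: { log: './backup.log', output: './backup.json' } });
first.childProcessPromise.catch(() => {
  // If the first run was aborted, resume from the log to fetch remaining batches:
  const resumed = cliBackup('backup10m', { opts: { log: './backup.log', output: './backup.json', resume: true } });
  return resumed.childProcessPromise;
});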

@@ -1,424 +0,187 @@

<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:17" time="0.093">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.022">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.008">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:17" time="4.527">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.955">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.832">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.633">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:21" time="10.637">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.831">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.468">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.539">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:32" time="5.253">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.291">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.18">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:37" time="12.64">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.284">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.234">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.316">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:50" time="5.355">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.869">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.515">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.174">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:36:55" time="6.979">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.272">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.302">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.611">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:37:02" time="301.883">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.159">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="298.955">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:42:04" time="454.537">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.644">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="451.128">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:49:39" time="2.646">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.373">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:49:41" time="0.292">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.012">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:49:41" time="2.492">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.956">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.899">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:49:44" time="5.396">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.925">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.884">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.769">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:49:49" time="6.462">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.268">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.152">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.23">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:49:56" time="32.265">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.008">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="15.676">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:28" time="0.01">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:28" time="0.092">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.015">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.015">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.014">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:28" time="0.121">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.006">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.035">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.025">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.01">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:28" time="3.024">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.297">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.332">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.305">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.294">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.322">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.239">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.257">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.308">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.341">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.321">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:31" time="2.822">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.284">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.332">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.291">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.274">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.285">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.31">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.355">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.35">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.334">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:34" time="0.003">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:34" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:34" time="0.037">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.014">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:34" time="0.013">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:34" time="0.004">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:34" time="10.591">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.017">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.004">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.509">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.01">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:45" time="0.012">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:45" time="0.559">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.019">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.523">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.014">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:50:45" time="15.163">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.01">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.015">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.955">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.178">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:01" time="49.4">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="49.12">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2024-01-11T08:51:50" time="4.101">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.008">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.019">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.016">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.023">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const nock = require('nock');
const util = require('util');
const backupPromise = util.promisify(backup);
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl with a 404 response so the backup fails fast
// with a DatabaseNotFound error we can assert on.
const validateArgs = async function(url, opts, errorValidationForAssertRejects) {
const nullStream = fs.createWriteStream('/dev/null');
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
}
return assert.rejects(backupPromise(url, nullStream, opts), errorValidationForAssertRejects);
};
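// Note: util.promisify turns backup's (err, data) callback into a promise
// rejection on error, which is what lets the assert.rejects call above match
// the validation error objects returned by the helpers defined below.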
const validateShallowModeArgs = async function(url, opts, msg) {
// We pass assertNoValidationError because for these shallow opts
// we are expecting only a stderr warning
return validateArgs(url, opts, assertNoValidationError()).then(() => {
// Assert the warning message was in stderr
assert(capturedStderr.indexOf(msg) > -1, 'Log warning message was not present');
});
};
const stderrWriteFun = process.stderr.write;
let capturedStderr;
function captureStderr() {
// reset the capture buffer and redirect stderr writes into it
capturedStderr = '';
process.stderr.write = function(string, encoding, fd) {
capturedStderr += string;
};
}
function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}
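// Usage sketch: wrap the call that is expected to warn, assert on the captured
// text, and always restore the real stderr afterwards, e.g.:
// captureStderr();
// try {
// somethingThatWarns(); // hypothetical function for illustration
// assert(capturedStderr.indexOf('expected warning') > -1);
// } finally {
// releaseStderr();
// }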
// Return a validation object for use with assert.rejects
function assertErrorMessage(msg) {
return { name: 'InvalidOption', message: msg };
}
// For cases where validation should pass we reach a real backup that hits the 404
// mock, so a DatabaseNotFound error is the expected rejection in the assertNoValidationError case
function assertNoValidationError() { return { name: 'DatabaseNotFound' }; }
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', async function() {
return validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string'));
});
it('returns no error for valid URL type', async function() {
return validateArgs(goodUrl, {}, assertNoValidationError());
});
it('returns error for invalid (no host) URL', async function() {
return validateArgs('http://', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (no protocol) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (wrong protocol) URL', async function() {
return validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.'));
});
it('returns error for invalid (no path) URL', async function() {
return validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).'));
});
it('returns error for invalid (no protocol, no host) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: 123 }, assertNoValidationError());
});
it('returns error for invalid log type', async function() {
return validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string'));
});
it('returns no error for valid log type', async function() {
return validateArgs(goodUrl, { log: 'log.txt' }, assertNoValidationError());
});
it('returns error for invalid mode type', async function() {
return validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns error for invalid mode string', async function() {
return validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns no error for valid mode type', async function() {
return validateArgs(goodUrl, { mode: 'full' }, assertNoValidationError());
});
it('returns error for invalid output type', async function() {
return validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string'));
});
it('returns no error for valid output type', async function() {
return validateArgs(goodUrl, { output: 'output.txt' }, assertNoValidationError());
});
it('returns error for invalid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: 123 }, assertNoValidationError());
});
it('returns error for invalid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: 123 }, assertNoValidationError());
});
it('returns error for invalid resume type', async function() {
return validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean'));
});
it('returns no error for valid resume type', async function() {
return validateArgs(goodUrl, { resume: false }, assertNoValidationError());
});
it('returns error for invalid key type', async function() {
return validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string'));
});
it('returns error for key and URL credentials supplied', async function() {
return validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.'));
});
it('warns for log arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test' },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for resume arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for parallelism arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', parallelism: 10 },
'the option "parallelism" has no effect when using shallow mode.').finally(
() => {
releaseStderr();
});
});
});

@@ -1,3 +0,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -15,47 +14,29 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';
const error = require('../includes/error.js');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const restoreDebug = debug('couchbackup:restore');
const restoreBatchDebug = debug('couchbackup:restore:batch');
restoreDebug.enabled = true;
try {
const program = parser.parseRestoreArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);
const opts = {
bufferSize: program.bufferSize,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};
// log configuration to console
console.error('='.repeat(80));
console.error('Performing restore on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));
restoreBatchDebug.enabled = !program.quiet;
return couchbackup.restore(
process.stdin, // restore from stdin
databaseUrl,
opts,
error.terminationCallback
).on('restored', function(obj) {
restoreBatchDebug('restored', obj.total);
}).on('error', function(e) {
restoreDebug('ERROR', e);
}).on('finished', function(obj) {
restoreDebug('finished', obj);
});
} catch (err) {
error.terminationCallback(err);
}

/* global describe it */
'use strict';
const assert = require('assert');
const logfilegetbatches = require('../includes/logfilegetbatches.js');
describe('#unit Fetching batches from a log file', function() {
it('should fetch multiple batches correctly', async function() {
return new Promise((resolve, reject) => {
logfilegetbatches('./test/fixtures/test.log', [1, 4], function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(typeof data, 'object');
assert.strictEqual(Object.keys(data).length, 2);
assert.deepStrictEqual(data['1'].docs, [{ id: '6' }, { id: '7' }, { id: '8' }, { id: '9' }, { id: '10' }]);
assert.strictEqual(data['1'].batch, 1);
assert.deepStrictEqual(data['4'].docs, [{ id: '21' }, { id: '22' }]);
assert.strictEqual(data['4'].batch, 4);
resolve();
} catch (err) {
reject(err);
}
});
});
});
});

@@ -1,183 +0,424 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global describe it beforeEach */
'use strict';
const assert = require('assert');
const backup = require('../includes/shallowbackup.js');
const request = require('../includes/request.js');
const fs = require('fs');
const nock = require('nock');
// Function to create a DB object and call the shallow backup function
// This is normally done by app.js
function shallowBackup(dbUrl, opts) {
const db = request.client(dbUrl, opts);
// Disable compression to make body assertions easier
db.service.setEnableGzipCompression(false);
return backup(db, opts);
}
// Note all these tests include include_docs both as a body parameter and as a
// query string parameter because of a quirk of nano: its fetch method always
// adds the include_docs query string.
describe('#unit Perform backup using shallow backup', function() {
const dbUrl = 'http://localhost:5984/animaldb';
// Query string keys are stringified by Nano
const badgerKey = 'badger\0';
const kookaburraKey = 'kookaburra\0';
const snipeKey = 'snipe\0';
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
it('should perform a shallow backup', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
reject(err);
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('should perform a shallow backup with transient error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - transient error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(500, { error: 'Internal Server Error' })
// batch 3 - retry
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
assert.strictEqual(err.name, 'HTTPError');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('should fail to perform a shallow backup on fatal error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - fatal error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(401, { error: 'Unauthorized' });
let errCount = 0;
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
errCount++;
assert.strictEqual(err.name, 'Unauthorized');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
assert.strictEqual(data.length, 3);
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 6);
assert.ok(couch.isDone());
assert.strictEqual(errCount, 1);
resolve();
} catch (err) {
reject(err);
}
});
});
});
});
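// Aside: a minimal sketch (inferred from the nock interceptors above; an
// assumption, not the library's own code) of the _all_docs paging scheme these
// tests exercise: every batch after the first sets start_key to the previous
// batch's last key plus a NUL suffix, so paging resumes just past that key.
function nextBatchOpts(limit, lastKey) {
// the first batch has no start_key; later batches start just past lastKey
const opts = { limit: limit, include_docs: true };
if (lastKey) {
opts.start_key = lastKey + '\u0000';
}
return opts;
}
// nextBatchOpts(3) -> { limit: 3, include_docs: true }
// nextBatchOpts(3, 'badger') -> { limit: 3, include_docs: true, start_key: 'badger\u0000' }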
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:07" time="0.096">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.021">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.008">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.004">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:07" time="4.71">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="1.083">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.882">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.638">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:12" time="10.332">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.584">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.442">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.515">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:22" time="6.061">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.585">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.429">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.26">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:28" time="14.051">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.773">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.729">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.741">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:42" time="5.37">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.905">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.548">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.133">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:48" time="7.94">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.483">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.871">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.806">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:44:55" time="300.882">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.123">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="297.971">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:49:56" time="456.847">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.913">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="453.158">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:33" time="3.026">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.764">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:36" time="0.273">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.011">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:37" time="2.267">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.858">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.88">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:39" time="5.226">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.873">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.78">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.77">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:44" time="7.022">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.454">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.369">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.405">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:57:51" time="34.279">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="17.398">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.353">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:25" time="0.01">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:25" time="0.094">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.013">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.018">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.013">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:25" time="0.124">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.007">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.04">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.026">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:26" time="5.514">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.532">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.575">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.518">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.542">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.572">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.48">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.495">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.649">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.578">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.562">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:31" time="5.694">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.488">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.527">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.841">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.624">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.558">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.883">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.587">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.601">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.578">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:37" time="0.001">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:37" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:37" time="0.032">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.01">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:37" time="0.012">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:37" time="0.005">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:37" time="10.602">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.017">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.019">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.02">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.005">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.013">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.51">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.008">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:47" time="0.012">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:47" time="0.558">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.018">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.524">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.014">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:58:48" time="15.011">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.011">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.014">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.927">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.056">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:59:03" time="40.756">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="40.494">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2024-01-11T11:59:44" time="4.095">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.023">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.008">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.015">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.015">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.022">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>
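
The suites above exercise both the CLI and the programmatic API. For orientation, here is a minimal sketch — not taken from this package's test suite — of the backup-then-restore round trip that the "End to end backup and restore using API" suite covers, using the documented backup(srcUrl, targetStream, opts, callback) and restore(srcStream, targetUrl, opts, callback) exports. The URLs and database names are placeholders.

// A minimal sketch of a backup/restore round trip via the programmatic API
// of @cloudant/couchbackup. URLs and database names are placeholders.
const fs = require('fs');
const couchbackup = require('@cloudant/couchbackup');

// Stream the source database to a local backup file.
couchbackup.backup(
  'http://localhost:5984/animaldb', // placeholder source database URL
  fs.createWriteStream('animaldb.backup'),
  { parallelism: 2 },
  function(err) {
    if (err) {
      console.error('Backup failed', err);
      return;
    }
    // Restore into a pre-created, empty target database (as the fatal-error
    // suites show, restoring into a database that is not new and empty fails).
    couchbackup.restore(
      fs.createReadStream('animaldb.backup'),
      'http://localhost:5984/animaldb-restored', // placeholder target URL
      { bufferSize: 500 },
      function(err) {
        if (err) {
          console.error('Restore failed', err);
        } else {
          console.log('Backup and restore completed');
        }
      });
  });

The parallelism and bufferSize options mirror the COUCH_PARALLELISM and COUCH_BUFFER_SIZE variables checked by the configuration suites above.
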
package.json

{
"name": "@cloudant/couchbackup",
"version": "2.9.16",
"version": "2.9.17-SNAPSHOT.185",
"description": "CouchBackup - command-line backup utility for Cloudant/CouchDB",

@@ -5,0 +5,0 @@ "homepage": "https://github.com/IBM/couchbackup",
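
Since the manifest describes a command-line utility, here is a hedged illustration of the environment-variable fallback exercised by the "#unit Configuration" and "Default parameters" suites: when flags such as --url or --db are absent, the CLI reads COUCH_URL, COUCH_DATABASE, and related variables instead. This sketch is not part of the package and assumes a global install that puts couchbackup on the PATH, plus a local CouchDB.

// Hypothetical driver script: launch the couchbackup CLI with its
// configuration supplied through environment variables rather than flags.
// Assumes `couchbackup` is on the PATH (e.g. via a global npm install).
const { spawn } = require('child_process');
const fs = require('fs');

const backup = spawn('couchbackup', [], {
  env: {
    ...process.env,
    COUCH_URL: 'http://localhost:5984', // placeholder server URL
    COUCH_DATABASE: 'animaldb',         // placeholder database name
    COUCH_PARALLELISM: '2'              // env values must be strings
  },
  stdio: ['ignore', 'pipe', 'inherit']
});

// The CLI writes the backup to stdout by default, so capture it to a file.
backup.stdout.pipe(fs.createWriteStream('animaldb.backup'));
backup.on('close', (code) => console.log('couchbackup exited with code', code));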
