@cloudant/couchbackup - npm Package Compare versions

Comparing version 2.9.16-SNAPSHOT.181 to 2.9.16-SNAPSHOT.182

.scannerwork/scanner-report/changesets-27.pb


.scannerwork/report-task.txt

@@ -6,3 +6,3 @@ projectKey=couchbackup

 dashboardUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/dashboard?id=couchbackup&branch=main
-ceTaskId=AYx8UL_GTQBSfuLv66i3
-ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYx8UL_GTQBSfuLv66i3
+ceTaskId=AYzNiajN6RYwUyfa0GmF
+ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYzNiajN6RYwUyfa0GmF

@@ -19,24 +19,84 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const logfilegetbatches = require('../includes/logfilegetbatches.js');
const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');
describe('#unit Fetching batches from a log file', function() {
it('should fetch multiple batches correctly', async function() {
return new Promise((resolve, reject) => {
logfilegetbatches('./test/fixtures/test.log', [1, 4], function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(typeof data, 'object');
assert.strictEqual(Object.keys(data).length, 2);
assert.deepStrictEqual(data['1'].docs, [{ id: '6' }, { id: '7' }, { id: '8' }, { id: '9' }, { id: '10' }]);
assert.strictEqual(data['1'].batch, 1);
assert.deepStrictEqual(data['4'].docs, [{ id: '21' }, { id: '22' }]);
assert.strictEqual(data['4'].batch, 4);
resolve();
} catch (err) {
reject(err);
}
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Resume tests', params), function() {
it('should create a log file', async function() {
// Allow up to 60 s for this test
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile } });
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
assert.ok(fs.existsSync(logFile), 'The log file should exist.');
});
});
it('should restore corrupted animaldb to a database correctly', async function() {
// Allow up to 60 s to restore and compare (it should normally be faster)
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_corrupted.json');
const dbName = this.dbName;
const p = u.p(params, { expectedRestoreErrorRecoverable: { name: 'BackupFileJsonError' } });
return once(input, 'open')
.then(() => {
return u.testRestore(p, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should restore resumed animaldb with blank line to a database correctly', async function() {
// Allow up to 60 s to restore and compare (it should normally be faster)
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_resumed_blank.json');
const dbName = this.dbName;
return once(input, 'open')
.then(() => {
return u.testRestore(params, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
});
});
describe('Resume tests', function() {
// Currently we cannot abort API backups; when we can, this test should
// run for both the API and the CLI
it('should correctly backup and restore backup10m', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 90);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
// Use abort parameter to terminate the backup
const p = u.p(params, { abort: true }, { opts: { log: logFile } });
const restoreDb = this.dbName;
// Set a ceiling on the document count; the resumed backup should write
// fewer than this.
p.exclusiveMaxExpected = 5096;
return u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
});
// Note: --output is only valid for CLI usage, so this test should only run for the CLI
const params = { useApi: false };
it('should correctly backup and restore backup10m using --output', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 90);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
// Use abort parameter to terminate the backup
const p = u.p(params, { abort: true }, { opts: { output: actualBackup, log: logFile } });
const restoreDb = this.dbName;
// Set a ceiling on the document count; the resumed backup should write
// fewer than this.
p.exclusiveMaxExpected = 5096;
return u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
});
});
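For context, here is a minimal sketch (not part of the diff) of the abort-and-resume flow these tests exercise. It assumes COUCH_URL is set in the environment, as in the test suite, and uses the CLI entry point path and flags assembled by test_process.js below; the real tests abort when a completed-batch line appears in the log rather than on a timer, and they assert that the resumed run writes fewer than exclusiveMaxExpected documents.

const { spawn } = require('node:child_process');

function backupArgs(db, backupFile, logFile, resume) {
  // Mirrors the flags built by cliBackup: --db, --output, --log, --resume
  const args = ['--db', db, '--output', backupFile, '--log', logFile];
  if (resume) {
    args.push('--resume', 'true');
  }
  return args;
}

// First pass: start the backup, then SIGTERM it part-way through.
// (The tests tail the log for a ':d batch' line instead of using a timer.)
const first = spawn('./bin/couchbackup.bin.js', backupArgs('backup10m', 'backup.txt', 'backup.log', false));
setTimeout(() => first.kill(), 5000);

first.on('close', () => {
  // Second pass: rerun with --resume true so the log determines which
  // batches still need to be fetched; afterwards the file can be restored
  // and compared, as testBackupAbortResumeRestore does.
  spawn('./bin/couchbackup.bin.js', backupArgs('backup10m', 'backup.txt', 'backup.log', true), { stdio: 'inherit' });
});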

@@ -1,2 +1,2 @@

-// Copyright © 2023 IBM Corp. All rights reserved.
+// Copyright © 2017, 2023 IBM Corp. All rights reserved.
 //

@@ -15,87 +15,121 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it beforeEach */
'use strict';
const chunk = require('lodash/chunk');
const difference = require('lodash/difference');
const forOwn = require('lodash/forOwn');
const isEmpty = require('lodash/isEmpty');
const union = require('lodash/union');
const assert = require('assert');
const fs = require('fs');
const nock = require('nock');
const request = require('../includes/request.js');
const writer = require('../includes/writer.js');
const noopEmitter = new (require('events')).EventEmitter();
const liner = require('../includes/liner.js');
const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');
const longTestTimeout = 3000;
const compare = async function(database1, database2, client) {
// check docs same in both dbs
const allDocs1 = await getAllDocs(client, database1);
const allDocs2 = await getAllDocs(client, database2);
describe('#unit Check database restore writer', function() {
const dbUrl = 'http://localhost:5984/animaldb';
const db = request.client(dbUrl, { parallelism: 1 });
const onlyInDb1 = (difference(allDocs1, allDocs2));
const onlyInDb2 = (difference(allDocs2, allDocs1));
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
let databasesSame = isEmpty(onlyInDb1) && isEmpty(onlyInDb2);
it('should complete successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, []); // success
if (!databasesSame) {
console.log(onlyInDb1.length + ' documents only in db 1.');
console.log('Document IDs only in db 1: ' + onlyInDb1);
console.log(onlyInDb2.length + ' documents only in db 2.');
console.log('Document IDs only in db 2: ' + onlyInDb2);
}
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
});
// check revs same in docs common to both dbs
const partitionSize = 500;
const batches = chunk(union(allDocs1, allDocs2), partitionSize);
it('should terminate on a fatal error', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(401, { error: 'Unauthorized' }); // fatal error
const missingRevsInDb2 = await getMissingRevs(client, database1, database2, batches);
const missingRevsInDb1 = await getMissingRevs(client, database2, database1, batches);
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Unauthorized');
assert.strictEqual(err.message, 'Access is denied due to invalid credentials.');
assert.ok(nock.isDone());
return true;
}
);
});
databasesSame = databasesSame && isEmpty(missingRevsInDb1) && isEmpty(missingRevsInDb2);
it('should retry on transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(200, { ok: true }); // third time lucky success
if (!databasesSame) {
console.log('Missing revs in db 1:' + JSON.stringify(missingRevsInDb1));
console.log('Missing revs in db 2:' + JSON.stringify(missingRevsInDb2));
}
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
}).timeout(longTestTimeout);
return databasesSame;
};
it('should fail after 3 transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(503, { error: 'Service Unavailable' }); // Final transient error
const getMissingRevs = async(client, databaseName1, databaseName2, batcheses) => {
const fakeRevisionId = '9999-a';
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 : post ${dbUrl}/_bulk_docs - Error: Service Unavailable`);
assert.ok(nock.isDone());
return true;
}
);
}).timeout(longTestTimeout);
const missing = {};
it('should restore shallow backups without rev info successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ ok: true, id: 'foo', rev: '1-abc' }]); // success
// look in db1 - use a fake revision ID to fetch all leaf revisions
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_old_shallow.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 11);
assert.ok(nock.isDone());
})]);
});
for (const batches of batcheses) {
const documentRevisions = {};
batches.forEach(id => (documentRevisions[id] = [fakeRevisionId]));
it('should get a batch error for non-empty array response with new_edits false', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ id: 'foo', error: 'foo', reason: 'bar' }]);
const result1 = await client.postRevsDiff({ db: databaseName1, documentRevisions });
const revsDiffRequestDb2 = {};
forOwn(result1.result, (v, k) => (revsDiffRequestDb2[k] = v.possible_ancestors));
// look in db2
const result2 = await client.postRevsDiff({ db: databaseName2, documentRevisions: revsDiffRequestDb2 });
forOwn(result2.result, (v, k) => {
if ('missing' in v) {
missing[k] = v.missing;
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Error');
assert.strictEqual(err.message, 'Error writing batch with new_edits:false and 1 items');
assert.ok(nock.isDone());
return true;
}
});
}
return missing;
};
const getAllDocs = async function(client, database) {
let allDocIds = [];
const limit = 2000;
let startKey = '\u0000';
do {
const pageOfDocIds = (await client.postAllDocs({ db: database, startKey, limit })).result.rows.map(r => r.id);
allDocIds = allDocIds.concat(pageOfDocIds);
if (pageOfDocIds.length < limit) {
startKey = null;
} else {
startKey = pageOfDocIds[limit - 1] + '\u0000';
}
} while (startKey != null);
return allDocIds;
};
module.exports = {
compare
};
);
});
});
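A pattern worth noting in these writer tests: Promise.all pairs the promisified stream pipeline with events.once on the writer's 'finished' event, so the test fails if either the stream plumbing or the summary assertion fails. A minimal standalone sketch of that pattern, assuming a sink that emits 'finished' with a summary object, as couchbackup's writer does:

const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');

// sink is assumed to emit 'finished' with a summary ({ total }) when done,
// like the writer above; source is any readable stream of backup lines.
async function runAndAwaitSummary(source, liner, sink) {
  const [, [summary]] = await Promise.all([
    pipeline(source, liner, sink),
    once(sink, 'finished') // resolves with the array of event arguments
  ]);
  return summary; // e.g. assert.strictEqual(summary.total, 15)
}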

@@ -1,2 +1,2 @@

-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2023 IBM Corp. All rights reserved.
 //

@@ -18,433 +18,135 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const assert = require('node:assert');
const { fork, spawn } = require('node:child_process');
const { once } = require('node:events');
const fs = require('node:fs');
const { PassThrough } = require('node:stream');
const { pipeline } = require('node:stream/promises');
const { createGzip, createGunzip } = require('node:zlib');
const { Duplex } = require('node:stream');
const debug = require('debug');
const { Tail } = require('tail');
const app = require('../app.js');
const dbUrl = require('../includes/cliutils.js').databaseUrl;
const compare = require('./compare.js');
const request = require('../includes/request.js');
const { cliBackup, cliDecrypt, cliEncrypt, cliGzip, cliGunzip, cliRestore } = require('./test_process.js');
const testLogger = debug('couchbackup:test:utils');
const logProcess = debug('couchbackup:test:process');
function scenario(test, params) {
return `${test} ${(params.useApi) ? 'using API' : 'using CLI'}`;
}
function params() {
const p = {};
for (let i = 0; i < arguments.length; i++) {
Object.assign(p, arguments[i]);
}
return p;
}
// Returns the event emitter for API calls, or the child process for CLI calls
async function testBackup(params, databaseName, outputStream) {
const pipelineStreams = [];
const promises = [];
// Configure API key if needed
augmentParamsWithApiKey(params);
let backup;
let backupStream;
let backupPromise;
let tail;
if (params.useApi) {
if (params.useStdOut) {
backupStream = outputStream;
} else {
backupStream = new PassThrough();
class TestProcess {
constructor(cmd, args, mode) {
this.cmd = cmd;
// Child process stdio [stdin, stdout, stderr, ...extra channels]
const childProcessOptions = { stdio: [] };
switch (mode) {
case 'readable':
// Readable only, no writing to stdin so ignore it
childProcessOptions.stdio = ['ignore', 'pipe', 'inherit'];
break;
case 'writable':
// Writable only, no reading from stdout so ignore it
childProcessOptions.stdio = ['pipe', 'ignore', 'inherit'];
break;
default:
// Default Duplex mode pipe both stdin and stdout
childProcessOptions.stdio = ['pipe', 'pipe', 'inherit'];
break;
}
const backupCallbackPromise = new Promise((resolve, reject) => {
backup = app.backup(
dbUrl(process.env.COUCH_URL, databaseName),
backupStream,
params.opts,
(err, data) => {
if (err) {
testLogger(`API backup callback with ${JSON.stringify(err)}, will reject.`);
reject(err);
} else {
testLogger(`API backup callback with ${JSON.stringify(data)}, will resolve.`);
resolve(data);
}
});
});
const backupFinshedPromise = once(backup, 'finished')
.then((summary) => {
testLogger(`Resolving API backup event promise with ${JSON.stringify(summary)}`);
if (params.resume) {
assertWrittenFewerThan(summary.total, params.exclusiveMaxExpected);
}
})
.catch((err) => {
testLogger(`Rejecting API backup event promise with error ${JSON.stringify(err)}`);
throw err;
});
backupPromise = Promise.all([backupCallbackPromise, backupFinshedPromise])
.then(() => {
testLogger('Both API backup promises resolved.');
});
} else {
backup = cliBackup(databaseName, params);
backupStream = backup.stream;
backupPromise = backup.childProcessPromise;
if (params.abort) {
// Create the log file for abort tests so we can tail it; other tests
// assert that the backup process creates the log file itself.
const f = fs.openSync(params.opts.log, 'w');
fs.closeSync(f);
// Use tail to watch the log file for a batch to be completed then abort
tail = new Tail(params.opts.log, { useWatchFile: true, fsWatchOptions: { interval: 500 }, follow: false });
tail.on('line', function(data) {
const matches = data.match(/:d batch\d+/);
if (matches !== null) {
// Turn off the tail.
tail.unwatch();
// Abort the backup
backup.childProcess.kill();
}
});
}
if (params.resume) {
const listenerPromise = new Promise((resolve, reject) => {
const listener = function(data) {
const matches = data.toString().match(/.*Finished - Total document revisions written: (\d+).*/);
if (matches !== null) {
try {
assertWrittenFewerThan(matches[1], params.exclusiveMaxExpected);
resolve();
} catch (err) {
reject(err);
}
process.stderr.removeListener('data', listener);
}
};
backup.childProcess.stderr.on('data', listener);
});
promises.push(listenerPromise);
}
}
promises.push(backupPromise);
if (!params.useStdOut) {
pipelineStreams.push(backupStream);
}
if (params.compression) {
if (params.useApi) {
pipelineStreams.push(createGzip());
if (cmd.endsWith('.js')) {
// Add Node fork ipc channel
childProcessOptions.stdio.push('ipc');
logProcess(`Forking Node process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = fork(cmd, args, childProcessOptions);
} else {
const gzipProcess = cliGzip();
pipelineStreams.push(gzipProcess.stream);
promises.push(gzipProcess.childProcessPromise);
logProcess(`Spawning process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = spawn(cmd, args, childProcessOptions);
}
}
// Pipe via encryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
const encryptProcess = cliEncrypt();
pipelineStreams.push(encryptProcess.stream);
promises.push(encryptProcess.childProcessPromise);
}
}
if (!params.useStdOut) {
// Finally add the outputStream to the list we want to pipeline
pipelineStreams.push(outputStream);
// Create the promisified pipeline and add it to the array of promises we'll wait for
promises.unshift(pipeline(pipelineStreams));
}
// Wait for the promises and then assert
return Promise.all(promises)
.then(() => testLogger('All backup promises resolved.'))
.then(() => {
if (params.expectedBackupError) {
return Promise.reject(new Error('Backup passed when it should have failed.'));
}
})
.catch((err) => {
if (params.expectedBackupError || params.abort) {
if (params.useApi) {
assert.strictEqual(err.name, params.expectedBackupError.name, 'The backup should receive the expected error.');
} else {
if (params.abort) {
// The tail should already be stopped when we matched a line and aborted,
// but make sure it is stopped in case something went wrong
tail.unwatch();
// Assert that the process was aborted as expected
assert.strictEqual(err.signal, 'SIGTERM', `The backup should have terminated with SIGTERM, but was ${err.signal}.`);
} else if (params.expectedBackupError) {
assert.strictEqual(err.code, params.expectedBackupError.code, `The backup exited with unexpected code ${err.code} and signal ${err.signal}.`);
}
}
this.childProcessPromise = once(this.childProcess, 'close').then(() => {
const code = this.childProcess.exitCode;
const signal = this.childProcess.signalCode;
logProcess(`Test process ${cmd} closed with code ${code} and signal ${signal}`);
if (code === 0) {
logProcess(`Resolving process promise for ${cmd}`);
return Promise.resolve(code);
} else {
return Promise.reject(err);
const e = new Error(`Test child process ${cmd} exited with code ${code} and signal ${signal}. This may be normal for error case testing.`);
e.code = code;
e.signal = signal;
logProcess(`Will reject process promise for ${cmd} with ${e}`);
return Promise.reject(e);
}
});
}
async function testRestore(params, inputStream, databaseName) {
const pipelineStreams = [inputStream];
const promises = [];
// Configure API key if needed
augmentParamsWithApiKey(params);
let restore;
let restoreStream;
let restorePromise;
if (params.useApi) {
restoreStream = new PassThrough();
const restoreCallbackPromise = new Promise((resolve, reject) => {
restore = app.restore(
restoreStream,
dbUrl(process.env.COUCH_URL, databaseName),
params.opts,
(err, data) => {
if (err) {
testLogger(`API restore callback with ${err}, will reject.`);
reject(err);
} else {
resolve(data);
}
});
});
const restoreFinshedPromise = once(restore, 'finished')
.then((summary) => {
testLogger(`Resolving API restore promise with ${summary}`);
})
.catch((err) => {
testLogger(`Handling API restore error event ${JSON.stringify(err)}`);
if (params.expectedRestoreErrorRecoverable) {
testLogger(`Expecting restore error ${params.expectedRestoreErrorRecoverable.name}`);
assert.strictEqual(err.name, params.expectedRestoreErrorRecoverable.name, 'The restore should receive the expected recoverable error.');
} else {
testLogger(`API restore will reject by throwing error event ${JSON.stringify(err)}`);
return Promise.reject(err);
}
});
restorePromise = Promise.all([restoreCallbackPromise, restoreFinshedPromise]);
} else {
restore = cliRestore(databaseName, params);
restoreStream = restore.stream;
restorePromise = restore.childProcessPromise;
}
promises.push(restorePromise);
// Pipe via decompression if requested
if (params.compression) {
if (params.useApi) {
pipelineStreams.push(createGunzip());
} else {
const gunzipProcess = cliGunzip();
pipelineStreams.push(gunzipProcess.stream);
promises.push(gunzipProcess.childProcessPromise);
switch (mode) {
case 'readable':
this.duplexFrom = this.childProcess.stdout;
break;
case 'writable':
this.duplexFrom = this.childProcess.stdin;
break;
default:
// Default is duplex
this.duplexFrom = { writable: this.childProcess.stdin, readable: this.childProcess.stdout };
}
}
// Pipe via decryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
const decryptProcess = cliDecrypt();
pipelineStreams.push(decryptProcess.stream);
promises.push(decryptProcess.childProcessPromise);
}
this.stream = Duplex.from(this.duplexFrom);
}
}
// pipeline everything into the restoreStream
pipelineStreams.push(restoreStream);
// Create the promisified pipeline and add it to the array of promises we'll wait for
promises.unshift(pipeline(pipelineStreams));
// Wait for all the promises to settle and then assert based on the process promise
return Promise.allSettled(promises)
.then(() => { return restorePromise; })
.then((summary) => {
testLogger(`Restore promise resolved with ${summary}.`);
if (params.expectedRestoreError) {
return Promise.reject(new Error('Restore passed when it should have failed.'));
module.exports = {
TestProcess,
cliBackup: function(databaseName, params = {}) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.mode) {
args.push('--mode');
args.push(params.opts.mode);
}
})
.catch((err) => {
testLogger(`Restore promise rejected with ${err}.`);
if (params.expectedRestoreError) {
if (params.useApi) {
assert.strictEqual(err.name, params.expectedRestoreError.name, 'The restore should receive the expected error.');
} else {
assert.strictEqual(err.code, params.expectedRestoreError.code, `The restore exited with unexpected code ${err.code} and signal ${err.signal}.`);
}
} else {
return Promise.reject(err);
if (params.opts.output) {
args.push('--output');
args.push(params.opts.output);
}
});
}
// Serial backup and restore via a file on disk
async function testBackupAndRestoreViaFile(params, srcDb, backupFile, targetDb) {
return testBackupToFile(params, srcDb, backupFile).then(() => {
return testRestoreFromFile(params, backupFile, targetDb);
});
}
async function testBackupToFile(params, srcDb, backupFile) {
// Open the file for appending if this is a resume
const output = fs.createWriteStream(backupFile, { flags: (params.opts && params.opts.resume) ? 'a' : 'w' });
return once(output, 'open')
.then(() => {
return testBackup(params, srcDb, output);
});
}
async function testRestoreFromFile(params, backupFile, targetDb) {
const input = fs.createReadStream(backupFile);
return once(input, 'open')
.then(() => {
return testRestore(params, input, targetDb);
});
}
async function testDirectBackupAndRestore(params, srcDb, targetDb) {
// Allow a 64 MB highWaterMark for the passthrough during testing
const passthrough = new PassThrough({ highWaterMark: 67108864 });
const backup = testBackup(params, srcDb, passthrough);
const restore = testRestore(params, passthrough, targetDb);
return Promise.all([backup, restore]).then(() => {
return dbCompare(srcDb, targetDb);
});
}
async function testBackupAbortResumeRestore(params, srcDb, backupFile, targetDb) {
return Promise.resolve()
.then(() => {
// First backup with an abort
if (params.opts && params.opts.output) {
return testBackup(params, srcDb, new PassThrough());
} else {
return testBackupToFile(params, srcDb, backupFile);
if (params.opts.log) {
args.push('--log');
args.push(params.opts.log);
}
}).then(() => {
// Remove the abort parameter and add the resume parameter
delete params.abort;
params.opts.resume = true;
// Resume the backup
if (params.opts && params.opts.output) {
return testBackup(params, srcDb, new PassThrough());
} else {
return testBackupToFile(params, srcDb, backupFile);
if (params.opts.resume) {
args.push('--resume');
args.push(params.opts.resume);
}
}).then(() => {
// Restore the backup
return testRestoreFromFile(params, backupFile, targetDb);
}).then(() => {
// Now compare the restored database to the original for validation
return dbCompare(srcDb, targetDb);
});
}
async function dbCompare(db1Name, db2Name) {
const client = request.client(process.env.COUCH_BACKEND_URL, {});
return compare.compare(db1Name, db2Name, client.service)
.then(result => {
return assert.strictEqual(result, true, 'The database comparison should succeed, but failed');
});
}
function sortByIdThenRev(o1, o2) {
if (o1._id < o2._id) return -1;
if (o1._id > o2._id) return 1;
if (o1._rev < o2._rev) return -1;
if (o1._rev > o2._rev) return 1;
return 0;
}
function readSortAndDeepEqual(actualContentPath, expectedContentPath) {
const backupContent = JSON.parse(fs.readFileSync(actualContentPath, 'utf8'));
const expectedContent = JSON.parse(fs.readFileSync(expectedContentPath, 'utf8'));
// Array order of the docs is important for equality, but not for backup
backupContent.sort(sortByIdThenRev);
expectedContent.sort(sortByIdThenRev);
// Assert that the backup matches the expected
assert.deepStrictEqual(backupContent, expectedContent);
}
function setTimeout(context, timeout) {
// Increase timeout using TEST_TIMEOUT_MULTIPLIER
const multiplier = (typeof process.env.TEST_TIMEOUT_MULTIPLIER !== 'undefined') ? parseInt(process.env.TEST_TIMEOUT_MULTIPLIER) : 1;
timeout *= multiplier;
// Set the mocha timeout
context.timeout(timeout * 1000);
}
function assertGzipFile(path) {
// 1f 8b is the gzip magic number
const expectedBytes = Buffer.from([0x1f, 0x8b]);
const buffer = Buffer.alloc(2);
const fd = fs.openSync(path, 'r');
// Read the first two bytes
fs.readSync(fd, buffer, 0, 2, 0);
fs.closeSync(fd);
// Assert the magic number corresponds to gz extension
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be gz compressed.');
}
function assertEncryptedFile(path) {
// OpenSSL-encrypted files start with "Salted"
const expectedBytes = Buffer.from('Salted');
const buffer = Buffer.alloc(6);
const fd = fs.openSync(path, 'r');
// Read the first six bytes
fs.readSync(fd, buffer, 0, 6, 0);
fs.closeSync(fd);
// Assert first 6 characters of the file are "Salted"
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be encrypted.');
}
function assertWrittenFewerThan(total, number) {
assert(total < number && total > 0, `Saw ${total} but expected between 1 and ${number - 1} documents for the resumed backup.`);
}
function augmentParamsWithApiKey(params) {
if (process.env.COUCHBACKUP_TEST_IAM_API_KEY) {
if (!params.opts) {
params.opts = {};
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
params.opts.iamApiKey = process.env.COUCHBACKUP_TEST_IAM_API_KEY;
params.opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
return new TestProcess('./bin/couchbackup.bin.js', args, 'readable');
},
cliRestore: function(databaseName, params) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.parallelism) {
args.push('--parallelism');
args.push(params.opts.parallelism);
}
if (params.opts.requestTimeout) {
args.push('--request-timeout');
args.push(params.opts.requestTimeout);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchrestore.bin.js', args, 'writable');
},
cliGzip: function() {
return new TestProcess('gzip', []);
},
cliGunzip: function() {
return new TestProcess('gunzip', []);
},
cliEncrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-pass', 'pass:12345']);
},
cliDecrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-d', '-pass', 'pass:12345']);
}
}
module.exports = {
scenario,
p: params,
setTimeout,
dbCompare,
readSortAndDeepEqual,
assertGzipFile,
assertEncryptedFile,
testBackup,
testRestore,
testDirectBackupAndRestore,
testBackupToFile,
testRestoreFromFile,
testBackupAndRestoreViaFile,
testBackupAbortResumeRestore
};
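The TestProcess class above turns a spawned command into a single stream by passing Duplex.from either one stdio stream or a { writable, readable } pair, and wraps process exit in a promise. A standalone sketch of that wiring, under the assumption of Node 16.8+ (which provides Duplex.from); gzip is just an example command:

const { spawn } = require('node:child_process');
const { once } = require('node:events');
const { Duplex } = require('node:stream');

function commandAsDuplex(cmd, args = []) {
  // Pipe stdin and stdout so the process can sit inside a stream pipeline;
  // inherit stderr for visibility, as TestProcess does above.
  const child = spawn(cmd, args, { stdio: ['pipe', 'pipe', 'inherit'] });
  // Settle when the process closes, like childProcessPromise above:
  // resolve on exit code 0, reject with code/signal attached otherwise.
  const closed = once(child, 'close').then(() => {
    if (child.exitCode !== 0) {
      const e = new Error(`${cmd} exited with code ${child.exitCode} and signal ${child.signalCode}`);
      e.code = child.exitCode;
      e.signal = child.signalCode;
      throw e;
    }
    return child.exitCode;
  });
  return { stream: Duplex.from({ writable: child.stdin, readable: child.stdout }), closed };
}

// Usage sketch: pipeline(backupStream, commandAsDuplex('gzip').stream, fileStream)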

@@ -15,24 +15,170 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it */
/* global describe it beforeEach */
'use strict';
const u = require('./citestutils.js');
const assert = require('assert');
const backup = require('../includes/shallowbackup.js');
const request = require('../includes/request.js');
const fs = require('fs');
const nock = require('nock');
describe('Event tests', function() {
it('should get a finished event when using stdout', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events; pass eventEmitter so we get the emitter back
const params = { useApi: true, useStdOut: true };
// All API backups now set a listener for the finished event as part of the
// backup promise, so if the backup passes, the finished event fired.
return u.testBackup(params, 'animaldb', process.stdout);
// Function to create a DB object and call the shallow backup function
// This is normally done by app.js
function shallowBackup(dbUrl, opts) {
const db = request.client(dbUrl, opts);
// Disable compression to make body assertions easier
db.service.setEnableGzipCompression(false);
return backup(db, opts);
}
// Note: all these tests include an include_docs body parameter and an
// include_docs query string because of a quirk of nano: the fetch method
// always adds the include_docs query string.
describe('#unit Perform backup using shallow backup', function() {
const dbUrl = 'http://localhost:5984/animaldb';
// Query string keys are stringified by Nano
const badgerKey = 'badger\0';
const kookaburraKey = 'kookaburra\0';
const snipeKey = 'snipe\0';
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
it('should get a finished event when using file output', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events; pass eventEmitter so we get the emitter back
const params = { useApi: true };
const actualBackup = `./${this.fileName}`;
return u.testBackupToFile(params, 'animaldb', actualBackup);
it('should perform a shallow backup', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
reject(err);
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('should perform a shallow backup with transient error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - transient error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(500, { error: 'Internal Server Error' })
// batch 3 - retry
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
assert.strictEqual(err.name, 'HTTPError');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('should fail to perform a shallow backup on fatal error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - fatal error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(401, { error: 'Unauthorized' });
let errCount = 0;
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
errCount++;
assert.strictEqual(err.name, 'Unauthorized');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
assert.strictEqual(data.length, 3);
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 6);
assert.ok(couch.isDone());
assert.strictEqual(errCount, 1);
resolve();
} catch (err) {
reject(err);
}
});
});
});
});
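The fixture start keys above (badger\0, kookaburra\0, snipe\0) show how shallow backup pages _all_docs: each request's start_key is the last document ID of the previous batch with a trailing \u0000 appended, so the next page begins strictly after it. A hedged sketch of that key computation; fetchBatch is a hypothetical stand-in for the real POST /_all_docs request code:

// fetchBatch is hypothetical: it POSTs /_all_docs with the given options
// and resolves with the rows array, like the nock fixtures above.
async function pageAllDocs(fetchBatch, bufferSize) {
  let startKey; // undefined on the first request, as in batch 1 above
  let rows;
  do {
    rows = await fetchBatch({ limit: bufferSize, start_key: startKey, include_docs: true });
    if (rows.length > 0) {
      // Appending \u0000 produces the smallest key sorting after the last
      // ID, so pagination resumes immediately after it without overlap.
      startKey = rows[rows.length - 1].id + '\u0000';
    }
  } while (rows.length === bufferSize);
}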

@@ -1,2 +1,2 @@

-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2023 IBM Corp. All rights reserved.
 //

@@ -15,174 +15,87 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it */
'use strict';
const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const nock = require('nock');
const util = require('util');
const backupPromise = util.promisify(backup);
const chunk = require('lodash/chunk');
const difference = require('lodash/difference');
const forOwn = require('lodash/forOwn');
const isEmpty = require('lodash/isEmpty');
const union = require('lodash/union');
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl to reply 404, so the backup fails with a
// DatabaseNotFound error once validation has passed.
const validateArgs = async function(url, opts, errorValidationForAssertRejects) {
const nullStream = fs.createWriteStream('/dev/null');
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
const compare = async function(database1, database2, client) {
// check docs same in both dbs
const allDocs1 = await getAllDocs(client, database1);
const allDocs2 = await getAllDocs(client, database2);
const onlyInDb1 = (difference(allDocs1, allDocs2));
const onlyInDb2 = (difference(allDocs2, allDocs1));
let databasesSame = isEmpty(onlyInDb1) && isEmpty(onlyInDb2);
if (!databasesSame) {
console.log(onlyInDb1.length + ' documents only in db 1.');
console.log('Document IDs only in db 1: ' + onlyInDb1);
console.log(onlyInDb2.length + ' documents only in db 2.');
console.log('Document IDs only in db 2: ' + onlyInDb2);
}
return assert.rejects(backupPromise(url, nullStream, opts), errorValidationForAssertRejects);
};
const validateShallowModeArgs = async function(url, opts, msg) {
// We pass assertNoValidationError because for these shallow opts
// we are expecting only a stderr warning
return validateArgs(url, opts, assertNoValidationError()).then(() => {
// Assert the warning message was in stderr
assert(capturedStderr.indexOf(msg) > -1, 'Log warning message was not present');
});
// check revs same in docs common to both dbs
const partitionSize = 500;
const batches = chunk(union(allDocs1, allDocs2), partitionSize);
const missingRevsInDb2 = await getMissingRevs(client, database1, database2, batches);
const missingRevsInDb1 = await getMissingRevs(client, database2, database1, batches);
databasesSame = databasesSame && isEmpty(missingRevsInDb1) && isEmpty(missingRevsInDb2);
if (!databasesSame) {
console.log('Missing revs in db 1:' + JSON.stringify(missingRevsInDb1));
console.log('Missing revs in db 2:' + JSON.stringify(missingRevsInDb2));
}
return databasesSame;
};
const stderrWriteFun = process.stderr.write;
let capturedStderr;
const getMissingRevs = async(client, databaseName1, databaseName2, batcheses) => {
const fakeRevisionId = '9999-a';
function captureStderr() {
process.stderr.write = function(string, encoding, fd) {
capturedStderr += string;
};
}
const missing = {};
function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}
// look in db1 - use a fake revision ID to fetch all leaf revisions
// Return a validation object for use with assert.rejects
function assertErrorMessage(msg) {
return { name: 'InvalidOption', message: msg };
}
for (const batches of batcheses) {
const documentRevisions = {};
batches.forEach(id => (documentRevisions[id] = [fakeRevisionId]));
// For cases where validation should pass we reach a real backup that hits a 404
// mock for a DatabaseNotFound, so that error is the expected outcome in the
// assertNoValidationError case
function assertNoValidationError() { return { name: 'DatabaseNotFound' }; }
const result1 = await client.postRevsDiff({ db: databaseName1, documentRevisions });
const revsDiffRequestDb2 = {};
forOwn(result1.result, (v, k) => (revsDiffRequestDb2[k] = v.possible_ancestors));
// look in db2
const result2 = await client.postRevsDiff({ db: databaseName2, documentRevisions: revsDiffRequestDb2 });
forOwn(result2.result, (v, k) => {
if ('missing' in v) {
missing[k] = v.missing;
}
});
}
return missing;
};
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', async function() {
return validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string'));
});
it('returns no error for valid URL type', async function() {
return validateArgs(goodUrl, {}, assertNoValidationError());
});
it('returns error for invalid (no host) URL', async function() {
return validateArgs('http://', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (no protocol) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (wrong protocol) URL', async function() {
return validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.'));
});
it('returns error for invalid (no path) URL', async function() {
return validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).'));
});
it('returns error for invalid (no protocol, no host) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: 123 }, assertNoValidationError());
});
it('returns error for invalid log type', async function() {
return validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string'));
});
it('returns no error for valid log type', async function() {
return validateArgs(goodUrl, { log: 'log.txt' }, assertNoValidationError());
});
it('returns error for invalid mode type', async function() {
return validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns error for invalid mode string', async function() {
return validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns no error for valid mode type', async function() {
return validateArgs(goodUrl, { mode: 'full' }, assertNoValidationError());
});
it('returns error for invalid output type', async function() {
return validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string'));
});
it('returns no error for valid output type', async function() {
return validateArgs(goodUrl, { output: 'output.txt' }, assertNoValidationError());
});
it('returns error for invalid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: 123 }, assertNoValidationError());
});
it('returns error for invalid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: 123 }, assertNoValidationError());
});
it('returns error for invalid resume type', async function() {
return validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean'));
});
it('returns no error for valid resume type', async function() {
return validateArgs(goodUrl, { resume: false }, assertNoValidationError());
});
it('returns error for invalid key type', async function() {
return validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string'));
});
it('returns error for key and URL credentials supplied', async function() {
return validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.'));
});
it('warns for log arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test' },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for resume arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for parallelism arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', parallelism: 10 },
'the option "parallelism" has no effect when using shallow mode.').finally(
() => {
releaseStderr();
});
});
});
const getAllDocs = async function(client, database) {
let allDocIds = [];
const limit = 2000;
let startKey = '\u0000';
do {
const pageOfDocIds = (await client.postAllDocs({ db: database, startKey, limit })).result.rows.map(r => r.id);
allDocIds = allDocIds.concat(pageOfDocIds);
if (pageOfDocIds.length < limit) {
startKey = null;
} else {
startKey = pageOfDocIds[limit - 1] + '\u0000';
}
} while (startKey != null);
return allDocIds;
};
module.exports = {
compare
};
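To make the fake-revision trick in getMissingRevs concrete: posting a revision like 9999-a that cannot exist makes _revs_diff report every real leaf revision of the document as possible_ancestors, and checking those against the second database yields exactly the revisions it lacks under missing. A condensed single-document sketch, assuming the same Cloudant client used above:

// Condensed, single-document sketch of the getMissingRevs approach above.
async function leafRevsMissingFromDb2(client, db1, db2, docId) {
  // '9999-a' cannot be a real revision, so _revs_diff on db1 returns all
  // actual leaf revisions of the document as possible_ancestors.
  const probe = await client.postRevsDiff({
    db: db1,
    documentRevisions: { [docId]: ['9999-a'] }
  });
  const leafRevs = probe.result[docId].possible_ancestors || [];
  // Ask db2 which of db1's leaf revisions it is missing.
  const diff = await client.postRevsDiff({
    db: db2,
    documentRevisions: { [docId]: leafRevs }
  });
  return (diff.result[docId] && diff.result[docId].missing) || [];
}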

@@ -15,316 +15,436 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe afterEach before after it */
/* global */
'use strict';
const assert = require('assert');
const parser = require('../includes/parser.js');
const assert = require('node:assert');
const { once } = require('node:events');
const fs = require('node:fs');
const { PassThrough } = require('node:stream');
const { pipeline } = require('node:stream/promises');
const { createGzip, createGunzip } = require('node:zlib');
const debug = require('debug');
const { Tail } = require('tail');
const app = require('../app.js');
const dbUrl = require('../includes/cliutils.js').databaseUrl;
const compare = require('./compare.js');
const request = require('../includes/request.js');
const { cliBackup, cliDecrypt, cliEncrypt, cliGzip, cliGunzip, cliRestore } = require('./test_process.js');
const testLogger = debug('couchbackup:test:utils');
describe('#unit Default parameters', function() {
let processEnvCopy;
let processArgvCopy;
function scenario(test, params) {
return `${test} ${(params.useApi) ? 'using API' : 'using CLI'}`;
}
before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
processArgvCopy = JSON.parse(JSON.stringify(process.argv));
function params() {
const p = {};
for (let i = 0; i < arguments.length; i++) {
Object.assign(p, arguments[i]);
}
return p;
}
// setup environment variables
process.env.COUCH_URL = 'http://user:pass@myurl.com';
process.env.COUCH_DATABASE = 'mydb';
process.env.COUCH_BUFFER_SIZE = '1000';
process.env.COUCH_PARALLELISM = '20';
process.env.COUCH_REQUEST_TIMEOUT = '20000';
process.env.COUCH_LOG = 'my.log';
process.env.COUCH_RESUME = 'true';
process.env.COUCH_OUTPUT = 'myfile.txt';
process.env.COUCH_MODE = 'shallow';
process.env.CLOUDANT_IAM_API_KEY = 'ABC123-ZYX987_cba789-xyz321';
process.env.COUCH_QUIET = 'true';
});
// Returns the event emitter for API calls, or the child process for CLI calls
async function testBackup(params, databaseName, outputStream) {
const pipelineStreams = [];
const promises = [];
after('Reset process data', function() {
process.env = processEnvCopy;
process.argv = processArgvCopy;
});
// Configure API key if needed
augmentParamsWithApiKey(params);
afterEach(function() {
delete require.cache[require.resolve('commander')];
});
describe('Backup command-line', function() {
it('respects the COUCH_URL env variable if the --url backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
let backup;
let backupStream;
let backupPromise;
let tail;
if (params.useApi) {
if (params.useStdOut) {
backupStream = outputStream;
} else {
backupStream = new PassThrough();
}
const backupCallbackPromise = new Promise((resolve, reject) => {
backup = app.backup(
dbUrl(process.env.COUCH_URL, databaseName),
backupStream,
params.opts,
(err, data) => {
if (err) {
testLogger(`API backup callback with ${JSON.stringify(err)}, will reject.`);
reject(err);
} else {
testLogger(`API backup callback with ${JSON.stringify(data)}, will resolve.`);
resolve(data);
}
});
});
const backupFinshedPromise = once(backup, 'finished')
.then((summary) => {
testLogger(`Resolving API backup event promise with ${JSON.stringify(summary)}`);
if (params.resume) {
assertWrittenFewerThan(summary.total, params.exclusiveMaxExpected);
}
})
.catch((err) => {
testLogger(`Rejecting API backup event promise with error ${JSON.stringify(err)}`);
throw err;
});
backupPromise = Promise.all([backupCallbackPromise, backupFinshedPromise])
.then(() => {
testLogger('Both API backup promises resolved.');
});
} else {
backup = cliBackup(databaseName, params);
backupStream = backup.stream;
backupPromise = backup.childProcessPromise;
if (params.abort) {
// Create the log file for abort tests so we can tail it; other tests
// assert that the backup process creates the log file itself.
const f = fs.openSync(params.opts.log, 'w');
fs.closeSync(f);
it('respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
// Use tail to watch the log file for a batch to be completed then abort
tail = new Tail(params.opts.log, { useWatchFile: true, fsWatchOptions: { interval: 500 }, follow: false });
tail.on('line', function(data) {
const matches = data.match(/:d batch\d+/);
if (matches !== null) {
// Turn off the tail.
tail.unwatch();
// Abort the backup
backup.childProcess.kill();
}
});
}
if (params.resume) {
const listenerPromise = new Promise((resolve, reject) => {
const listener = function(data) {
const matches = data.toString().match(/.*Finished - Total document revisions written: (\d+).*/);
if (matches !== null) {
try {
assertWrittenFewerThan(matches[1], params.exclusiveMaxExpected);
resolve();
} catch (err) {
reject(err);
}
process.stderr.removeListener('data', listener);
}
};
backup.childProcess.stderr.on('data', listener);
});
promises.push(listenerPromise);
}
}
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
promises.push(backupPromise);
if (!params.useStdOut) {
pipelineStreams.push(backupStream);
}
it('respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
if (params.compression) {
if (params.useApi) {
pipelineStreams.push(createGzip());
} else {
const gzipProcess = cliGzip();
pipelineStreams.push(gzipProcess.stream);
promises.push(gzipProcess.childProcessPromise);
}
}
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
// Pipe via encryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
const encryptProcess = cliEncrypt();
pipelineStreams.push(encryptProcess.stream);
promises.push(encryptProcess.childProcessPromise);
}
}
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
if (!params.useStdOut) {
// Finally add the outputStream to the list we want to pipeline
pipelineStreams.push(outputStream);
it('respects the COUCH_LOG env variable if the --log backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, process.env.COUCH_LOG);
});
// Create the promisified pipeline and add it to the array of promises we'll wait for
promises.unshift(pipeline(pipelineStreams));
}
it('respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
// Wait for the promises and then assert
return Promise.all(promises)
.then(() => testLogger('All backup promises resolved.'))
.then(() => {
if (params.expectedBackupError) {
return Promise.reject(new Error('Backup passed when it should have failed.'));
}
})
.catch((err) => {
if (params.expectedBackupError || params.abort) {
if (params.useApi) {
assert.strictEqual(err.name, params.expectedBackupError.name, 'The backup should receive the expected error.');
} else {
if (params.abort) {
// The tail should already be stopped when we matched a line and aborted,
// but make sure it is stopped in case something went wrong
tail.unwatch();
// Assert that the process was aborted as expected
assert.strictEqual(err.signal, 'SIGTERM', `The backup should have terminated with SIGTERM, but was ${err.signal}.`);
} else if (params.expectedBackupError) {
assert.strictEqual(err.code, params.expectedBackupError.code, `The backup exited with unexpected code ${err.code} and signal ${err.signal}.`);
}
}
} else {
return Promise.reject(err);
}
});
}
it('respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, process.env.COUCH_OUTPUT);
});
async function testRestore(params, inputStream, databaseName) {
const pipelineStreams = [inputStream];
const promises = [];
it('respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, process.env.COUCH_MODE);
});
// Configure API key if needed
augmentParamsWithApiKey(params);
it('respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
let restore;
let restoreStream;
let restorePromise;
it('respects the backup --url command-line parameter', function() {
const url = 'http://user:pass@myurl2.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
if (params.useApi) {
restoreStream = new PassThrough();
const restoreCallbackPromise = new Promise((resolve, reject) => {
restore = app.restore(
restoreStream,
dbUrl(process.env.COUCH_URL, databaseName),
params.opts,
(err, data) => {
if (err) {
testLogger(`API restore callback with ${err}, will reject.`);
reject(err);
} else {
resolve(data);
}
});
});
const restoreFinishedPromise = once(restore, 'finished')
.then((summary) => {
testLogger(`Resolving API restore promise with ${summary}`);
})
.catch((err) => {
testLogger(`Handling API restore error event ${JSON.stringify(err)}`);
if (params.expectedRestoreErrorRecoverable) {
testLogger(`Expecting restore error ${params.expectedRestoreErrorRecoverable.name}`);
assert.strictEqual(err.name, params.expectedRestoreErrorRecoverable.name, 'The restore should receive the expected recoverable error.');
} else {
testLogger(`API restore will reject by throwing error event ${JSON.stringify(err)}`);
return Promise.reject(err);
}
});
restorePromise = Promise.all([restoreCallbackPromise, restoreFinishedPromise]);
} else {
restore = cliRestore(databaseName, params);
restoreStream = restore.stream;
restorePromise = restore.childProcessPromise;
}
promises.push(restorePromise);
it('respects the backup --db command-line parameter', function() {
const db = 'mydb2';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
// Pipe via decompression if requested
if (params.compression) {
if (params.useApi) {
pipelineStreams.push(createGunzip());
} else {
const gunzipProcess = cliGunzip();
pipelineStreams.push(gunzipProcess.stream);
promises.push(gunzipProcess.childProcessPromise);
}
}
it('respects the backup --buffer-size command-line parameter', function() {
const bufferSize = 500;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
// Pipe via decryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
const decryptProcess = cliDecrypt();
pipelineStreams.push(decryptProcess.stream);
promises.push(decryptProcess.childProcessPromise);
}
}
it('respects the backup --parallelism command-line parameter', function() {
const parallelism = 10;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
// pipeline everything into the restoreStream
pipelineStreams.push(restoreStream);
it('respects the backup --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
// Create the promisified pipeline and add it to the array of promises we'll wait for
promises.unshift(pipeline(pipelineStreams));
it('respects the backup --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
// Wait for all the promises to settle and then assert based on the process promise
return Promise.allSettled(promises)
.then(() => { return restorePromise; })
.then((summary) => {
testLogger(`Restore promise resolved with ${summary}.`);
if (params.expectedRestoreError) {
return Promise.reject(new Error('Restore passed when it should have failed.'));
}
})
.catch((err) => {
testLogger(`Restore promise rejected with ${err}.`);
if (params.expectedRestoreError) {
if (params.useApi) {
assert.strictEqual(err.name, params.expectedRestoreError.name, 'The restore should receive the expected error.');
} else {
assert.strictEqual(err.code, params.expectedRestoreError.code, `The restore exited with unexpected code ${err.code} and signal ${err.signal}.`);
}
} else {
return Promise.reject(err);
}
});
}
it('respects the backup --log command-line parameter', function() {
const filename = 'my2.log';
process.argv = ['node', 'test', '--log', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, filename);
});
// Serial backup and restore via a file on disk
async function testBackupAndRestoreViaFile(params, srcDb, backupFile, targetDb) {
return testBackupToFile(params, srcDb, backupFile).then(() => {
return testRestoreFromFile(params, backupFile, targetDb);
});
}
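// Illustrative usage sketch (database and file names are assumptions): a full
// file-based round trip followed by a comparison of source and target.
//
//   await testBackupAndRestoreViaFile({ useApi: true }, 'animaldb', './example.backup', 'animaldb-restored');
//   await dbCompare('animaldb', 'animaldb-restored');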
it('respects the backup --resume command-line parameter', function() {
process.argv = ['node', 'test', '--resume'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
async function testBackupToFile(params, srcDb, backupFile) {
// Open the file for appending if this is a resume
const output = fs.createWriteStream(backupFile, { flags: (params.opts && params.opts.resume) ? 'a' : 'w' });
return once(output, 'open')
.then(() => {
return testBackup(params, srcDb, output);
});
}
it('respects the backup --output command-line parameter', function() {
const filename = 'myfile2.txt';
process.argv = ['node', 'test', '--output', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, filename);
async function testRestoreFromFile(params, backupFile, targetDb) {
const input = fs.createReadStream(backupFile);
return once(input, 'open')
.then(() => {
return testRestore(params, input, targetDb);
});
}
it('respects the backup --mode full command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'full'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'full');
});
it('respects the backup --mode shallow command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'shallow'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'shallow');
});
it('respects the backup --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
async function testDirectBackupAndRestore(params, srcDb, targetDb) {
// Allow a 64 MB highWaterMark for the passthrough during testing
const passthrough = new PassThrough({ highWaterMark: 67108864 });
const backup = testBackup(params, srcDb, passthrough);
const restore = testRestore(params, passthrough, targetDb);
return Promise.all([backup, restore]).then(() => {
return dbCompare(srcDb, targetDb);
});
}
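// The PassThrough above couples the backup directly to the restore, so the
// restore consumes batches while the backup is still producing them; the
// 64 MB highWaterMark keeps the producer from stalling on backpressure.
// Illustrative sketch (database names are assumptions):
//
//   await testDirectBackupAndRestore({ useApi: false }, 'animaldb', 'animaldb-copy');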
describe('Restore command-line', function() {
it('respects the COUCH_URL env variable if the --url restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
async function testBackupAbortResumeRestore(params, srcDb, backupFile, targetDb) {
return Promise.resolve()
.then(() => {
// First backup with an abort
if (params.opts && params.opts.output) {
return testBackup(params, srcDb, new PassThrough());
} else {
return testBackupToFile(params, srcDb, backupFile);
}
}).then(() => {
// Remove the abort parameter and add the resume parameter
delete params.abort;
params.opts.resume = true;
// Resume the backup
if (params.opts && params.opts.output) {
return testBackup(params, srcDb, new PassThrough());
} else {
return testBackupToFile(params, srcDb, backupFile);
}
}).then(() => {
// Restore the backup
return testRestoreFromFile(params, backupFile, targetDb);
}).then(() => {
// Now compare the restored to the original for validation
return dbCompare(srcDb, targetDb);
});
}
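// Illustrative usage sketch (names are assumptions): abort a CLI backup part
// way through, resume it into the same file, then restore and compare.
//
//   const p = { useApi: false, abort: true, opts: { log: './example.log' } };
//   await testBackupAbortResumeRestore(p, 'largedb1g', './example.backup', 'largedb1g-restored');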
it('respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
async function dbCompare(db1Name, db2Name) {
const client = request.client(process.env.COUCH_BACKEND_URL, {});
return compare.compare(db1Name, db2Name, client.service)
.then(result => {
return assert.strictEqual(result, true, 'The database comparison should succeed, but failed');
});
}
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
function sortByIdThenRev(o1, o2) {
if (o1._id < o2._id) return -1;
if (o1._id > o2._id) return 1;
if (o1._rev < o2._rev) return -1;
if (o1._rev > o2._rev) return 1;
return 0;
}
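// Worked example: the comparator orders by _id first, then _rev, giving a
// stable order for the deep-equality check below (values are assumptions).
//
//   [{ _id: 'b', _rev: '1-x' }, { _id: 'a', _rev: '2-y' }, { _id: 'a', _rev: '1-z' }].sort(sortByIdThenRev)
//   // -> a/1-z, a/2-y, b/1-x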
it('respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
function readSortAndDeepEqual(actualContentPath, expectedContentPath) {
const backupContent = JSON.parse(fs.readFileSync(actualContentPath, 'utf8'));
const expectedContent = JSON.parse(fs.readFileSync(expectedContentPath, 'utf8'));
// Array order of the docs matters for the deep-equality check, but not for backup correctness, so sort both sides first
backupContent.sort(sortByIdThenRev);
expectedContent.sort(sortByIdThenRev);
// Assert that the backup matches the expected
assert.deepStrictEqual(backupContent, expectedContent);
}
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
function setTimeout(context, timeout) {
// Increase timeout using TEST_TIMEOUT_MULTIPLIER
const multiplier = (typeof process.env.TEST_TIMEOUT_MULTIPLIER !== 'undefined') ? parseInt(process.env.TEST_TIMEOUT_MULTIPLIER, 10) : 1;
timeout *= multiplier;
// Set the mocha timeout
context.timeout(timeout * 1000);
}
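// Worked example: with TEST_TIMEOUT_MULTIPLIER=3, setTimeout(this, 60) sets a
// mocha timeout of 60 * 3 * 1000 = 180000 ms; with it unset the multiplier
// defaults to 1, giving 60000 ms.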
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
function assertGzipFile(path) {
// 1f 8b is the gzip magic number
const expectedBytes = Buffer.from([0x1f, 0x8b]);
const buffer = Buffer.alloc(2);
const fd = fs.openSync(path, 'r');
// Read the first two bytes
fs.readSync(fd, buffer, 0, 2, 0);
fs.closeSync(fd);
// Assert the magic number corresponds to gz extension
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be gz compressed.');
}
it('respects the COUCH_QUIET env variable if the --quiet restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
function assertEncryptedFile(path) {
// OpenSSL encrypted files start with "Salted"
const expectedBytes = Buffer.from('Salted');
const buffer = Buffer.alloc(6);
const fd = fs.openSync(path, 'r');
// Read the first six bytes
fs.readSync(fd, buffer, 0, 6, 0);
fs.closeSync(fd);
// Assert first 6 characters of the file are "Salted"
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be encrypted.');
}
it('respects the restore --url command-line parameter', function() {
const url = 'https://a:b@myurl3.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
function assertWrittenFewerThan(total, number) {
assert(total < number && total > 0, `Saw ${total} but expected between 1 and ${number - 1} documents for the resumed backup.`);
}
it('respects the restore --db command-line parameter', function() {
const db = 'mydb3';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
function augmentParamsWithApiKey(params) {
if (process.env.COUCHBACKUP_TEST_IAM_API_KEY) {
if (!params.opts) {
params.opts = {};
}
params.opts.iamApiKey = process.env.COUCHBACKUP_TEST_IAM_API_KEY;
params.opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
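// Illustrative sketch (key value is an assumption): with
// COUCHBACKUP_TEST_IAM_API_KEY=abc123 exported, the helper injects IAM options
// so both the API and CLI paths authenticate the same way.
//
//   const exampleParams = { useApi: true };
//   augmentParamsWithApiKey(exampleParams);
//   // exampleParams.opts.iamApiKey === 'abc123'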
it('respects the restore --buffer-size command-line parameter', function() {
const bufferSize = 250;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the restore --parallelism command-line parameter', function() {
const parallelism = 5;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the restore --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the restore --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the restore --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
});
module.exports = {
scenario,
p: params,
setTimeout,
dbCompare,
readSortAndDeepEqual,
assertGzipFile,
assertEncryptedFile,
testBackup,
testRestore,
testDirectBackupAndRestore,
testBackupToFile,
testRestoreFromFile,
testBackupAndRestoreViaFile,
testBackupAbortResumeRestore
};
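// Illustrative consumer sketch (spec and database names are assumptions): a
// typical spec wires these helpers together like so.
//
//   const u = require('./citestutils.js');
//   describe(u.scenario('Example round trip', { useApi: true }), function() {
//     it('backs up and restores exampledb', async function() {
//       u.setTimeout(this, 60);
//       return u.testDirectBackupAndRestore({ useApi: true }, 'exampledb', 'exampledb-copy');
//     });
//   });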

@@ -15,3 +15,3 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it before after beforeEach */
/* global describe it */
'use strict';

@@ -22,274 +22,23 @@

const u = require('./citestutils.js');
const mockServerPort = +process.env.COUCHBACKUP_MOCK_SERVER_PORT || 7777;
const { once } = require('node:events');
const url = `http://localhost:${mockServerPort}`;
const nock = require('nock');
const httpProxy = require('http-proxy');
const Readable = require('stream').Readable;
// Create an infinite stream to read.
// It just keeps sending a backup line, useful for testing cases of
// termination while a stream has content remaining (the animaldb backup
// is too small for that).
class InfiniteBackupStream extends Readable {
constructor(opt) {
super(opt);
this.contents = Buffer.from('[{"_id":"giraffe","_rev":"3-7665c3e66315ff40616cceef62886bd8","min_weight":830,"min_length":5,"max_weight":1600,"max_length":6,"wiki_page":"http://en.wikipedia.org/wiki/Giraffe","class":"mammal","diet":"herbivore","_revisions":{"start":3,"ids":["7665c3e66315ff40616cceef62886bd8","aaaf10d5a68cdf22d95a5482a0e95549","967a00dff5e02add41819138abb3284d"]}}]\n', 'utf8');
}
_read() {
let proceed;
do {
proceed = this.push(this.contents);
} while (proceed);
}
}
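// Illustrative usage sketch: because _read always pushes another copy of the
// same backup line, the stream never ends; pair it with an expected error so
// the test terminates, as the _bulk_docs failure cases below do.
//
//   u.testRestore(q, new InfiniteBackupStream(), 'fakenockdb');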
function assertNock() {
try {
assert.ok(nock.isDone());
} catch (err) {
console.error('pending mocks: %j', nock.pendingMocks());
throw err;
}
}
function testPromiseWithAssertNock(testPromise) {
return testPromise.finally(() => {
assertNock();
});
}
async function backupHttpError(opts, errorName, errorCode) {
const p = u.p(opts, { expectedBackupError: { name: errorName, code: errorCode } });
// Create a file and attempt a backup to it
const output = fs.createWriteStream('/dev/null');
return once(output, 'open')
.then(() => {
return testPromiseWithAssertNock(u.testBackup(p, 'fakenockdb', output));
describe('Write error tests', function() {
it('calls callback with error set when stream is not writeable', async function() {
u.setTimeout(this, 10);
const dirname = fs.mkdtempSync('test_backup_');
// make temp dir read only
fs.chmodSync(dirname, 0o444);
const filename = dirname + '/test.backup';
const backupStream = fs.createWriteStream(filename, { flags: 'w' });
const params = { useApi: true };
// try to do backup and check err was set in callback
return u.testBackup(params, 'animaldb', backupStream).then(() => {
assert.fail('Should throw an "EACCES" error');
}).catch((resultErr) => {
// cleanup temp dir
fs.rmdirSync(dirname);
// error should have been set
assert.ok(resultErr);
assert.strictEqual(resultErr.code, 'EACCES');
});
}
async function restoreHttpError(opts, errorName, errorCode) {
const q = u.p(opts, { expectedRestoreError: { name: errorName, code: errorCode } });
return testPromiseWithAssertNock(u.testRestoreFromFile(q, './test/fixtures/animaldb_expected.json', 'fakenockdb'));
}
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#unit Fatal errors', params), function() {
// These tests make real requests against mocks; if they run slowly the
// 2 second default mocha timeout can be insufficient, so use 10 s
this.timeout(10000);
let processEnvCopy;
let proxy;
before('Set process data for test', function() {
const proxyPort = mockServerPort + 1000;
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
// Set up a proxy to point to our nock server because the nock override
// isn't visible to the spawned CLI process
if (!params.useApi) {
proxy = httpProxy.createProxyServer({ target: url }).listen(proxyPort, 'localhost');
proxy.on('error', (err, req, res) => {
console.log(`Proxy received error ${err}`);
res.writeHead(400, {
'Content-Type': 'application/json'
});
res.end(JSON.stringify(err));
});
}
// setup environment variables
process.env.COUCH_URL = (params.useApi) ? url : `http://localhost:${proxyPort}`;
nock.emitter.on('no match', (req, opts) => {
console.error(`Unmatched nock request ${opts.method} ${opts.protocol}${opts.host}${opts.path}`);
});
});
after('Reset process data', function(done) {
process.env = processEnvCopy;
nock.emitter.removeAllListeners();
if (!params.useApi) {
proxy.close(done);
} else {
done();
}
});
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
describe('for backup', function() {
it('should terminate when DB does not exist', function() {
// Simulate existence check
nock(url).head('/fakenockdb').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on BulkGetError', function() {
// Simulate existence check
const n = nock(url).head('/fakenockdb').reply(200);
// Simulate _bulk_get not available
n.post('/fakenockdb/_bulk_get').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'BulkGetError', 50);
});
it('should terminate on Unauthorized existence check', function() {
// Simulate a 401
nock(url).head('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _reader', function() {
// Simulate a 403
nock(url).head('/fakenockdb').reply(403, { error: 'forbidden', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Forbidden', 12);
});
it('should terminate on _bulk_get HTTPFatalError', function() {
// Provide a mock complete changes log to allow a resume to skip ahead
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/test.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs
// Note: 2 outstanding batches, so 2 responses, 1 mock is optional because we can't guarantee timing
n.post('/fakenockdb/_bulk_get').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_get').query(true).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on NoLogFileName', function() {
// Don't supply a log file name with resume
const p = u.p(params, { opts: { resume: true } });
return backupHttpError(p, 'NoLogFileName', 20);
});
it('should terminate on LogDoesNotExist', function() {
// Use a non-existent log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/doesnotexist.log' } });
return backupHttpError(p, 'LogDoesNotExist', 21);
});
it('should terminate on IncompleteChangesInLogFile', function() {
// Use an incomplete changes log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/incomplete_changes.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Should fail when it reads the incomplete changes
return backupHttpError(p, 'IncompleteChangesInLogFile', 22);
});
it('should terminate on _changes HTTPFatalError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs (note 2 outstanding batches)
n.post('/fakenockdb/_changes').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(params, 'HTTPFatalError', 40);
});
it('should terminate on SpoolChangesError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a changes without a last_seq
n.post('/fakenockdb/_changes').query(true).reply(200,
{
results: [{
seq: '2-g1AAAAEbeJzLYWBgYMlgTmFQSElKzi9KdUhJstTLTS3KLElMT9VLzskvTUnMK9HLSy3JAapkSmRIsv___39WBnMiUy5QgN3MzDIxOdEMWb85dv0gSxThigyN8diS5AAkk-pBFiUyoOkzxKMvjwVIMjQAKaDW_Zh6TQnqPQDRC7I3CwDPDV1k',
id: 'badger',
changes: [{ rev: '4-51aa94e4b0ef37271082033bba52b850' }]
}]
});
return backupHttpError(params, 'SpoolChangesError', 30);
});
});
describe('for restore', function() {
it('should terminate on Unauthorized db existence check', function() {
// Simulate a 401
nock(url).get('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return restoreHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _writer', function() {
// Simulate the DB exists (i.e. you can read it)
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 403 trying to write
n.post('/fakenockdb/_bulk_docs').reply(403, { error: 'forbidden', reason: '_writer access is required for this request' });
return restoreHttpError(params, 'Forbidden', 12);
});
it('should terminate on RestoreDatabaseNotFound', function() {
// Simulate the DB does not exist
nock(url).get('/fakenockdb').reply(404, { error: 'not_found', reason: 'Database does not exist.' });
return restoreHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on notEmptyDBErr when database is not empty', function() {
// Simulate a DB that exists and is not empty
nock(url).get('/fakenockdb').reply(200, { doc_count: 10, doc_del_count: 0 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on notEmptyDBErr when database is not new', function() {
// Simulate a DB that exists and is not new
nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 10 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Use a parallelism of one and mock one response
const p = u.p(params, { opts: { parallelism: 1 } });
// Simulate a 400 trying to write
n.post('/fakenockdb/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
return restoreHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on _bulk_docs HTTPFatalError from system database', function() {
// Simulate that target database exists and is _not_ empty.
// This should pass the validator because system databases are excluded from the check.
const n = nock(url).get('/_replicator').reply(200, { doc_count: 1, doc_del_count: 0 });
// Simulate a 400 trying to write
n.post('/_replicator/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use a parallelism of one and mock one response
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), '_replicator'));
});
it('should terminate on _bulk_docs HTTPFatalError large stream', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write
// Provide a body function to handle the stream, but allow any body
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use only parallelism 1 so we don't have to mock up loads of responses
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), 'fakenockdb'));
});
it('should terminate on multiple _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write docs, 5 times because of default parallelism
// Provide a body function to handle the stream, but allow any body
// Four of the mocks are optional because with parallelism 5 we can't guarantee that the exit will happen
// after all 5 requests, but we must get at least one of them
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).times(4).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
const q = u.p(params, { opts: { bufferSize: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return restoreHttpError(q, 'HTTPFatalError', 40);
});
});
});
});

@@ -23,85 +23,41 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Basic backup and restore', params), function() {
it('should backup animaldb to a file correctly', async function() {
// Allow up to 40 s to backup and compare (it should be much faster)!
u.setTimeout(this, 40);
const actualBackup = `./${this.fileName}`;
// Create a file and backup to it
const output = fs.createWriteStream(actualBackup);
describe(u.scenario('Compression tests', params), function() {
const p = u.p(params, { compression: true });
it('should backup animaldb to a compressed file', async function() {
// Allow up to 60 s for backup of animaldb
u.setTimeout(this, 60);
const compressedBackup = `./${this.fileName}`;
const output = fs.createWriteStream(compressedBackup);
return once(output, 'open')
.then(() => {
return u.testBackup(params, 'animaldb', output);
return u.testBackup(p, 'animaldb', output);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected.json');
return u.assertGzipFile(compressedBackup);
});
});
it('should restore animaldb to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
it('should backup and restore animaldb via a compressed file', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_expected.json');
const dbName = this.dbName;
return once(input, 'open').then(() => {
return u.testRestore(params, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
const compressedBackup = `./${this.fileName}`;
return u.testBackupAndRestoreViaFile(p, 'animaldb', compressedBackup, this.dbName).then(() => {
return u.assertGzipFile(compressedBackup);
});
});
it('should execute a shallow mode backup successfully', async function() {
// Allow 30 s
u.setTimeout(this, 30);
const actualBackup = `./${this.fileName}`;
const output = fs.createWriteStream(actualBackup);
// Add the shallow mode option
const p = u.p(params, { opts: { mode: 'shallow' } });
return once(output, 'open')
.then(() => {
return u.testBackup(p, 'animaldb', output);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected_shallow.json');
});
it('should backup and restore animaldb via a compressed stream', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
return u.testDirectBackupAndRestore(p, 'animaldb', this.dbName);
});
describe(u.scenario('Buffer size tests', params), function() {
it('should backup/restore animaldb with the same buffer size', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } });
return u.testBackupAndRestoreViaFile(p, 'animaldb', actualBackup, this.dbName);
});
it('should backup/restore animaldb with backup buffer > restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 2 } }); // backup
const q = u.p(params, { opts: { bufferSize: 1 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should backup/restore animaldb with backup buffer < restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } }); // backup
const q = u.p(params, { opts: { bufferSize: 2 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should backup and restore largedb2g via a compressed file #slower', async function() {
// Takes ~ 25 min using CLI, but sometimes over an hour with API
u.setTimeout(this, 180 * 60);
const compressedBackup = `./${this.fileName}`;
params.compression = true;
return u.testBackupAndRestoreViaFile(p, 'largedb2g', compressedBackup, this.dbName);
});
});
});

@@ -15,3 +15,3 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it */
/* global describe it before after beforeEach */
'use strict';

@@ -22,23 +22,274 @@

const u = require('./citestutils.js');
const mockServerPort = +process.env.COUCHBACKUP_MOCK_SERVER_PORT || 7777;
const { once } = require('node:events');
const url = `http://localhost:${mockServerPort}`;
const nock = require('nock');
const httpProxy = require('http-proxy');
const Readable = require('stream').Readable;
describe('Write error tests', function() {
it('calls callback with error set when stream is not writeable', async function() {
u.setTimeout(this, 10);
const dirname = fs.mkdtempSync('test_backup_');
// make temp dir read only
fs.chmodSync(dirname, 0o444);
const filename = dirname + '/test.backup';
const backupStream = fs.createWriteStream(filename, { flags: 'w' });
const params = { useApi: true };
// try to do backup and check err was set in callback
return u.testBackup(params, 'animaldb', backupStream).then(() => {
assert.fail('Should throw an "EACCES" error');
}).catch((resultErr) => {
// cleanup temp dir
fs.rmdirSync(dirname);
// error should have been set
assert.ok(resultErr);
assert.strictEqual(resultErr.code, 'EACCES');
// Create an infinite stream to read.
// It just keeps sending a backup line, useful for testing cases of
// termination while a stream has content remaining (the animaldb backup
// is too small for that).
class InfiniteBackupStream extends Readable {
constructor(opt) {
super(opt);
this.contents = Buffer.from('[{"_id":"giraffe","_rev":"3-7665c3e66315ff40616cceef62886bd8","min_weight":830,"min_length":5,"max_weight":1600,"max_length":6,"wiki_page":"http://en.wikipedia.org/wiki/Giraffe","class":"mammal","diet":"herbivore","_revisions":{"start":3,"ids":["7665c3e66315ff40616cceef62886bd8","aaaf10d5a68cdf22d95a5482a0e95549","967a00dff5e02add41819138abb3284d"]}}]\n', 'utf8');
}
_read() {
let proceed;
do {
proceed = this.push(this.contents);
} while (proceed);
}
}
function assertNock() {
try {
assert.ok(nock.isDone());
} catch (err) {
console.error('pending mocks: %j', nock.pendingMocks());
throw err;
}
}
function testPromiseWithAssertNock(testPromise) {
return testPromise.finally(() => {
assertNock();
});
}
async function backupHttpError(opts, errorName, errorCode) {
const p = u.p(opts, { expectedBackupError: { name: errorName, code: errorCode } });
// Create a file and attempt a backup to it
const output = fs.createWriteStream('/dev/null');
return once(output, 'open')
.then(() => {
return testPromiseWithAssertNock(u.testBackup(p, 'fakenockdb', output));
});
}
async function restoreHttpError(opts, errorName, errorCode) {
const q = u.p(opts, { expectedRestoreError: { name: errorName, code: errorCode } });
return testPromiseWithAssertNock(u.testRestoreFromFile(q, './test/fixtures/animaldb_expected.json', 'fakenockdb'));
}
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#unit Fatal errors', params), function() {
// These tests make real requests against mocks; if they run slowly the
// 2 second default mocha timeout can be insufficient, so use 10 s
this.timeout(10000);
let processEnvCopy;
let proxy;
before('Set process data for test', function() {
const proxyPort = mockServerPort + 1000;
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
// Set up a proxy to point to our nock server because the nock override
// isn't visible to the spawned CLI process
if (!params.useApi) {
proxy = httpProxy.createProxyServer({ target: url }).listen(proxyPort, 'localhost');
proxy.on('error', (err, req, res) => {
console.log(`Proxy received error ${err}`);
res.writeHead(400, {
'Content-Type': 'application/json'
});
res.end(JSON.stringify(err));
});
}
// setup environment variables
process.env.COUCH_URL = (params.useApi) ? url : `http://localhost:${proxyPort}`;
nock.emitter.on('no match', (req, opts) => {
console.error(`Unmatched nock request ${opts.method} ${opts.protocol}${opts.host}${opts.path}`);
});
});
after('Reset process data', function(done) {
process.env = processEnvCopy;
nock.emitter.removeAllListeners();
if (!params.useApi) {
proxy.close(done);
} else {
done();
}
});
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
describe('for backup', function() {
it('should terminate when DB does not exist', function() {
// Simulate existence check
nock(url).head('/fakenockdb').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on BulkGetError', function() {
// Simulate existence check
const n = nock(url).head('/fakenockdb').reply(200);
// Simulate _bulk_get not available
n.post('/fakenockdb/_bulk_get').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'BulkGetError', 50);
});
it('should terminate on Unauthorized existence check', function() {
// Simulate a 401
nock(url).head('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _reader', function() {
// Simulate a 403
nock(url).head('/fakenockdb').reply(403, { error: 'forbidden', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Forbidden', 12);
});
it('should terminate on _bulk_get HTTPFatalError', function() {
// Provide a mock complete changes log to allow a resume to skip ahead
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/test.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs
// Note: 2 outstanding batches, so 2 responses, 1 mock is optional because we can't guarantee timing
n.post('/fakenockdb/_bulk_get').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_get').query(true).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on NoLogFileName', function() {
// Don't supply a log file name with resume
const p = u.p(params, { opts: { resume: true } });
return backupHttpError(p, 'NoLogFileName', 20);
});
it('should terminate on LogDoesNotExist', function() {
// Use a non-existent log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/doesnotexist.log' } });
return backupHttpError(p, 'LogDoesNotExist', 21);
});
it('should terminate on IncompleteChangesInLogFile', function() {
// Use an incomplete changes log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/incomplete_changes.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Should fail when it reads the incomplete changes
return backupHttpError(p, 'IncompleteChangesInLogFile', 22);
});
it('should terminate on _changes HTTPFatalError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs (note 2 outstanding batches)
n.post('/fakenockdb/_changes').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(params, 'HTTPFatalError', 40);
});
it('should terminate on SpoolChangesError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a changes without a last_seq
n.post('/fakenockdb/_changes').query(true).reply(200,
{
results: [{
seq: '2-g1AAAAEbeJzLYWBgYMlgTmFQSElKzi9KdUhJstTLTS3KLElMT9VLzskvTUnMK9HLSy3JAapkSmRIsv___39WBnMiUy5QgN3MzDIxOdEMWb85dv0gSxThigyN8diS5AAkk-pBFiUyoOkzxKMvjwVIMjQAKaDW_Zh6TQnqPQDRC7I3CwDPDV1k',
id: 'badger',
changes: [{ rev: '4-51aa94e4b0ef37271082033bba52b850' }]
}]
});
return backupHttpError(params, 'SpoolChangesError', 30);
});
});
describe('for restore', function() {
it('should terminate on Unauthorized db existence check', function() {
// Simulate a 401
nock(url).get('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return restoreHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _writer', function() {
// Simulate the DB exists (i.e. you can read it)
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 403 trying to write
n.post('/fakenockdb/_bulk_docs').reply(403, { error: 'forbidden', reason: '_writer access is required for this request' });
return restoreHttpError(params, 'Forbidden', 12);
});
it('should terminate on RestoreDatabaseNotFound', function() {
// Simulate the DB does not exist
nock(url).get('/fakenockdb').reply(404, { error: 'not_found', reason: 'Database does not exist.' });
return restoreHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on notEmptyDBErr when database is not empty', function() {
// Simulate a DB that exists and is not empty
nock(url).get('/fakenockdb').reply(200, { doc_count: 10, doc_del_count: 0 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on notEmptyDBErr when database is not new', function() {
// Simulate a DB that exists and is not new
nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 10 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Use a parallelism of one and mock one response
const p = u.p(params, { opts: { parallelism: 1 } });
// Simulate a 400 trying to write
n.post('/fakenockdb/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
return restoreHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on _bulk_docs HTTPFatalError from system database', function() {
// Simulate that target database exists and is _not_ empty.
// This should pass the validator because system databases are excluded from the check.
const n = nock(url).get('/_replicator').reply(200, { doc_count: 1, doc_del_count: 0 });
// Simulate a 400 trying to write
n.post('/_replicator/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use a parallelism of one and mock one response
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), '_replicator'));
});
it('should terminate on _bulk_docs HTTPFatalError large stream', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write
// Provide a body function to handle the stream, but allow any body
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use only parallelism 1 so we don't have to mock up loads of responses
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), 'fakenockdb'));
});
it('should terminate on multiple _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write docs, 5 times because of default parallelism
// Provide a body function to handle the stream, but allow any body
// Four of the mocks are optional because with parallelism 5 we can't guarantee that the exit will happen
// after all 5 requests, but we must get at least one of them
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).times(4).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
const q = u.p(params, { opts: { bufferSize: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return restoreHttpError(q, 'HTTPFatalError', 40);
});
});
});
});

@@ -1,2 +1,2 @@

// Copyright © 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -15,138 +15,27 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global */
/* global describe it */
'use strict';
const { fork, spawn } = require('node:child_process');
const { once } = require('node:events');
const { Duplex } = require('node:stream');
const debug = require('debug');
const logProcess = debug('couchbackup:test:process');
const assert = require('assert');
const logfilesummary = require('../includes/logfilesummary.js');
class TestProcess {
constructor(cmd, args, mode) {
this.cmd = cmd;
// Child process stdio [stdin, stdout, stderr, ...extra channels]
const childProcessOptions = { stdio: [] };
switch (mode) {
case 'readable':
// Readable only, no writing to stdin so ignore it
childProcessOptions.stdio = ['ignore', 'pipe', 'inherit'];
break;
case 'writable':
// Writable only, no reading from stdout so ignore it
childProcessOptions.stdio = ['pipe', 'ignore', 'inherit'];
break;
default:
// Default Duplex mode pipe both stdin and stdout
childProcessOptions.stdio = ['pipe', 'pipe', 'inherit'];
break;
}
if (cmd.endsWith('.js')) {
// Add Node fork ipc channel
childProcessOptions.stdio.push('ipc');
logProcess(`Forking Node process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = fork(cmd, args, childProcessOptions);
} else {
logProcess(`Spawning process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = spawn(cmd, args, childProcessOptions);
}
this.childProcessPromise = once(this.childProcess, 'close').then(() => {
const code = this.childProcess.exitCode;
const signal = this.childProcess.signalCode;
logProcess(`Test process ${cmd} closed with code ${code} and signal ${signal}`);
if (code === 0) {
logProcess(`Resolving process promise for ${cmd}`);
return Promise.resolve(code);
} else {
const e = new Error(`Test child process ${cmd} exited with code ${code} and signal ${signal}. This may be normal for error case testing.`);
e.code = code;
e.signal = signal;
logProcess(`Will reject process promise for ${cmd} with ${e}`);
return Promise.reject(e);
}
describe('#unit Fetching summary from the log file', function() {
it('should fetch a summary correctly', function() {
return new Promise((resolve, reject) => {
logfilesummary('./test/fixtures/test.log', function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(data.changesComplete, true);
assert.strictEqual(typeof data.batches, 'object');
assert.strictEqual(Object.keys(data.batches).length, 2);
assert.deepStrictEqual(data.batches['1'], true);
assert.deepStrictEqual(data.batches['4'], true);
resolve();
} catch (err) {
reject(err);
}
});
});
switch (mode) {
case 'readable':
this.duplexFrom = this.childProcess.stdout;
break;
case 'writable':
this.duplexFrom = this.childProcess.stdin;
break;
default:
// Default is duplex
this.duplexFrom = { writable: this.childProcess.stdin, readable: this.childProcess.stdout };
}
this.stream = Duplex.from(this.duplexFrom);
}
}
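// Illustrative sketch (command is an assumption): the class exposes the
// child's stdio as one stream, so a spawned filter slots into a pipeline like
// any other Duplex, and childProcessPromise settles with the exit status.
//
//   const cat = new TestProcess('cat', []);
//   // cat.stream wraps { writable: stdin, readable: stdout }
//   // cat.childProcessPromise resolves on exit code 0, rejects otherwise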
module.exports = {
TestProcess,
cliBackup: function(databaseName, params = {}) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.mode) {
args.push('--mode');
args.push(params.opts.mode);
}
if (params.opts.output) {
args.push('--output');
args.push(params.opts.output);
}
if (params.opts.log) {
args.push('--log');
args.push(params.opts.log);
}
if (params.opts.resume) {
args.push('--resume');
args.push(params.opts.resume);
}
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchbackup.bin.js', args, 'readable');
},
cliRestore: function(databaseName, params) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.parallelism) {
args.push('--parallelism');
args.push(params.opts.parallelism);
}
if (params.opts.requestTimeout) {
args.push('--request-timeout');
args.push(params.opts.requestTimeout);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchrestore.bin.js', args, 'writable');
},
cliGzip: function() {
return new TestProcess('gzip', []);
},
cliGunzip: function() {
return new TestProcess('gunzip', []);
},
cliEncrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-pass', 'pass:12345']);
},
cliDecrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-d', '-pass', 'pass:12345']);
}
};
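// Illustrative sketch (assumes gzip on the PATH and COUCH_URL exported): the
// helpers compose into an OS-level pipeline, mirroring
// `couchbackup --db exampledb | gzip > example.backup.gz`.
//
//   const { pipeline } = require('node:stream/promises');
//   const backup = cliBackup('exampledb');
//   const zip = cliGzip();
//   await Promise.all([
//     pipeline(backup.stream, zip.stream, fs.createWriteStream('./example.backup.gz')),
//     backup.childProcessPromise,
//     zip.childProcessPromise
//   ]);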
});
});

@@ -1,75 +1,424 @@

// Copyright © 2018, 2023 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global describe it */
'use strict';
const fs = require('fs');
const { once } = require('node:events');
const readline = require('readline');
const u = require('./citestutils.js');
const uuid = require('uuid').v4;
const params = { useApi: true };
describe(u.scenario('Concurrent database backups', params), function() {
it('should run concurrent API database backups correctly #slower', async function() {
// Allow up to 900 s to backup and compare (it should be much faster)!
u.setTimeout(this, 900);
const checkForEmptyBatches = async function(fileName) {
let foundEmptyBatch = false;
const rd = readline.createInterface({
input: fs.createReadStream(fileName),
output: fs.createWriteStream('/dev/null'),
terminal: false
});
rd.on('line', function(line) {
if (JSON.parse(line).length === 0) {
// Note: Empty batch arrays indicate that the running backup is
// incorrectly sharing a log file with another ongoing backup job.
foundEmptyBatch = true;
}
});
// Wait for the reader to finish; returning promises from inside the
// 'close' handler would discard them, so resolve or reject here instead.
await once(rd, 'close');
if (foundEmptyBatch) {
throw new Error(`Log file '${fileName}' contains empty batches`);
}
};
const backupPromise = async function() {
const actualBackup = `./${uuid()}`;
const output = fs.createWriteStream(actualBackup);
return once(output, 'open').then(() => {
return u.testBackup(params, 'largedb1g', output);
}).then(() => {
return checkForEmptyBatches(actualBackup);
});
};
// [1] Run 'largedb1g' database backup
const backup1 = backupPromise();
// [2] Run 'largedb1g' database backup
const backup2 = backupPromise();
return Promise.all([backup1, backup2]);
});
});
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:21" time="0.104">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.002">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.026">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.008">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.005">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:21" time="4.504">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.937">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.819">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.624">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:26" time="10.43">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.579">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.463">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.582">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:36" time="5.429">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.335">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.257">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.039">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:42" time="13.196">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.574">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.315">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.511">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:55" time="5.373">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.9">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.53">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.15">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:00:00" time="7.185">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.324">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.391">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.67">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:00:07" time="308.498">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.172">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="305.553">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:05:16" time="474.964">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.721">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="471.462">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:11" time="2.713">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.439">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:14" time="0.274">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.014">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:14" time="2.312">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.878">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.897">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:16" time="5.267">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.874">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.816">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.782">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:21" time="6.535">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.32">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.223">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.191">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:28" time="32.977">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="15.784">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.657">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:01" time="0.01">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:01" time="0.099">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.015">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.018">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.021">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:01" time="0.117">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.003">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.039">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.028">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:01" time="3.462">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.333">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.405">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.33">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.312">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.363">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.292">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.286">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.401">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.359">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.375">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:05" time="3.198">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.325">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.375">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.317">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.31">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.331">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.349">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.405">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.401">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.378">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:08" time="0.003">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:08" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:08" time="0.04">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.015">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:08" time="0.013">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:08" time="0.005">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:08" time="10.593">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.018">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.015">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.006">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.013">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.509">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.01">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:18" time="0.012">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:18" time="0.557">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.018">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.523">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.014">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:19" time="15.203">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.012">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.016">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.866">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.303">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:34" time="43.349">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="43.071">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:15:18" time="4.116">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.008">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.035">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.017">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.023">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -15,49 +15,33 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it */
/* global after before describe */
'use strict';
const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');
// Import the common hooks
require('../test/hooks.js');
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Compression tests', params), function() {
const p = u.p(params, { compression: true });
const poisons = [
'normal',
'bandwidth-limit',
'latency',
'slow-read',
'rate-limit'
];
it('should backup animaldb to a compressed file', async function() {
// Allow up to 60 s for backup of animaldb
u.setTimeout(this, 60);
const compressedBackup = `./${this.fileName}`;
const output = fs.createWriteStream(compressedBackup);
return once(output, 'open')
.then(() => {
return u.testBackup(p, 'animaldb', output);
}).then(() => {
return u.assertGzipFile(compressedBackup);
});
});
poisons.forEach(function(poison) {
describe('unreliable network tests (using poison ' + poison + ')', function() {
before('start server', function() {
it('should backup and restore animaldb via a compressed file', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const compressedBackup = `./${this.fileName}`;
return u.testBackupAndRestoreViaFile(p, 'animaldb', compressedBackup, this.dbName).then(() => {
return u.assertGzipFile(compressedBackup);
});
// **************************
// Currently these tests do nothing
// pending resolution of https://github.com/IBM/couchbackup/issues/360
// to add a new toxic server
// **************************
});
it('should backup and restore animaldb via a compressed stream', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
return u.testDirectBackupAndRestore(p, 'animaldb', this.dbName);
after('stop server', function() {
});
it('should backup and restore largedb2g via a compressed file #slower', async function() {
// Takes ~ 25 min using CLI, but sometimes over an hour with API
u.setTimeout(this, 180 * 60);
const compressedBackup = `./${this.fileName}`;
params.compression = true;
return u.testBackupAndRestoreViaFile(p, 'largedb2g', compressedBackup, this.dbName);
});
delete require.cache[require.resolve('../test/ci_e2e.js')];
require('../test/ci_e2e.js');
});
});
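For reference, a compressed backup like the ones exercised above can be produced by piping the package's backup stream through zlib before it reaches disk. The following is a minimal sketch using the documented backup(srcUrl, targetStream, opts, callback) API; the source URL and file name are placeholders, not values from this test suite.

const fs = require('fs');
const zlib = require('zlib');
const couchbackup = require('@cloudant/couchbackup');

// Compress the backup on the fly by interposing a gzip stream
const gzip = zlib.createGzip();
gzip.pipe(fs.createWriteStream('./animaldb_backup.json.gz'));

couchbackup.backup('https://examples.cloudant.com/animaldb', gzip, { parallelism: 2 },
  function(err, data) {
    if (err) {
      console.error('Backup failed', err);
    } else {
      console.log('Backup finished', data);
    }
  });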

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -14,122 +14,63 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it beforeEach */
'use strict';
const assert = require('assert');
const fs = require('fs');
const nock = require('nock');
const request = require('../includes/request.js');
const writer = require('../includes/writer.js');
const noopEmitter = new (require('events')).EventEmitter();
const liner = require('../includes/liner.js');
const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');
const longTestTimeout = 3000;
const stream = require('stream');
const liner = require('./liner.js');
describe('#unit Check database restore writer', function() {
const dbUrl = 'http://localhost:5984/animaldb';
const db = request.client(dbUrl, { parallelism: 1 });
const onLine = function(onCommand, batches) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
let matches;
it('should complete successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, []); // success
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
});
it('should terminate on a fatal error', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(401, { error: 'Unauthorized' }); // fatal error
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Unauthorized');
assert.strictEqual(err.message, 'Access is denied due to invalid credentials.');
assert.ok(nock.isDone());
return true;
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
);
});
it('should retry on transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(200, { ok: true }); // third time lucky success
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
}).timeout(longTestTimeout);
it('should fail after 3 transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(503, { error: 'Service Unavailable' }); // Final transient error
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 : post ${dbUrl}/_bulk_docs - Error: Service Unavailable`);
assert.ok(nock.isDone());
return true;
// if this is one we want
if (obj.command === 't' && batches.indexOf(obj.batch) > -1) {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
onCommand(obj);
}
);
}).timeout(longTestTimeout);
}
done();
};
return change;
};
it('should restore shallow backups without rev info successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ ok: true, id: 'foo', rev: '1-abc' }]); // success
module.exports = function(log, batches, callback) {
// our sense of state
const retval = { };
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_old_shallow.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 11);
assert.ok(nock.isDone());
})]);
});
// called with each line from the log file
const onCommand = function(obj) {
retval[obj.batch] = obj;
};
it('should get a batch error for non-empty array response with new_edits false', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ id: 'foo', error: 'foo', reason: 'bar' }]);
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Error');
assert.strictEqual(err.message, 'Error writing batch with new_edits:false and 1 items');
assert.ok(nock.isDone());
return true;
}
);
});
});
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, batches))
.on('error', function(err) {
callback(err);
})
.on('finish', function() {
callback(null, retval);
});
};
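The log file consumed above is line-oriented: each to-do batch is recorded as a ":t batch<n> <json-array-of-docs>" line, which onLine matches and parses. A minimal sketch of driving the module (the log path and batch numbers are illustrative):

const logfilegetbatches = require('../includes/logfilegetbatches.js');

// A matching log line looks like:
//   :t batch4 [{"id":"21"},{"id":"22"}]
logfilegetbatches('./backup.log', [1, 4], function(err, batches) {
  if (err) {
    return console.error(err);
  }
  // batches is keyed by batch number:
  // { '1': { command: 't', batch: 1, docs: [...] }, '4': { ... } }
  Object.keys(batches).forEach(function(b) {
    console.log(`batch ${b}: ${batches[b].docs.length} docs`);
  });
});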

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -14,20 +14,47 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const u = require('./citestutils.js');
/**
* Utility methods for the command line interface.
* @module cliutils
* @see module:cliutils
*/
describe('Encryption tests', function() {
// Note CLI only to use openssl command
const p = { useApi: false, encryption: true };
const url = require('url');
const error = require('./error.js');
it('should backup and restore animaldb via an encrypted file', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const encryptedBackup = `./${this.fileName}`;
return u.testBackupAndRestoreViaFile(p, 'animaldb', encryptedBackup, this.dbName).then(() => {
return u.assertEncryptedFile(encryptedBackup);
});
});
});
module.exports = {
/**
* Combine a base URL and a database name, ensuring at least single slash
* between root and database name. This allows users to have Couch behind
* proxies that mount Couch's / endpoint at some other mount point.
* @param {string} root - root URL
* @param {string} databaseName - database name
* @return concatenated URL.
*
* @private
*/
databaseUrl: function databaseUrl(root, databaseName) {
if (!root.endsWith('/')) {
root = root + '/';
}
try {
return new url.URL(encodeURIComponent(databaseName), root).toString();
} catch (err) {
throw error.wrapPossibleInvalidUrlError(err);
}
},
/**
* Generate CLI argument usage text.
*
* @param {string} description - argument description.
* @param {string} defaultValue - default argument value.
*
* @private
*/
getUsage: function getUsage(description, defaultValue) {
return `${description} ${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
}
};
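Because databaseUrl both normalizes the trailing slash on the root and URL-encodes the database name, proxy-mounted CouchDB roots keep their mount path and names containing reserved characters stay intact. A quick sketch of the expected behaviour (the module path is illustrative):

const cliutils = require('../includes/cliutils.js');

cliutils.databaseUrl('http://localhost:5984', 'animaldb');
// -> 'http://localhost:5984/animaldb'

// The mount path survives and the '/' in the name is encoded
cliutils.databaseUrl('http://example.com/couch/', 'my/db');
// -> 'http://example.com/couch/my%2Fdb'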

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,53 +14,152 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global beforeEach afterEach */
'use strict';
const { CloudantV1 } = require('@ibm-cloud/cloudant');
const url = new URL((process.env.COUCH_BACKEND_URL) ? process.env.COUCH_BACKEND_URL : 'https://no-couch-backend-url-set.test');
const { BasicAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const authenticator = (url.username) ? new BasicAuthenticator({ username: url.username, password: decodeURIComponent(url.password) }) : new NoAuthAuthenticator();
const serviceOpts = {
authenticator: authenticator
};
const cloudant = new CloudantV1(serviceOpts);
// Remove auth from URL before using for service
cloudant.setServiceUrl(new URL(url.pathname, url.origin).toString());
const uuid = require('uuid').v4;
const fs = require('fs');
const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');
// Mocha hooks that will be at the root context so run for all tests
module.exports = function(db, bufferSize, parallelism, ee) {
const writer = new stream.Transform({ objectMode: true });
let buffer = [];
let written = 0;
let linenumber = 0;
beforeEach('Create test database', async function() {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to create the DB
this.timeout(10 * 1000);
const unique = uuid();
this.fileName = `${unique}`;
this.dbName = 'couchbackup_test_' + unique;
// this is the queue of chunks that are written to the database
// the queue's payload will be an array of documents to be written,
// the size of the array will be bufferSize. The variable parallelism
// determines how many HTTP requests will occur at any one time.
const q = async.queue(function(payload, cb) {
// if we are restoring known revisions, we need to supply new_edits=false
if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
payload.new_edits = false;
debug('Using new_edits false mode.');
}
return cloudant.putDatabase({ db: this.dbName });
}
});
if (!didError) {
db.service.postBulkDocs({
db: db.db,
bulkDocs: payload
}).then(response => {
if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
` and ${response.result ? response.result.length : 'unavailable'} items`);
}
written += payload.docs.length;
writer.emit('restored', { documents: payload.docs.length, total: written });
cb();
}).catch(err => {
err = error.convertResponseError(err);
debug(`Error writing docs ${err.name} ${err.message}`);
cb(err, payload);
});
}
}, parallelism);
afterEach('Delete test database', async function() {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to delete the DB
this.timeout(10 * 1000);
deleteIfExists(this.fileName);
deleteIfExists(`${this.fileName}.log`);
return cloudant.deleteDatabase({ db: this.dbName });
let didError = false;
// write the contents of the buffer to CouchDB in blocks of bufferSize
function processBuffer(flush, callback) {
function taskCallback(err, payload) {
if (err && !didError) {
debug(`Queue task failed with error ${err.name}`);
didError = true;
q.kill();
writer.emit('error', err);
}
}
if (flush || buffer.length >= bufferSize) {
// work through the buffer to break off bufferSize chunks
// and feed the chunks to the queue
do {
// split the buffer into bufferSize chunks
const toSend = buffer.splice(0, bufferSize);
// and add the chunk to the queue
debug(`Adding ${toSend.length} to the write queue.`);
q.push({ docs: toSend }, taskCallback);
} while (buffer.length >= bufferSize);
// send any leftover documents to the queue
if (flush && buffer.length > 0) {
debug(`Adding remaining ${buffer.length} to the write queue.`);
q.push({ docs: buffer }, taskCallback);
}
// wait until the queue size falls to a reasonable level
async.until(
// wait until the queue length drops to twice the parallelism
// or until empty on the last write
function(callback) {
// if we encountered an error, stop this until loop
if (didError) {
return callback(null, true);
}
if (flush) {
callback(null, q.idle() && q.length() === 0);
} else {
callback(null, q.length() <= parallelism * 2);
}
},
function(cb) {
setTimeout(cb, 20);
},
function() {
if (flush && !didError) {
writer.emit('finished', { total: written });
}
// callback when we're happy with the queue size
callback();
});
} else {
callback();
}
}
});
function deleteIfExists(fileName) {
fs.unlink(fileName, function(err) {
if (err) {
if (err.code !== 'ENOENT') {
console.error(`${err.code} ${err.message}`);
// take an object
writer._transform = function(obj, encoding, done) {
// each obj that arrives here is a line from the backup file
// it should contain an array of objects. The length of the array
// depends on the bufferSize at backup time.
linenumber++;
if (!didError && obj !== '') {
// see if it parses as JSON
try {
const arr = JSON.parse(obj);
// if it's an array with a length
if (typeof arr === 'object' && arr.length > 0) {
// push each document into a buffer
buffer = buffer.concat(arr);
// pause the stream
// it's likely that the speed with which data can be read from disk
// may exceed the rate it can be written to CouchDB. To prevent
// the whole file being buffered in memory, we pause the stream here.
// it is resumed, when processBuffer calls back and we call done()
this.pause();
// break the buffer in to bufferSize chunks to be written to the database
processBuffer(false, done);
} else {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
done();
}
} catch (e) {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
// Could be an incomplete write that was subsequently resumed
done();
}
} else {
done();
}
});
}
};
// called when we need to flush everything
writer._flush = function(done) {
processBuffer(true, done);
};
return writer;
};
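Putting the writer to work mirrors the unit tests earlier in this diff: the backup file is split into lines by liner, each line's JSON array is buffered, and the queue drains it to _bulk_docs in bufferSize chunks. A minimal sketch, assuming a db client from includes/request.js and an event emitter for progress (the function and parameter names are illustrative):

const fs = require('fs');
const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');
const liner = require('../includes/liner.js');
const writer = require('../includes/writer.js');

async function restoreFile(db, ee, backupFile) {
  // 500 docs per _bulk_docs request, one request in flight at a time
  const w = writer(db, 500, 1, ee);
  await Promise.all([
    pipeline(fs.createReadStream(backupFile), liner(), w),
    once(w, 'finished').then(function([stats]) {
      console.log(`restored ${stats.total} documents`);
    })
  ]);
}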

@@ -1,424 +0,80 @@

<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:27:30" time="0.097">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.002">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.028">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.003">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.004">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:27:31" time="4.521">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.955">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.783">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.634">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:27:35" time="10.315">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.587">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.469">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.457">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:27:45" time="6.42">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.903">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.435">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.288">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:27:52" time="14.378">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="4.065">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.724">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.765">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:28:06" time="5.316">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.91">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.495">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.118">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:28:12" time="8.281">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.865">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.792">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.812">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:28:20" time="316.012">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.19">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="313.044">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:33:36" time="526.414">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="4.18">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="521.449">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:42:22" time="2.97">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.703">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:42:25" time="0.275">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.011">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:42:25" time="2.288">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.877">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.886">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:42:28" time="5.202">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.88">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.767">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.754">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:42:33" time="7.073">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.485">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.339">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.447">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:42:40" time="34.648">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.936">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="17.176">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:15" time="0.009">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:15" time="0.088">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.006">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.006">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.025">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:15" time="0.127">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.032">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.032">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.009">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:15" time="7.744">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.796">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.752">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="1.057">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.64">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.755">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.804">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.776">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.799">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.74">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.616">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:23" time="5.552">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.682">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.621">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.518">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.522">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.554">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.574">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.637">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.693">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.741">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:28" time="0.002">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:28" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:28" time="0.035">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.013">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:28" time="0.01">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:28" time="0.005">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:28" time="10.589">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.015">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.012">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.508">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.013">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:39" time="0.012">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.01">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:39" time="0.556">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.017">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.523">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.014">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:39" time="15.159">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.011">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.019">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.948">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.176">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:43:55" time="42.351">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="42.079">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-12-18T09:44:37" time="4.097">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.008">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.017">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.014">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.022">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.006">
</testcase>
</testsuite>
</testsuites>
// Copyright © 2017, 2022 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';
const async = require('async');
const error = require('./error.js');
const events = require('events');
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime();
let batch = 0;
let hasErrored = false;
let startKey = null;
let total = 0;
async.doUntil(
function(callback) {
// Note, include_docs: true is set automatically when using the
// fetch function.
const opts = { db: db.db, limit: options.bufferSize, includeDocs: true };
// To avoid double fetching a document solely for the purposes of getting
// the next ID to use as a startKey for the next page we instead use the
// last ID of the current page and append the lowest unicode sort
// character.
if (startKey) opts.startKey = `${startKey}\0`;
db.service.postAllDocs(opts).then(response => {
const body = response.result;
if (!body.rows) {
ee.emit('error', new error.BackupError(
'AllDocsError', 'ERROR: Invalid all docs response'));
callback();
} else {
if (body.rows.length < opts.limit) {
startKey = null; // last batch
} else {
startKey = body.rows[opts.limit - 1].id;
}
const docs = [];
body.rows.forEach(function(doc) {
docs.push(doc.doc);
});
if (docs.length > 0) {
ee.emit('received', {
batch: batch++,
data: docs,
length: docs.length,
time: (new Date().getTime() - start) / 1000,
total: total += docs.length
});
}
callback();
}
}).catch(err => {
err = error.convertResponseError(err);
ee.emit('error', err);
hasErrored = true;
callback();
});
},
function(callback) { callback(null, hasErrored || startKey == null); },
function() { ee.emit('finished', { total: total }); }
);
return ee;
};
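// Hedged usage sketch (an illustration, not part of the file above): consuming
// the shallow backup EventEmitter. The `db` argument is assumed to be a client
// object of the shape returned by includes/request.js ({ service, db, url }),
// and the require path is an assumption.
const shallowbackup = require('./includes/shallowbackup.js'); // assumed path
shallowbackup(db, { bufferSize: 500 })
  .on('received', batch => console.log('batch', batch.batch, 'docs', batch.length))
  .on('error', err => console.error('backup error', err))
  .on('finished', summary => console.log('total docs', summary.total));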

@@ -14,34 +14,80 @@ // Copyright © 2017 IBM Corp. All rights reserved.

// limitations under the License.
/* global after before describe */
'use strict';
// Import the common hooks
require('../test/hooks.js');
const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');
const poisons = [
'normal',
'bandwidth-limit',
'latency',
'slow-read',
'rate-limit'
];
const onLine = function(onCommand, getDocs) {
const change = new stream.Transform({ objectMode: true });
poisons.forEach(function(poison) {
describe('unreliable network tests (using poison ' + poison + ')', function() {
before('start server', function() {
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
// **************************
// Currently these tests do nothing
// pending resolution of https://github.com/IBM/couchbackup/issues/360
// to add a new toxic server
// **************************
});
let matches;
after('stop server', function() {
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
// extract doc ids
if (getDocs && obj.command === 't') {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
}
onCommand(obj);
}
done();
};
return change;
};
/**
* Generate a list of remaining batches from a download file.
*
* @param {string} log - log file name
* @param {function} callback - callback with err, {changesComplete: N, batches: N}.
* changesComplete signifies whether the log file appeared to
* have completed reading the changes feed (contains :changes_complete).
* batches are remaining batch IDs for download.
*/
module.exports = function(log, callback) {
// our sense of state
const state = {
};
let changesComplete = false;
// called with each line from the log file
const onCommand = function(obj) {
if (obj.command === 't') {
state[obj.batch] = true;
} else if (obj.command === 'd') {
delete state[obj.batch];
} else if (obj.command === 'changes_complete') {
changesComplete = true;
}
};
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, false))
.on('finish', function() {
const obj = { changesComplete: changesComplete, batches: state };
callback(null, obj);
});
delete require.cache[require.resolve('../test/ci_e2e.js')];
require('../test/ci_e2e.js');
});
});
};
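// Hedged usage sketch (illustration only): reading a backup log summary with
// the module above. The log path and require path are illustrative.
const logfilesummary = require('./includes/logfilesummary.js'); // assumed path
logfilesummary('backup.log', function(err, summary) {
  if (err) throw err;
  // summary.changesComplete is true once the log contains :changes_complete
  console.log('changes complete?', summary.changesComplete);
  // summary.batches holds the IDs of batches still to download
  console.log('remaining batches', Object.keys(summary.batches));
});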

@@ -16,400 +16,99 @@ // Copyright © 2017, 2021 IBM Corp. All rights reserved.

/**
* CouchBackup module.
* @module couchbackup
* @see module:couchbackup
*/
// fatal errors
const codes = {
Error: 1,
InvalidOption: 2,
DatabaseNotFound: 10,
Unauthorized: 11,
Forbidden: 12,
DatabaseNotEmpty: 13,
NoLogFileName: 20,
LogDoesNotExist: 21,
IncompleteChangesInLogFile: 22,
SpoolChangesError: 30,
HTTPFatalError: 40,
BulkGetError: 50
};
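// For illustration: terminationCallback (later in this file) maps an error's
// name through this table, so e.g. a BackupError named 'DatabaseNotFound'
// exits the process with code 10, and any unlisted name falls back to 1.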
const backupFull = require('./includes/backup.js');
const defaults = require('./includes/config.js').apiDefaults;
const error = require('./includes/error.js');
const request = require('./includes/request.js');
const restoreInternal = require('./includes/restore.js');
const backupShallow = require('./includes/shallowbackup.js');
const debug = require('debug')('couchbackup:app');
const events = require('events');
const fs = require('fs');
const URL = require('url').URL;
/**
* Test for a positive, safe integer.
*
* @param {object} x - Object under test.
*/
function isSafePositiveInteger(x) {
// https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
const MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || 9007199254740991;
// Is it a number?
return Object.prototype.toString.call(x) === '[object Number]' &&
// Is it an integer?
x % 1 === 0 &&
// Is it positive?
x > 0 &&
// Is it less than the maximum safe integer?
x <= MAX_SAFE_INTEGER;
class BackupError extends Error {
constructor(name, message) {
super(message);
this.name = name;
}
}
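// For illustration, some inputs and results for isSafePositiveInteger:
//   isSafePositiveInteger(500)  -> true
//   isSafePositiveInteger(0)    -> false (not positive)
//   isSafePositiveInteger(1.5)  -> false (not an integer)
//   isSafePositiveInteger('5')  -> false (not a number)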
/**
* Validate arguments.
*
* @param {object} url - URL of database.
* @param {object} opts - Options.
* @param {function} cb - Callback to be called on error.
*/
function validateArgs(url, opts, cb) {
if (typeof url !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid URL, must be type string'), null);
return;
}
if (opts && typeof opts.bufferSize !== 'undefined' && !isSafePositiveInteger(opts.bufferSize)) {
cb(new error.BackupError('InvalidOption', 'Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.iamApiKey !== 'undefined' && typeof opts.iamApiKey !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid iamApiKey option, must be type string'), null);
return;
}
if (opts && typeof opts.log !== 'undefined' && typeof opts.log !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid log option, must be type string'), null);
return;
}
if (opts && typeof opts.mode !== 'undefined' && ['full', 'shallow'].indexOf(opts.mode) === -1) {
cb(new error.BackupError('InvalidOption', 'Invalid mode option, must be either "full" or "shallow"'), null);
return;
}
if (opts && typeof opts.output !== 'undefined' && typeof opts.output !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid output option, must be type string'), null);
return;
}
if (opts && typeof opts.parallelism !== 'undefined' && !isSafePositiveInteger(opts.parallelism)) {
cb(new error.BackupError('InvalidOption', 'Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.requestTimeout !== 'undefined' && !isSafePositiveInteger(opts.requestTimeout)) {
cb(new error.BackupError('InvalidOption', 'Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.resume !== 'undefined' && typeof opts.resume !== 'boolean') {
cb(new error.BackupError('InvalidOption', 'Invalid resume option, must be type boolean'), null);
return;
}
// Validate URL and ensure no auth if using key
try {
const urlObject = new URL(url);
// We require a protocol, host and path (for db), fail if any is missing.
if (urlObject.protocol !== 'https:' && urlObject.protocol !== 'http:') {
cb(new error.BackupError('InvalidOption', 'Invalid URL protocol.'));
return;
class HTTPError extends BackupError {
constructor(responseError, name) {
// Special case some names for more useful error messages
switch (responseError.status) {
case 401:
name = 'Unauthorized';
break;
case 403:
name = 'Forbidden';
break;
default:
name = name || 'HTTPFatalError';
}
if (!urlObject.pathname || urlObject.pathname === '/') {
cb(new error.BackupError('InvalidOption', 'Invalid URL, missing path element (no database).'));
return;
}
if (opts && opts.iamApiKey && (urlObject.username || urlObject.password)) {
cb(new error.BackupError('InvalidOption', 'URL user information must not be supplied when using IAM API key.'));
return;
}
} catch (err) {
cb(error.wrapPossibleInvalidUrlError(err));
return;
super(name, responseError.message);
}
}
// Perform validation of invalid options for shallow mode and WARN
// We don't error for backwards compatibility with scripts that may have been
// written passing complete sets of options through
if (opts && opts.mode === 'shallow') {
if (opts.log || opts.resume) {
console.warn('WARNING: the options "log" and "resume" are invalid when using shallow mode.');
// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
if (err) {
// Construct an HTTPError if there is request information on the error
// Codes < 400 are considered OK
if (err.status >= 400) {
return new HTTPError(err);
} else {
// Send it back again if there was no status code, e.g. a connection error
return augmentMessage(err);
}
if (opts.parallelism) {
console.warn('WARNING: the option "parallelism" has no effect when using shallow mode.');
}
}
}
if (opts && opts.resume) {
if (!opts.log) {
// This is the second place we check for the presence of the log option in conjunction with resume
// It has to be here for the API case
cb(new error.BackupError('NoLogFileName', 'To resume a backup, a log file must be specified'), null);
return;
} else if (!fs.existsSync(opts.log)) {
cb(new error.BackupError('LogDoesNotExist', 'To resume a backup, the log file must exist'), null);
return;
}
function convertResponseError(responseError, errorFactory) {
if (!errorFactory) {
errorFactory = checkResponse;
}
return true;
return errorFactory(responseError);
}
function addEventListener(indicator, emitter, event, f) {
emitter.on(event, function(...args) {
if (!indicator.errored) {
if (event === 'error') indicator.errored = true;
f(...args);
}
});
function augmentMessage(err) {
// For errors that don't have a status code, we are likely looking at a cxn
// error.
// Try to augment the message with more detail (core puts the code in statusText)
if (err && err.statusText) {
err.message = `${err.message} ${err.statusText}`;
}
if (err && err.description) {
err.message = `${err.message} ${err.description}`;
}
return err;
}
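// For illustration, how the default factory classifies response errors:
//   convertResponseError({ status: 401, message: 'x' }) -> HTTPError named 'Unauthorized'
//   convertResponseError({ status: 403, message: 'x' }) -> HTTPError named 'Forbidden'
//   convertResponseError({ status: 500, message: 'x' }) -> HTTPError named 'HTTPFatalError'
// Errors without a status code pass through augmentMessage with their type unchanged.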
/*
Check the backup database exists and that the credentials used have
visibility. Callback with a fatal error if there is a problem with the DB.
@param {string} db - database object
@param {function(err)} callback - error is undefined if DB exists
*/
function proceedIfBackupDbValid(db, callback) {
db.service.headDatabase({ db: db.db }).then(() => callback()).catch(err => {
err = error.convertResponseError(err, err => parseIfDbValidResponseError(db, err));
callback(err);
});
}
/*
Check that the restore database exists, is new and is empty. Also verify that the credentials used have
visibility. Callback with a fatal error if there is a problem with the DB.
@param {string} db - database object
@param {function(err)} callback - error is undefined if DB exists, new and empty
*/
function proceedIfRestoreDbValid(db, callback) {
db.service.getDatabaseInformation({ db: db.db }).then(response => {
const { doc_count: docCount, doc_del_count: deletedDocCount } = response.result;
// The system databases can have a validation ddoc(s) injected in them on creation.
// This sets the doc count off, so we just completely exclude the system databases from this check.
// The assumption here is that users restoring system databases know what they are doing.
if (!db.db.startsWith('_') && (docCount !== 0 || deletedDocCount !== 0)) {
const notEmptyDBErr = new Error(`Target database ${db.url}${db.db} is not empty.`);
notEmptyDBErr.name = 'DatabaseNotEmpty';
callback(notEmptyDBErr);
} else {
callback();
}
}).catch(err => {
err = error.convertResponseError(err, err => parseIfDbValidResponseError(db, err));
callback(err);
});
}
/*
Convert the database validation response error to a special DatabaseNotFound error
in case the database is missing. Otherwise delegate to the default error factory.
@param {object} db - database object
@param {object} err - HTTP response error
*/
function parseIfDbValidResponseError(db, err) {
if (err && err.status === 404) {
// Override the error type and message for the DB not found case
const msg = `Database ${db.url}` +
`${db.db} does not exist. ` +
'Check the URL and database name have been specified correctly.';
const noDBErr = new Error(msg);
noDBErr.name = 'DatabaseNotFound';
return noDBErr;
function wrapPossibleInvalidUrlError(err) {
if (err.code === 'ERR_INVALID_URL') {
// Wrap ERR_INVALID_URL in our own InvalidOption
return new BackupError('InvalidOption', err.message);
}
// Delegate to the default error factory if it wasn't a 404
return error.convertResponseError(err);
return err;
}
module.exports = {
/**
* Backup a Cloudant database to a stream.
*
* @param {string} srcUrl - URL of database to backup.
* @param {stream.Writable} targetStream - Stream to write content to.
* @param {object} opts - Backup options.
* @param {number} [opts.parallelism=5] - Number of parallel HTTP requests to use.
* @param {number} [opts.bufferSize=500] - Number of documents per batch request.
* @param {number} [opts.requestTimeout=120000] - Milliseconds to wait before retrying a HTTP request.
* @param {string} [opts.iamApiKey] - IAM API key to use to access Cloudant database.
* @param {string} [opts.log] - Log file name. Default uses a temporary file.
* @param {boolean} [opts.resume] - Whether to resume from existing log.
* @param {string} [opts.mode=full] - Use `full` or `shallow` mode.
* @param {backupRestoreCallback} callback - Called on completion.
*/
backup: function(srcUrl, targetStream, opts, callback) {
const listenerErrorIndicator = { errored: false };
if (typeof callback === 'undefined' && typeof opts === 'function') {
callback = opts;
opts = {};
BackupError,
HTTPError,
wrapPossibleInvalidUrlError,
convertResponseError,
terminationCallback: function terminationCallback(err, data) {
if (err) {
console.error(`ERROR: ${err.message}`);
process.exitCode = codes[err.name] || 1;
process.exit();
}
if (!validateArgs(srcUrl, opts, callback)) {
// bad args, bail
return;
}
// if there is an error writing to the stream, call the completion
// callback with the error set
addEventListener(listenerErrorIndicator, targetStream, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
if (callback) callback(err);
});
opts = Object.assign({}, defaults(), opts);
const ee = new events.EventEmitter();
// Set up the DB client
const backupDB = request.client(srcUrl, opts);
// Validate the DB exists, before proceeding to backup
proceedIfBackupDbValid(backupDB, function(err) {
if (err) {
if (err.name === 'DatabaseNotFound') {
err.message = `${err.message} Ensure the backup source database exists.`;
}
// Didn't exist, or another fatal error, exit
callback(err);
return;
}
let backup = null;
if (opts.mode === 'shallow') {
backup = backupShallow;
} else { // full mode
backup = backupFull;
}
// If resuming write a newline as it's possible one would be missing from
// an interruption of the previous backup. If the backup was clean this
// will cause an empty line that will be gracefully handled by the restore.
if (opts.resume) {
targetStream.write('\n');
}
// Get the event emitter from the backup process so we can handle events
// before passing them on to the app's event emitter if needed.
const internalEE = backup(backupDB, opts);
addEventListener(listenerErrorIndicator, internalEE, 'changes', function(batch) {
ee.emit('changes', batch);
});
addEventListener(listenerErrorIndicator, internalEE, 'received', function(obj, q, logCompletedBatch) {
// this may be too verbose to have as well as the "backed up" message
// debug(' received batch', obj.batch, ' docs: ', obj.total, 'Time', obj.time);
// Callback to emit the written event when the content is flushed
function writeFlushed() {
ee.emit('written', { total: obj.total, time: obj.time, batch: obj.batch });
if (logCompletedBatch) {
logCompletedBatch(obj.batch);
}
debug(' backed up batch', obj.batch, ' docs: ', obj.total, 'Time', obj.time);
}
// Write the received content to the targetStream
const continueWriting = targetStream.write(JSON.stringify(obj.data) + '\n',
'utf8',
writeFlushed);
if (!continueWriting) {
// The buffer was full, pause the queue to stop the writes until we
// get a drain event
if (q && !q.paused) {
q.pause();
targetStream.once('drain', function() {
q.resume();
});
}
}
});
// For errors we expect, may or may not be fatal
addEventListener(listenerErrorIndicator, internalEE, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
callback(err);
});
addEventListener(listenerErrorIndicator, internalEE, 'finished', function(obj) {
function emitFinished() {
debug('Backup complete - written ' + JSON.stringify(obj));
const summary = { total: obj.total };
ee.emit('finished', summary);
if (callback) callback(null, summary);
}
if (targetStream === process.stdout) {
// stdout cannot emit a finish event so use a final write + callback
targetStream.write('', 'utf8', emitFinished);
} else {
// If we're writing to a file, end the writes and register the
// emitFinished function for a callback when the file stream's finish
// event is emitted.
targetStream.end('', 'utf8', emitFinished);
}
});
});
return ee;
},
/**
* Restore a backup from a stream.
*
* @param {stream.Readable} srcStream - Stream containing backed up data.
* @param {string} targetUrl - Target database.
* @param {object} opts - Restore options.
* @param {number} opts.parallelism - Number of parallel HTTP requests to use. Default 5.
* @param {number} opts.bufferSize - Number of documents per batch request. Default 500.
* @param {number} opts.requestTimeout - Milliseconds to wait before retrying a HTTP request. Default 120000.
* @param {string} opts.iamApiKey - IAM API key to use to access Cloudant database.
* @param {backupRestoreCallback} callback - Called on completion.
*/
restore: function(srcStream, targetUrl, opts, callback) {
const listenerErrorIndicator = { errored: false };
if (typeof callback === 'undefined' && typeof opts === 'function') {
callback = opts;
opts = {};
}
validateArgs(targetUrl, opts, callback);
opts = Object.assign({}, defaults(), opts);
const ee = new events.EventEmitter();
// Set up the DB client
const restoreDB = request.client(targetUrl, opts);
// Validate the DB exists, before proceeding to restore
proceedIfRestoreDbValid(restoreDB, function(err) {
if (err) {
if (err.name === 'DatabaseNotFound') {
err.message = `${err.message} Create the target database before restoring.`;
} else if (err.name === 'DatabaseNotEmpty') {
err.message = `${err.message} A target database must be a new and empty database.`;
}
// Didn't exist, or another fatal error, exit
callback(err);
return;
}
restoreInternal(
restoreDB,
opts,
srcStream,
ee,
function(err, writer) {
if (err) {
callback(err, null);
return;
}
if (writer != null) {
addEventListener(listenerErrorIndicator, writer, 'restored', function(obj) {
debug(' restored ', obj.total);
ee.emit('restored', { documents: obj.documents, total: obj.total });
});
addEventListener(listenerErrorIndicator, writer, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
// Only call destroy if it is available on the stream
if (srcStream.destroy && srcStream.destroy instanceof Function) {
srcStream.destroy();
}
callback(err);
});
addEventListener(listenerErrorIndicator, writer, 'finished', function(obj) {
debug('restore complete');
ee.emit('finished', { total: obj.total });
callback(null, obj);
});
}
}
);
});
return ee;
}
};
/**
* Backup/restore callback
* @callback backupRestoreCallback
* @param {Error} err - Error object if operation failed.
* @param {object} data - summary data for backup/restore
*/
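// Hedged usage sketch of the API documented above (the URL and file name are
// illustrative):
const couchbackup = require('@cloudant/couchbackup');
const fs = require('fs');
couchbackup.backup(
  'https://examples.cloudant.com/animaldb',
  fs.createWriteStream('animaldb.backup.txt'),
  { parallelism: 2 },
  function(err, data) {
    if (err) {
      console.error('backup failed', err);
    } else {
      console.log('backed up docs:', data.total);
    }
  });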

@@ -1,2 +0,2 @@

// Copyright © 2017, 2022 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,66 +16,164 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const async = require('async');
const error = require('./error.js');
const events = require('events');
const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime();
let batch = 0;
let hasErrored = false;
let startKey = null;
let total = 0;
const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
process.version + ')';
async.doUntil(
function(callback) {
// Note, include_docs: true is set automatically when using the
// fetch function.
const opts = { db: db.db, limit: options.bufferSize, includeDocs: true };
// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
constructor(options) {
super(options);
this.data = [];
}
// To avoid double fetching a document solely for the purposes of getting
// the next ID to use as a startKey for the next page we instead use the
// last ID of the current page and append the lowest unicode sort
// character.
if (startKey) opts.startKey = `${startKey}\0`;
db.service.postAllDocs(opts).then(response => {
const body = response.result;
if (!body.rows) {
ee.emit('error', new error.BackupError(
'AllDocsError', 'ERROR: Invalid all docs response'));
callback();
} else {
if (body.rows.length < opts.limit) {
startKey = null; // last batch
} else {
startKey = body.rows[opts.limit - 1].id;
}
_write(chunk, encoding, callback) {
this.data.push(chunk);
callback();
}
const docs = [];
body.rows.forEach(function(doc) {
docs.push(doc.doc);
stringBody() {
return Buffer.concat(this.data).toString();
}
}
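// For illustration: err.response.data.pipe(new ResponseWriteable()) buffers the
// streamed error body, and stringBody() yields the text for JSON.parse (see
// errorHelper below).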
// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
let method;
let requestUrl;
if (err.response) {
if (err.response.config.url) {
requestUrl = err.response.config.url;
method = err.response.config.method;
}
// Override the status text with an improved message
let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
`${method} ${requestUrl}`;
if (err.response.data) {
// Check if we have a JSON response and try to get the error/reason
if (err.response.headers['content-type'] === 'application/json') {
if (!err.response.data.error && err.response.data.pipe) {
// If we didn't find a JSON object with `error` then we might have a stream response.
// Detect the stream by the presence of `pipe` and use it to get the body and parse
// the error information.
const p = new Promise((resolve, reject) => {
const errorBody = new ResponseWriteable();
err.response.data.pipe(errorBody)
.on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
.on('error', () => { reject(err); });
});
if (docs.length > 0) {
ee.emit('received', {
batch: batch++,
data: docs,
length: docs.length,
time: (new Date().getTime() - start) / 1000,
total: total += docs.length
});
// Replace the stream on the response with the parsed object
err.response.data = await p;
}
// Append the error/reason if available
if (err.response.data.error) {
// Override the status text with our more complete message
errorMsg += ` - Error: ${err.response.data.error}`;
if (err.response.data.reason) {
errorMsg += `, Reason: ${err.response.data.reason}`;
}
callback();
}
}).catch(err => {
err = error.convertResponseError(err);
ee.emit('error', err);
hasErrored = true;
callback();
} else {
errorMsg += err.response.data;
}
// Set a new message for use by the node-sdk-core
// We use the errors array because it gets processed
// ahead of all other service errors.
err.response.data.errors = [{ message: errorMsg }];
}
} else if (err.request) {
if (!err.message.includes(err.config.url)) {
// Augment the message with the URL and method
// but don't do it again if we already have the URL.
err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
}
}
return Promise.reject(err);
};
module.exports = {
client: function(rawUrl, opts) {
const url = new URL(rawUrl);
// Split the URL to separate service from database
// Use origin as the "base" to remove auth elements
const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
let authenticator;
// Default to cookieauth unless an IAM key is provided
if (opts.iamApiKey) {
const iamAuthOpts = { apikey: opts.iamApiKey };
if (opts.iamTokenUrl) {
iamAuthOpts.url = opts.iamTokenUrl;
}
authenticator = new IamAuthenticator(iamAuthOpts);
} else if (url.username) {
authenticator = new CouchdbSessionAuthenticator({
username: decodeURIComponent(url.username),
password: decodeURIComponent(url.password)
});
},
function(callback) { callback(null, hasErrored || startKey == null); },
function() { ee.emit('finished', { total: total }); }
);
} else {
authenticator = new NoAuthAuthenticator();
}
const serviceOpts = {
authenticator: authenticator,
timeout: opts.requestTimeout,
// Axios performance options
maxContentLength: -1
};
return ee;
const service = new CloudantV1(serviceOpts);
// Configure retries
const maxRetries = 2; // for 3 total attempts
service.getHttpClient().defaults.raxConfig = {
// retries for status codes
retry: maxRetries,
// retries for non-response e.g. ETIMEDOUT
noResponseRetries: maxRetries,
backoffType: 'exponential',
httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
statusCodesToRetry: [
[429, 429],
[500, 599]
],
shouldRetry: err => {
const cfg = retryPlugin.getConfig(err);
// cap at max retries regardless of response/non-response type
if (cfg.currentRetryAttempt >= maxRetries) {
return false;
} else {
return retryPlugin.shouldRetryRequest(err);
}
},
instance: service.getHttpClient()
};
retryPlugin.attach(service.getHttpClient());
service.setServiceUrl(actUrl.toString());
if (authenticator instanceof CouchdbSessionAuthenticator) {
// Awkward workaround for known Couch issue with compression on _session requests
// It is not feasible to disable compression on all requests with the amount of
// data this lib needs to move, so override the property in the tokenManager instance.
authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
}
if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
}
// Add error interceptors to put URLs in error messages
service.getHttpClient().interceptors.response.use(null, errorHelper);
// Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
service.getHttpClient().interceptors.request.use(function(requestConfig) {
requestConfig.headers['User-Agent'] = userAgent;
return requestConfig;
}, null);
return { service: service, db: dbName, url: actUrl.toString() };
}
};
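// For illustration (the URL and returned values are assumptions about an
// example input):
//   client('https://example.cloudant.com/mydb', { requestTimeout: 120000 })
// returns { service: <CloudantV1 instance>, db: 'mydb',
//           url: 'https://example.cloudant.com/' }.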

@@ -1,2 +0,2 @@

// Copyright © 2017, 2018 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,45 +16,115 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/**
* Utility methods for the command line interface.
* @module cliutils
* @see module:cliutils
*/
const url = require('url');
const cliutils = require('./cliutils.js');
const config = require('./config.js');
const error = require('./error.js');
const path = require('path');
const pkg = require('../package.json');
module.exports = {
function parseBackupArgs() {
const program = require('commander');
/**
* Combine a base URL and a database name, ensuring at least single slash
* between root and database name. This allows users to have Couch behind
* proxies that mount Couch's / endpoint at some other mount point.
* @param {string} root - root URL
* @param {string} databaseName - database name
* @return concatenated URL.
*
* @private
*/
databaseUrl: function databaseUrl(root, databaseName) {
if (!root.endsWith('/')) {
root = root + '/';
}
try {
return new url.URL(encodeURIComponent(databaseName), root).toString();
} catch (err) {
throw error.wrapPossibleInvalidUrlError(err);
}
},
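// For illustration:
//   databaseUrl('https://example.com/couch', 'animaldb')
//     -> 'https://example.com/couch/animaldb'
// (databaseName is URI-encoded, so 'my/db' becomes 'my%2Fdb').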
// Option CLI defaults
const defaults = config.cliDefaults();
/**
* Generate CLI argument usage text.
*
* @param {string} description - argument description.
* @param {string} defaultValue - default argument value.
*
* @private
*/
getUsage: function getUsage(description, defaultValue) {
return `${description} ${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
// Options set by environment variables
const envVarOptions = {};
config.applyEnvironmentVariables(envVarOptions);
program
.version(pkg.version)
.description('Backup a CouchDB/Cloudant database to a backup text file.')
.usage('[options...]')
.option('-b, --buffer-size <n>',
cliutils.getUsage('number of documents fetched at once', defaults.bufferSize),
Number)
.option('-d, --db <db>',
cliutils.getUsage('name of the database to backup', defaults.db))
.option('-k, --iam-api-key <API key>',
cliutils.getUsage('IAM API key to access the Cloudant server'))
.option('-l, --log <file>',
cliutils.getUsage('file to store logging information during backup; invalid in "shallow" mode', 'a temporary file'),
path.normalize)
.option('-m, --mode <mode>',
cliutils.getUsage('"shallow" if only a superficial backup is done (ignoring conflicts and revision tokens), else "full" for complete backup', defaults.mode),
(mode) => { return mode.toLowerCase(); })
.option('-o, --output <file>',
cliutils.getUsage('file name to store the backup data', 'stdout'),
path.normalize)
.option('-p, --parallelism <n>',
cliutils.getUsage('number of HTTP requests to perform in parallel when performing a backup; ignored in "shallow" mode', defaults.parallelism),
Number)
.option('-q, --quiet',
cliutils.getUsage('suppress batch messages', defaults.quiet))
.option('-r, --resume',
cliutils.getUsage('continue a previous backup from its last known position; invalid in "shallow" mode', defaults.resume))
.option('-t, --request-timeout <n>',
cliutils.getUsage('milliseconds to wait for a response to a HTTP request before retrying the request', defaults.requestTimeout),
Number)
.option('-u, --url <url>',
cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
.parse(process.argv);
// Remove defaults that don't apply when using shallow mode
if (program.opts().mode === 'shallow' || envVarOptions.mode === 'shallow') {
delete defaults.parallelism;
delete defaults.log;
delete defaults.resume;
}
// Apply the options in order so that the CLI overrides env vars and env variables
// override defaults.
const opts = Object.assign({}, defaults, envVarOptions, program.opts());
if (opts.resume && (opts.log === defaults.log)) {
// If resuming and the log file arg is the newly generated tmp name from defaults then we know that --log wasn't specified.
// We have to do this check here for the CLI case because of the default.
error.terminationCallback(new error.BackupError('NoLogFileName', 'To resume a backup, a log file must be specified'));
}
return opts;
}
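// For illustration of the precedence above (values are examples): with
// COUCH_BUFFER_SIZE=100 in the environment and `--buffer-size 50` on the
// command line, opts.bufferSize === 50; with only the env var set it is 100;
// with neither it falls back to the config default.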
function parseRestoreArgs() {
const program = require('commander');
// Option CLI defaults
const defaults = config.cliDefaults();
// Options set by environment variables
const envVarOptions = {};
config.applyEnvironmentVariables(envVarOptions);
program
.version(pkg.version)
.description('Restore a CouchDB/Cloudant database from a backup text file.')
.usage('[options...]')
.option('-b, --buffer-size <n>',
cliutils.getUsage('number of documents restored at once', defaults.bufferSize),
Number)
.option('-d, --db <db>',
cliutils.getUsage('name of the new, existing database to restore to', defaults.db))
.option('-k, --iam-api-key <API key>',
cliutils.getUsage('IAM API key to access the Cloudant server'))
.option('-p, --parallelism <n>',
cliutils.getUsage('number of HTTP requests to perform in parallel when restoring a backup', defaults.parallelism),
Number)
.option('-q, --quiet',
cliutils.getUsage('suppress batch messages', defaults.quiet))
.option('-t, --request-timeout <n>',
cliutils.getUsage('milliseconds to wait for a response to a HTTP request before retrying the request', defaults.requestTimeout),
Number)
.option('-u, --url <url>',
cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
.parse(process.argv);
// Apply the options in order so that the CLI overrides env vars and env variables
// override defaults.
const opts = Object.assign({}, defaults, envVarOptions, program.opts());
return opts;
}
module.exports = {
parseBackupArgs: parseBackupArgs,
parseRestoreArgs: parseRestoreArgs
};

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,78 +16,267 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const async = require('async');
const events = require('events');
const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');
const onLine = function(onCommand, getDocs) {
const change = new stream.Transform({ objectMode: true });
/**
* Read documents from a database to be backed up.
*
* @param {string} db - `@cloudant/cloudant` DB object for source database.
* @param {number} blocksize - number of documents to download in a single request
* @param {number} parallelism - number of concurrent downloads
* @param {string} log - path to log file to use
* @param {boolean} resume - whether to resume from an existing log file
* @returns EventEmitter with following events:
* - `received` - called with a block of documents to write to backup
* - `error` - on error
* - `finished` - when backup process is finished (either complete or errored)
*/
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime(); // backup start time
const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
function proceedWithBackup() {
if (options.resume) {
// pick up from existing log file from previous run
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
} else {
// create new log file and process
spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
if (err) {
ee.emit('error', err);
} else {
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
}
});
}
}
let matches;
validateBulkGetSupport(db, function(err) {
if (err) {
return ee.emit('error', err);
} else {
proceedWithBackup();
}
});
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
return ee;
};
/**
* Validate /_bulk_get support for a specified database.
*
* @param {string} db - nodejs-cloudant db
* @param {function} callback - called on completion with signature (err)
*/
function validateBulkGetSupport(db, callback) {
db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
err = error.convertResponseError(err, function(err) {
switch (err.status) {
case undefined:
// There was no status code on the error
return err;
case 404:
return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
default:
return new error.HTTPError(err);
}
});
callback(err);
});
}
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
/**
* Download remaining batches in a log file, splitting batches into sets
* to avoid enqueueing too many in one go.
*
* @param {string} log - log file name to maintain download state
* @param {string} db - nodejs-cloudant db
* @param {events.EventEmitter} ee - event emitter to emit received events on
* @param {time} startTime - start time for backup process
* @param {number} batchesPerDownloadSession - max batches to enqueue for
* download at a time. As batches contain many doc IDs, this helps avoid
* exhausting memory.
* @param {number} parallelism - number of concurrent downloads
* @returns function to call to download remaining batches with signature
* (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
*/
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
let total = 0; // running total of documents downloaded so far
let noRemainingBatches = false;
// Generate a set of batches (up to batchesPerDownloadSession) to download from the
// log file and download them. Set noRemainingBatches to `true` for last batch.
function downloadSingleBatchSet(done) {
// Fetch the doc IDs for the batches in the current set to
// download them.
function batchSetComplete(err, data) {
if (!err) {
total = data.total;
}
done(err);
}
function processRetrievedBatches(err, batches) {
if (!err) {
// process them in parallelised queue
processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
} else {
batchSetComplete(err);
}
}
// extract doc ids
if (getDocs && obj.command === 't') {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
if (err) {
ee.emit('error', err);
// Stop processing changes file for fatal errors
noRemainingBatches = true;
done();
} else {
if (batchSetIds.length === 0) {
noRemainingBatches = true;
return done();
}
logfilegetbatches(log, batchSetIds, processRetrievedBatches);
}
onCommand(obj);
});
}
// Return true if all batches in log file have been downloaded
function isFinished(callback) { callback(null, noRemainingBatches); }
function onComplete() {
ee.emit('finished', { total: total });
}
async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}
/**
* Return a set of uncompleted download batch IDs from the log file.
*
* @param {string} log - log file path
* @param {number} batchesPerDownloadSession - maximum IDs to return
* @param {function} callback - sign (err, batchSetIds array)
*/
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
logfilesummary(log, function processSummary(err, summary) {
if (!err) {
if (!summary.changesComplete) {
callback(new error.BackupError('IncompleteChangesInLogFile',
'WARNING: Changes did not finish spooling'));
return;
}
if (Object.keys(summary.batches).length === 0) {
return callback(null, []);
}
// batch IDs are the property names of summary.batches
const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
callback(null, batchSetIds);
} else {
callback(err);
}
done();
};
return change;
};
});
}
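// Illustrative sketch (not part of this file): consuming the emitter
// returned by the backup module above. Event names and payload shapes
// follow the emits in downloadRemainingBatches and processBatchSet below;
// the stdout writer is only an example consumer.
//
//   const ee = backup(db, options); // i.e. the module.exports above
//   ee.on('received', function(batch, q, logCompletedBatch) {
//     // batch.data holds the document revisions fetched via _bulk_get
//     process.stdout.write(JSON.stringify(batch.data) + '\n');
//     // append ':d batchN' to the log so a resumed backup skips this batch
//     logCompletedBatch(batch.batch);
//   });
//   ee.on('finished', function(summary) {
//     console.log('backup complete:', summary.total, 'document revisions');
//   });
//   ee.on('error', console.error);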
/**
* Download a set of batches retrieved from a log file. When a download is
* complete, add a line to the logfile indicating such.
*
* @param {any} db - nodejs-cloudant database
* @param {any} parallelism - number of concurrent requests to make
* @param {any} log - log file to drive downloads from
* @param {any} batches - batches to download
* @param {any} ee - event emitter for progress. This function emits
* received and error events.
* @param {any} start - time backup started, to report deltas
* @param {any} grandtotal - count of documents downloaded prior to this set
* of batches
* @param {any} callback - completion callback, (err, {total: number}).
*/
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
let hasErrored = false;
let total = grandtotal;

// queue to process the fetch requests in an orderly fashion using _bulk_get
const q = async.queue(function(payload, done) {
const output = [];
const thisBatch = payload.batch;
delete payload.batch;
delete payload.command;

function logCompletedBatch(batch) {
if (log) {
fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
} else {
done();
}
}

// do the /db/_bulk_get request
db.service.postBulkGet({
db: db.db,
revs: true,
docs: payload.docs
}).then(response => {
// create an output array with the docs returned
response.result.results.forEach(function(d) {
if (d.docs) {
d.docs.forEach(function(doc) {
if (doc.ok) {
output.push(doc.ok);
}
});
}
});
total += output.length;
const t = (new Date().getTime() - start) / 1000;
ee.emit('received', {
batch: thisBatch,
data: output,
length: output.length,
time: t,
total: total
}, q, logCompletedBatch);
}).catch(err => {
if (!hasErrored) {
hasErrored = true;
err = error.convertResponseError(err);
// Kill the queue for fatal errors
q.kill();
ee.emit('error', err);
}
done();
});
}, parallelism);

for (const i in batches) {
q.push(batches[i]);
}

q.drain(function() {
callback(null, { total: total });
});
}

/**
* Returns first N properties on an object.
*
* @param {object} obj - object with properties
* @param {number} count - number of properties to return
*/
function getPropertyNames(obj, count) {
// decide which batch numbers to deal with
const batchestofetch = [];
let j = 0;
for (const i in obj) {
batchestofetch.push(parseInt(i));
j++;
if (j >= count) break;
}
return batchestofetch;
}

// Parse a log file line into a {command, batch, docs} object and pass it
// to onCommand. Lines that do not start with ':' are ignored.
const onLine = function(onCommand, getDocs) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};

let matches;

// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}

// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}

// extract doc ids
if (getDocs && obj.command === 't') {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
}

onCommand(obj);
}
done();
};
return change;
};

/**
* Generate a list of remaining batches from a download file.
*
* @param {string} log - log file name
* @param {function} callback - callback with err, {changesComplete: N, batches: N}.
* changesComplete signifies whether the log file appeared to
* have completed reading the changes feed (contains :changes_complete).
* batches are remaining batch IDs for download.
*/
module.exports = function(log, callback) {
// our sense of state
const state = {
};
let changesComplete = false;

// called with each line from the log file
const onCommand = function(obj) {
if (obj.command === 't') {
state[obj.batch] = true;
} else if (obj.command === 'd') {
delete state[obj.batch];
} else if (obj.command === 'changes_complete') {
changesComplete = true;
}
};

// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, false))
.on('finish', function() {
const obj = { changesComplete: changesComplete, batches: state };
callback(null, obj);
});
};
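Taken together, the functions above are driven by a plain-text log file of ':'-prefixed commands: ':t' marks a spooled batch to fetch, ':d' marks a downloaded batch, and ':changes_complete' marks the end of the changes feed. A hedged sketch of reading one back with the summary module (file name, content and require path are illustrative only):

// backup.log (illustrative):
//   :t batch0 [{"id":"doc1"},{"id":"doc2"}]
//   :t batch1 [{"id":"doc3"}]
//   :d batch0
//   :changes_complete 3-xyz
const logfilesummary = require('./includes/logfilesummary.js'); // assumed path
logfilesummary('backup.log', function(err, summary) {
  // summary.changesComplete === true
  // summary.batches === { '1': true } -> only batch 1 is left to download
});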

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -16,164 +16,17 @@ // Licensed under the Apache License, Version 2.0 (the "License");

module.exports = function(db, options, readstream, ee, callback) {
const liner = require('../includes/liner.js')();
const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);

// pipe the input to the output, via transformation functions
readstream
.pipe(liner) // transform the input stream into per-line
.on('error', function(err) {
// Forward the error to the writer event emitter where we already have
// listeners on for handling errors
writer.emit('error', err);
})
.pipe(writer); // transform the data

callback(null, writer);
};

const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');

const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
process.version + ')';

// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
constructor(options) {
super(options);
this.data = [];
}

_write(chunk, encoding, callback) {
this.data.push(chunk);
callback();
}

stringBody() {
return Buffer.concat(this.data).toString();
}
}

// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
let method;
let requestUrl;
if (err.response) {
if (err.response.config.url) {
requestUrl = err.response.config.url;
method = err.response.config.method;
}
// Override the status text with an improved message
let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
`${method} ${requestUrl}`;
if (err.response.data) {
// Check if we have a JSON response and try to get the error/reason
if (err.response.headers['content-type'] === 'application/json') {
if (!err.response.data.error && err.response.data.pipe) {
// If we didn't find a JSON object with `error` then we might have a stream response.
// Detect the stream by the presence of `pipe` and use it to get the body and parse
// the error information.
const p = new Promise((resolve, reject) => {
const errorBody = new ResponseWriteable();
err.response.data.pipe(errorBody)
.on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
.on('error', () => { reject(err); });
});
// Replace the stream on the response with the parsed object
err.response.data = await p;
}
// Append the error/reason if available
if (err.response.data.error) {
// Override the status text with our more complete message
errorMsg += ` - Error: ${err.response.data.error}`;
if (err.response.data.reason) {
errorMsg += `, Reason: ${err.response.data.reason}`;
}
}
} else {
errorMsg += err.response.data;
}
// Set a new message for use by the node-sdk-core
// We use the errors array because it gets processed
// ahead of all other service errors.
err.response.data.errors = [{ message: errorMsg }];
}
} else if (err.request) {
if (!err.message.includes(err.config.url)) {
// Augment the message with the URL and method
// but don't do it again if we already have the URL.
err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
}
}
return Promise.reject(err);
};

module.exports = {
client: function(rawUrl, opts) {
const url = new URL(rawUrl);
// Split the URL to separate service from database
// Use origin as the "base" to remove auth elements
const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
let authenticator;
// Default to cookieauth unless an IAM key is provided
if (opts.iamApiKey) {
const iamAuthOpts = { apikey: opts.iamApiKey };
if (opts.iamTokenUrl) {
iamAuthOpts.url = opts.iamTokenUrl;
}
authenticator = new IamAuthenticator(iamAuthOpts);
} else if (url.username) {
authenticator = new CouchdbSessionAuthenticator({
username: decodeURIComponent(url.username),
password: decodeURIComponent(url.password)
});
} else {
authenticator = new NoAuthAuthenticator();
}
const serviceOpts = {
authenticator: authenticator,
timeout: opts.requestTimeout,
// Axios performance options
maxContentLength: -1
};
const service = new CloudantV1(serviceOpts);
// Configure retries
const maxRetries = 2; // for 3 total attempts
service.getHttpClient().defaults.raxConfig = {
// retries for status codes
retry: maxRetries,
// retries for non-response e.g. ETIMEDOUT
noResponseRetries: maxRetries,
backoffType: 'exponential',
httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
statusCodesToRetry: [
[429, 429],
[500, 599]
],
shouldRetry: err => {
const cfg = retryPlugin.getConfig(err);
// cap at max retries regardless of response/non-response type
if (cfg.currentRetryAttempt >= maxRetries) {
return false;
} else {
return retryPlugin.shouldRetryRequest(err);
}
},
instance: service.getHttpClient()
};
retryPlugin.attach(service.getHttpClient());
service.setServiceUrl(actUrl.toString());
if (authenticator instanceof CouchdbSessionAuthenticator) {
// Awkward workaround for known Couch issue with compression on _session requests
// It is not feasible to disable compression on all requests with the amount of
// data this lib needs to move, so override the property in the tokenManager instance.
authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
}
if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
}
// Add error interceptors to put URLs in error messages
service.getHttpClient().interceptors.response.use(null, errorHelper);
// Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
service.getHttpClient().interceptors.request.use(function(requestConfig) {
requestConfig.headers['User-Agent'] = userAgent;
return requestConfig;
}, null);
return { service: service, db: dbName, url: actUrl.toString() };
}
};
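As a usage sketch for the client factory above (require path and URL are assumptions for illustration; getDatabaseInformation is a standard CloudantV1 operation):

const { client } = require('./includes/request.js'); // assumed path

// Session (cookie) auth is inferred from the URL credentials; an IAM key in
// opts.iamApiKey would take precedence, per the factory above.
const db = client('https://user:pass@couch.example.com/mydb', { requestTimeout: 120000 });
// db.service -> CloudantV1 instance with retries and error interceptors
// db.db      -> 'mydb'
// db.url     -> 'https://couch.example.com'
db.service.getDatabaseInformation({ db: db.db })
  .then(response => console.log(response.result.doc_count))
  .catch(console.error);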

@@ -1,2 +0,2 @@

// Copyright © 2017, 2018 IBM Corp. All rights reserved.
// Copyright © 2017, 2022 IBM Corp. All rights reserved.
//

@@ -16,17 +16,97 @@ // Licensed under the Apache License, Version 2.0 (the "License");

module.exports = function(db, options, readstream, ee, callback) {
const liner = require('../includes/liner.js')();
const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);

// pipe the input to the output, via transformation functions
readstream
.pipe(liner) // transform the input stream into per-line
.on('error', function(err) {
// Forward the error to the writer event emitter where we already have
// listeners on for handling errors
writer.emit('error', err);
})
.pipe(writer); // transform the data

callback(null, writer);
};

const fs = require('fs');
const liner = require('./liner.js');
const change = require('./change.js');
const error = require('./error.js');
const debug = require('debug')('couchbackup:spoolchanges');

/**
* Write log file for all changes from a database, ready for downloading
* in batches.
*
* @param {object} db - database connection object
* @param {string} log - path to log file to use
* @param {number} bufferSize - the number of changes per batch/log line
* @param {function(err)} callback - a callback to run on completion
*/
module.exports = function(db, log, bufferSize, ee, callback) {
// list of document ids to process
const buffer = [];
let batch = 0;
let lastSeq = null;
const logStream = fs.createWriteStream(log);
let pending = 0;
// The number of changes to fetch per request
const limit = 100000;
// send document ids to the queue in batches of bufferSize + the last batch
const processBuffer = function(lastOne) {
if (buffer.length >= bufferSize || (lastOne && buffer.length > 0)) {
debug('writing', buffer.length, 'changes to the backup file');
const b = { docs: buffer.splice(0, bufferSize), batch: batch };
logStream.write(':t batch' + batch + ' ' + JSON.stringify(b.docs) + '\n');
ee.emit('changes', batch);
batch++;
}
};
// called once per received change
const onChange = function(c) {
if (c) {
if (c.error) {
ee.emit('error', new error.BackupError('InvalidChange', `Received invalid change: ${c}`));
} else if (c.changes) {
const obj = { id: c.id };
buffer.push(obj);
processBuffer(false);
} else if (c.last_seq) {
lastSeq = c.last_seq;
pending = c.pending;
}
}
};
function getChanges(since = 0) {
debug('making changes request since ' + since);
return db.service.postChangesAsStream({ db: db.db, since: since, limit: limit, seqInterval: limit })
.then(response => {
response.result.pipe(liner())
.on('error', function(err) {
logStream.end();
callback(err);
})
.pipe(change(onChange))
.on('error', function(err) {
logStream.end();
callback(err);
})
.on('finish', function() {
processBuffer(true);
if (!lastSeq) {
logStream.end();
debug('changes request terminated before last_seq was sent');
callback(new error.BackupError('SpoolChangesError', 'Changes request terminated before last_seq was sent'));
} else {
debug(`changes request completed with last_seq: ${lastSeq} and ${pending} changes pending.`);
if (pending > 0) {
// Return the next promise
return getChanges(lastSeq);
} else {
debug('finished streaming database changes');
logStream.end(':changes_complete ' + lastSeq + '\n', 'utf8', callback);
}
}
});
})
.catch(err => {
logStream.end();
if (err.status && err.status >= 400) {
callback(error.convertResponseError(err));
} else if (err.name !== 'SpoolChangesError') {
callback(new error.BackupError('SpoolChangesError', `Failed changes request - ${err.message}`));
}
});
}
getChanges();
};
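A hedged sketch of driving the spooler above; the db handle shape matches the request client shown earlier, and all paths are illustrative:

const { EventEmitter } = require('events');
const client = require('./includes/request.js').client;    // assumed path
const spoolchanges = require('./includes/spoolchanges.js'); // assumed path

const db = client('http://localhost:5984/animaldb', {});
const ee = new EventEmitter();
ee.on('changes', batch => console.log('spooled batch', batch));

// Writes ':t batchN [...]' lines of 500 doc IDs each to backup.log, then a
// final ':changes_complete <seq>' line once the feed is exhausted.
spoolchanges(db, 'backup.log', 500, ee, function(err) {
  if (err) console.error('spooling failed', err);
  else console.log('all changes spooled');
});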

@@ -0,1 +1,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.

@@ -16,104 +17,45 @@ //

const error = require('../includes/error.js');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const restoreDebug = debug('couchbackup:restore');
const restoreBatchDebug = debug('couchbackup:restore:batch');

restoreDebug.enabled = true;

try {
const program = parser.parseRestoreArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);
const opts = {
bufferSize: program.bufferSize,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};

// log configuration to console
console.error('='.repeat(80));
console.error('Performing restore on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));

restoreBatchDebug.enabled = !program.quiet;

return couchbackup.restore(
process.stdin, // restore from stdin
databaseUrl,
opts,
error.terminationCallback
).on('restored', function(obj) {
restoreBatchDebug('restored', obj.total);
}).on('error', function(e) {
restoreDebug('ERROR', e);
}).on('finished', function(obj) {
restoreDebug('finished', obj);
});
} catch (err) {
error.terminationCallback(err);
}

const path = require('path');
const tmp = require('tmp');

/**
Return API default settings.
*/
function apiDefaults() {
return {
parallelism: 5,
bufferSize: 500,
requestTimeout: 120000,
log: tmp.tmpNameSync(),
resume: false,
mode: 'full'
};
}

/**
Return CLI default settings.
*/
function cliDefaults() {
const defaults = apiDefaults();

// add additional legacy settings
defaults.db = 'test';
defaults.url = 'http://localhost:5984';

// add CLI only option
defaults.quiet = false;

return defaults;
}

/**
Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
// if we have a custom CouchDB url
if (typeof process.env.COUCH_URL !== 'undefined') {
opts.url = process.env.COUCH_URL;
}
// if we have a specified database
if (typeof process.env.COUCH_DATABASE !== 'undefined') {
opts.db = process.env.COUCH_DATABASE;
}
// if we have a specified buffer size
if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
}
// if we have a specified parallelism
if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
}
// if we have a specified request timeout
if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
}
// if we have a specified log file
if (typeof process.env.COUCH_LOG !== 'undefined') {
opts.log = path.normalize(process.env.COUCH_LOG);
}
// if we are instructed to resume
if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
opts.resume = true;
}
// if we are given an output filename
if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
opts.output = path.normalize(process.env.COUCH_OUTPUT);
}
// if we only want a shallow copy
if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
opts.mode = 'shallow';
}
// if we are instructed to be quiet
if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
opts.quiet = true;
}
// if we have a specified API key
if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
}
// if we have a specified IAM token endpoint
if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}

module.exports = {
apiDefaults: apiDefaults,
cliDefaults: cliDefaults,
applyEnvironmentVariables: applyEnvironmentVariables
};
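A short sketch of how these helpers compose (require path assumed):

const config = require('./includes/config.js'); // assumed path

const opts = config.cliDefaults();      // API defaults plus db/url/quiet extras
process.env.COUCH_BUFFER_SIZE = '1000';
config.applyEnvironmentVariables(opts); // mutates opts in place
console.log(opts.bufferSize);           // 1000
console.log(opts.mode);                 // 'full' (default left untouched)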

@@ -0,1 +1,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.

@@ -16,99 +17,66 @@ //

const error = require('../includes/error.js');
const fs = require('fs');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const backupDebug = debug('couchbackup:backup');
const backupBatchDebug = debug('couchbackup:backup:batch');

backupDebug.enabled = true;

try {
const program = parser.parseBackupArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);

const opts = {
bufferSize: program.bufferSize,
log: program.log,
mode: program.mode,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
resume: program.resume,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};

// log configuration to console
console.error('='.repeat(80));
console.error('Performing backup on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));

backupBatchDebug.enabled = !program.quiet;

let ws = process.stdout;

// open output file
if (program.output) {
let flags = 'w';
if (program.log && program.resume) {
flags = 'a';
}
const fd = fs.openSync(program.output, flags);
ws = fs.createWriteStream(null, { fd });
}

backupDebug('Fetching all database changes...');

return couchbackup.backup(
databaseUrl,
ws,
opts,
error.terminationCallback
).on('changes', function(batch) {
backupBatchDebug('Total batches received:', batch + 1);
}).on('written', function(obj) {
backupBatchDebug('Written batch ID:', obj.batch, 'Total document revisions written:', obj.total, 'Time:', obj.time);
}).on('error', function(e) {
backupDebug('ERROR', e);
}).on('finished', function(obj) {
backupDebug('Finished - Total document revisions written:', obj.total);
});
} catch (err) {
error.terminationCallback(err);
}

// fatal errors
const codes = {
Error: 1,
InvalidOption: 2,
DatabaseNotFound: 10,
Unauthorized: 11,
Forbidden: 12,
DatabaseNotEmpty: 13,
NoLogFileName: 20,
LogDoesNotExist: 21,
IncompleteChangesInLogFile: 22,
SpoolChangesError: 30,
HTTPFatalError: 40,
BulkGetError: 50
};

class BackupError extends Error {
constructor(name, message) {
super(message);
this.name = name;
}
}

class HTTPError extends BackupError {
constructor(responseError, name) {
// Special case some names for more useful error messages
switch (responseError.status) {
case 401:
name = 'Unauthorized';
break;
case 403:
name = 'Forbidden';
break;
default:
name = name || 'HTTPFatalError';
}
super(name, responseError.message);
}
}

// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
if (err) {
// Construct an HTTPError if there is request information on the error
// Codes < 400 are considered OK
if (err.status >= 400) {
return new HTTPError(err);
} else {
// Send it back again if there was no status code, e.g. a connection error
return augmentMessage(err);
}
}
}

function convertResponseError(responseError, errorFactory) {
if (!errorFactory) {
errorFactory = checkResponse;
}
return errorFactory(responseError);
}

function augmentMessage(err) {
// For errors that don't have a status code, we are likely looking at a
// connection error.
// Try to augment the message with more detail (core puts the code in statusText)
if (err && err.statusText) {
err.message = `${err.message} ${err.statusText}`;
}
if (err && err.description) {
err.message = `${err.message} ${err.description}`;
}
return err;
}

function wrapPossibleInvalidUrlError(err) {
if (err.code === 'ERR_INVALID_URL') {
// Wrap ERR_INVALID_URL in our own InvalidOption
return new BackupError('InvalidOption', err.message);
}
return err;
}

module.exports = {
BackupError,
HTTPError,
wrapPossibleInvalidUrlError,
convertResponseError,
terminationCallback: function terminationCallback(err, data) {
if (err) {
console.error(`ERROR: ${err.message}`);
process.exitCode = codes[err.name] || 1;
process.exit();
}
}
};
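To make the exit-code mapping concrete, a hedged sketch of a 401 response flowing through the helpers above (the response object is a minimal stand-in, and the require path is assumed):

const error = require('./includes/error.js'); // assumed path

// Minimal stand-in for an HTTP response error surfaced by the SDK.
const responseError = { status: 401, message: 'Unauthorized: GET /mydb' };

const err = error.convertResponseError(responseError);
console.log(err.name);    // 'Unauthorized', via the HTTPError status switch
console.log(err.message); // 'Unauthorized: GET /mydb'
// error.terminationCallback(err) would then exit with codes.Unauthorized, i.e. 11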

@@ -1,111 +0,424 @@

// Copyright © 2017, 2022 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';
const fs = require('fs');
const liner = require('./liner.js');
const change = require('./change.js');
const error = require('./error.js');
const debug = require('debug')('couchbackup:spoolchanges');
/**
* Write log file for all changes from a database, ready for downloading
* in batches.
*
* @param {object} db - database connection object
* @param {string} log - path to log file to use
* @param {number} bufferSize - the number of changes per batch/log line
* @param {function(err)} callback - a callback to run on completion
*/
module.exports = function(db, log, bufferSize, ee, callback) {
// list of document ids to process
const buffer = [];
let batch = 0;
let lastSeq = null;
const logStream = fs.createWriteStream(log);
let pending = 0;
// The number of changes to fetch per request
const limit = 100000;
// send document ids to the queue in batches of bufferSize + the last batch
const processBuffer = function(lastOne) {
if (buffer.length >= bufferSize || (lastOne && buffer.length > 0)) {
debug('writing', buffer.length, 'changes to the backup file');
const b = { docs: buffer.splice(0, bufferSize), batch: batch };
logStream.write(':t batch' + batch + ' ' + JSON.stringify(b.docs) + '\n');
ee.emit('changes', batch);
batch++;
}
};
// called once per received change
const onChange = function(c) {
if (c) {
if (c.error) {
ee.emit('error', new error.BackupError('InvalidChange', `Received invalid change: ${c}`));
} else if (c.changes) {
const obj = { id: c.id };
buffer.push(obj);
processBuffer(false);
} else if (c.last_seq) {
lastSeq = c.last_seq;
pending = c.pending;
}
}
};
function getChanges(since = 0) {
debug('making changes request since ' + since);
return db.service.postChangesAsStream({ db: db.db, since: since, limit: limit, seqInterval: limit })
.then(response => {
response.result.pipe(liner())
.on('error', function(err) {
logStream.end();
callback(err);
})
.pipe(change(onChange))
.on('error', function(err) {
logStream.end();
callback(err);
})
.on('finish', function() {
processBuffer(true);
if (!lastSeq) {
logStream.end();
debug('changes request terminated before last_seq was sent');
callback(new error.BackupError('SpoolChangesError', 'Changes request terminated before last_seq was sent'));
} else {
debug(`changes request completed with last_seq: ${lastSeq} and ${pending} changes pending.`);
if (pending > 0) {
// Return the next promise
return getChanges(lastSeq);
} else {
debug('finished streaming database changes');
logStream.end(':changes_complete ' + lastSeq + '\n', 'utf8', callback);
}
}
});
})
.catch(err => {
logStream.end();
if (err.status && err.status >= 400) {
callback(error.convertResponseError(err));
} else if (err.name !== 'SpoolChangesError') {
callback(new error.BackupError('SpoolChangesError', `Failed changes request - ${err.message}`));
}
});
}
getChanges();
};
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:21" time="0.091">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.023">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.003">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.008">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.004">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:21" time="4.46">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.928">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.809">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.619">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:26" time="10.354">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.598">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.45">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.514">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:36" time="5.885">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.502">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.399">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.191">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:42" time="14.079">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.794">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.654">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.831">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:56" time="5.409">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.912">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.524">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.183">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:00:02" time="8.018">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.666">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.723">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.813">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:00:10" time="307.545">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.173">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="304.607">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:05:17" time="514.336">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.782">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="510.632">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:51" time="2.959">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.691">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:54" time="0.282">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.011">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:55" time="2.312">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.87">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.897">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:57" time="5.215">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.874">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.75">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.774">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:02" time="7.167">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.526">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.39">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.418">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:09" time="34.293">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.848">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.895">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:44" time="0.009">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:44" time="0.079">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.003">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.014">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.007">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.012">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:44" time="0.113">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.007">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.003">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.003">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.036">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.022">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:44" time="5.195">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.513">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.54">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.54">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.472">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.564">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.495">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.482">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.485">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.561">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.537">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:49" time="5.025">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.488">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.53">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.529">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.538">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.491">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.498">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.579">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.808">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.557">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:54" time="0.002">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:54" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:54" time="0.037">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.014">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:54" time="0.011">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:54" time="0.004">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:54" time="10.596">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.015">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.018">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.005">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.509">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.01">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:15:05" time="0.011">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:15:05" time="0.563">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.019">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.524">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.02">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:15:05" time="15.191">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.012">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.015">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.912">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.246">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:15:21" time="42.286">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="42.012">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:16:03" time="4.122">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.008">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.037">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.017">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.022">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.008">
</testcase>
</testsuite>
</testsuites>

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -14,152 +14,178 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';

const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');

module.exports = function(db, bufferSize, parallelism, ee) {
  const writer = new stream.Transform({ objectMode: true });
  let buffer = [];
  let written = 0;
  let linenumber = 0;
  let didError = false;

  // this is the queue of chunks that are written to the database
  // the queue's payload will be an array of documents to be written,
  // the size of the array will be bufferSize. The variable parallelism
  // determines how many HTTP requests will occur at any one time.
  const q = async.queue(function(payload, cb) {
    // if we are restoring known revisions, we need to supply new_edits=false
    if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
      payload.new_edits = false;
      debug('Using new_edits false mode.');
    }

    if (!didError) {
      db.service.postBulkDocs({
        db: db.db,
        bulkDocs: payload
      }).then(response => {
        if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
          throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
            ` and ${response.result ? response.result.length : 'unavailable'} items`);
        }
        written += payload.docs.length;
        writer.emit('restored', { documents: payload.docs.length, total: written });
        cb();
      }).catch(err => {
        err = error.convertResponseError(err);
        debug(`Error writing docs ${err.name} ${err.message}`);
        cb(err, payload);
      });
    }
  }, parallelism);

  // write the contents of the buffer to CouchDB in blocks of bufferSize
  function processBuffer(flush, callback) {
    function taskCallback(err, payload) {
      if (err && !didError) {
        debug(`Queue task failed with error ${err.name}`);
        didError = true;
        q.kill();
        writer.emit('error', err);
      }
    }

    if (flush || buffer.length >= bufferSize) {
      // work through the buffer to break off bufferSize chunks
      // and feed the chunks to the queue
      do {
        // split the buffer into bufferSize chunks
        const toSend = buffer.splice(0, bufferSize);

        // and add the chunk to the queue
        debug(`Adding ${toSend.length} to the write queue.`);
        q.push({ docs: toSend }, taskCallback);
      } while (buffer.length >= bufferSize);

      // send any leftover documents to the queue
      if (flush && buffer.length > 0) {
        debug(`Adding remaining ${buffer.length} to the write queue.`);
        q.push({ docs: buffer }, taskCallback);
      }

      // wait until the queue size falls to a reasonable level
      async.until(
        // wait until the queue length drops to twice the parallelism
        // or until empty on the last write
        function(callback) {
          // if we encountered an error, stop this until loop
          if (didError) {
            return callback(null, true);
          }
          if (flush) {
            callback(null, q.idle() && q.length() === 0);
          } else {
            callback(null, q.length() <= parallelism * 2);
          }
        },
        function(cb) {
          setTimeout(cb, 20);
        },
        function() {
          if (flush && !didError) {
            writer.emit('finished', { total: written });
          }
          // callback when we're happy with the queue size
          callback();
        });
    } else {
      callback();
    }
  }

  // take an object
  writer._transform = function(obj, encoding, done) {
    // each obj that arrives here is a line from the backup file
    // it should contain an array of objects. The length of the array
    // depends on the bufferSize at backup time.
    linenumber++;
    if (!didError && obj !== '') {
      // see if it parses as JSON
      try {
        const arr = JSON.parse(obj);

        // if it's an array with a length
        if (typeof arr === 'object' && arr.length > 0) {
          // push each document into a buffer
          buffer = buffer.concat(arr);

          // pause the stream
          // it's likely that the speed with which data can be read from disk
          // may exceed the rate it can be written to CouchDB. To prevent
          // the whole file being buffered in memory, we pause the stream here.
          // it is resumed, when processBuffer calls back and we call done()
          this.pause();

          // break the buffer in to bufferSize chunks to be written to the database
          processBuffer(false, done);
        } else {
          ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
          done();
        }
      } catch (e) {
        ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
        // Could be an incomplete write that was subsequently resumed
        done();
      }
    } else {
      done();
    }
  };

  // called when we need to flush everything
  writer._flush = function(done) {
    processBuffer(true, done);
  };

  return writer;
};
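The queue above only regulates the writes themselves; the polling `async.until` loop is what stops the file reader from racing ahead of the database. A reduced, self-contained sketch of that queue-plus-throttle pattern (illustrative names only, no couchbackup APIs involved):

'use strict';

// Sketch only: the async.queue / async.until throttling pattern used above,
// reduced to a runnable toy. Batch "writes" are simulated with setTimeout.
const async = require('async');

const parallelism = 2;
const q = async.queue((task, cb) => {
  setTimeout(() => { console.log(`wrote batch ${task.id}`); cb(); }, 50);
}, parallelism);

function pushWithThrottle(id, done) {
  q.push({ id });
  // wait until the queue length drops to twice the parallelism
  async.until(
    (cb) => cb(null, q.length() <= parallelism * 2),
    (cb) => setTimeout(cb, 20),
    done
  );
}

async.timesSeries(10, (i, next) => pushWithThrottle(i, next), () => {
  console.log('all batches queued');
});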
// Small script which backs up a Cloudant or CouchDB database to an S3
// bucket, using an intermediary file on disk.
//
// The script generates the backup object name by combining together the path
// part of the database URL and the current time.
'use strict';

const AWS = require('aws-sdk');
const couchbackup = require('@cloudant/couchbackup');
const debug = require('debug')('s3-backup');
const fs = require('fs');
const stream = require('stream');
const tmp = require('tmp');
const url = require('url');
const VError = require('verror').VError;

/*
  Main function, run from base of file.
*/
function main() {
  const argv = require('yargs')
    .usage('Usage: $0 [options]')
    .example('$0 -s https://user:pass@host/db -b <bucket>', 'Backup db to bucket')
    .options({
      source: { alias: 's', nargs: 1, demandOption: true, describe: 'Source database URL' },
      bucket: { alias: 'b', nargs: 1, demandOption: true, describe: 'Destination bucket' },
      prefix: { alias: 'p', nargs: 1, describe: 'Prefix for backup object key', default: 'couchbackup' },
      s3url: { nargs: 1, describe: 'S3 endpoint URL' },
      awsprofile: { nargs: 1, describe: 'The profile section to use in the ~/.aws/credentials file', default: 'default' }
    })
    .help('h').alias('h', 'help')
    .epilog('Copyright (C) IBM 2017')
    .argv;

  const sourceUrl = argv.source;
  const backupBucket = argv.bucket;
  const backupName = new url.URL(sourceUrl).pathname.split('/').filter(function(x) { return x; }).join('-');
  const backupKeyPrefix = `${argv.prefix}-${backupName}`;
  const backupKey = `${backupKeyPrefix}-${new Date().toISOString()}`;
  const backupTmpFile = tmp.fileSync();

  const s3Endpoint = argv.s3url;
  const awsProfile = argv.awsprofile;

  // Creds are from ~/.aws/credentials, environment etc. (see S3 docs).
  const awsOpts = {
    signatureVersion: 'v4',
    credentials: new AWS.SharedIniFileCredentials({ profile: awsProfile })
  };
  if (typeof s3Endpoint !== 'undefined') {
    awsOpts.endpoint = new AWS.Endpoint(s3Endpoint);
  }
  const s3 = new AWS.S3(awsOpts);

  debug(`Creating a new backup of ${s(sourceUrl)} at ${backupBucket}/${backupKey}...`);
  bucketAccessible(s3, backupBucket)
    .then(() => {
      return createBackupFile(sourceUrl, backupTmpFile.name);
    })
    .then(() => {
      return uploadNewBackup(s3, backupTmpFile.name, backupBucket, backupKey);
    })
    .then(() => {
      debug('Backup successful!');
      backupTmpFile.removeCallback();
      debug('done.');
    })
    .catch((reason) => {
      debug(`Error: ${reason}`);
    });
}

/**
 * Return a promise that resolves if the bucket is available and
 * rejects if not.
 *
 * @param {any} s3 S3 client object
 * @param {any} bucketName Bucket name
 * @returns Promise
 */
function bucketAccessible(s3, bucketName) {
  return new Promise(function(resolve, reject) {
    const params = {
      Bucket: bucketName
    };
    s3.headBucket(params, function(err, data) {
      if (err) {
        reject(new VError(err, 'S3 bucket not accessible'));
      } else {
        resolve();
      }
    });
  });
}

/**
 * Use couchbackup to create a backup of the specified database to a file path.
 *
 * @param {any} sourceUrl Database URL
 * @param {any} backupTmpFilePath Path to write file
 * @returns Promise
 */
function createBackupFile(sourceUrl, backupTmpFilePath) {
  return new Promise((resolve, reject) => {
    couchbackup.backup(
      sourceUrl,
      fs.createWriteStream(backupTmpFilePath),
      (err) => {
        if (err) {
          return reject(new VError(err, 'CouchBackup process failed'));
        }
        debug('couchbackup to file done; uploading to S3');
        resolve('creating backup file complete');
      }
    );
  });
}

/**
 * Upload a backup file to an S3 bucket.
 *
 * @param {any} s3 Object store client
 * @param {any} backupTmpFilePath Path of backup file to write.
 * @param {any} bucket Object store bucket name
 * @param {any} key Object store key name
 * @returns Promise
 */
function uploadNewBackup(s3, backupTmpFilePath, bucket, key) {
  return new Promise((resolve, reject) => {
    debug(`Uploading from ${backupTmpFilePath} to ${bucket}/${key}`);

    function uploadFromStream(s3, bucket, key) {
      const pass = new stream.PassThrough();

      const params = {
        Bucket: bucket,
        Key: key,
        Body: pass
      };
      s3.upload(params, function(err, data) {
        debug('S3 upload done');
        if (err) {
          debug(err);
          reject(new VError(err, 'Upload failed'));
          return;
        }
        debug('Upload succeeded');
        debug(data);
        resolve();
      }).httpUploadProgress = (progress) => {
        debug(`S3 upload progress: ${progress}`);
      };

      return pass;
    }

    const inputStream = fs.createReadStream(backupTmpFilePath);
    const s3Stream = uploadFromStream(s3, bucket, key);
    inputStream.pipe(s3Stream);
  });
}

/**
 * Remove creds from a URL, e.g., before logging
 *
 * @param {string} originalUrl URL to safen
 */
function s(originalUrl) {
  const parts = new url.URL(originalUrl);
  return url.format(parts, { auth: false });
}

main();
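For reference, the `couchbackup.backup` call used in `createBackupFile` above accepts any writable stream, so the intermediary file can just as well be compressed on the way to disk. A minimal sketch (the source URL and output file name here are placeholders, not part of the example script):

'use strict';

// Sketch only: stream a backup through gzip to a local file.
const fs = require('fs');
const zlib = require('zlib');
const couchbackup = require('@cloudant/couchbackup');

const gzip = zlib.createGzip();
gzip.pipe(fs.createWriteStream('backup.json.gz'));

couchbackup.backup(
  'https://user:pass@host/db', // placeholder source URL
  gzip,
  (err) => {
    if (err) {
      console.error('Backup failed', err);
      process.exitCode = 1;
    } else {
      console.log('Backup complete');
    }
  }
);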

@@ -1,60 +0,424 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';

const error = require('../includes/error.js');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const restoreDebug = debug('couchbackup:restore');
const restoreBatchDebug = debug('couchbackup:restore:batch');

restoreDebug.enabled = true;

try {
  const program = parser.parseRestoreArgs();
  const databaseUrl = cliutils.databaseUrl(program.url, program.db);
  const opts = {
    bufferSize: program.bufferSize,
    parallelism: program.parallelism,
    requestTimeout: program.requestTimeout,
    iamApiKey: program.iamApiKey,
    iamTokenUrl: program.iamTokenUrl
  };

  // log configuration to console
  console.error('='.repeat(80));
  console.error('Performing restore on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
  console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
  console.error('='.repeat(80));

  restoreBatchDebug.enabled = !program.quiet;

  return couchbackup.restore(
    process.stdin, // restore from stdin
    databaseUrl,
    opts,
    error.terminationCallback
  ).on('restored', function(obj) {
    restoreBatchDebug('restored', obj.total);
  }).on('error', function(e) {
    restoreDebug('ERROR', e);
  }).on('finished', function(obj) {
    restoreDebug('finished', obj);
  });
} catch (err) {
  error.terminationCallback(err);
}
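The same restore entry point works programmatically from any readable stream, not just `process.stdin`. A sketch reading a backup file directly (the file name and target URL are placeholders):

'use strict';

// Sketch only: programmatic restore from a file instead of stdin.
const fs = require('fs');
const couchbackup = require('@cloudant/couchbackup');

couchbackup.restore(
  fs.createReadStream('animaldb_backup.json'), // placeholder backup file
  'https://user:pass@host/targetdb',           // placeholder target database
  { bufferSize: 500, parallelism: 5 },
  (err) => {
    if (err) {
      console.error('Restore failed', err);
    }
  }
).on('restored', function(obj) {
  console.log('restored', obj.total);
});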
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:22" time="0.092">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.002">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.024">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.004">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:22" time="4.481">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.948">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.809">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.613">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:26" time="10.247">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.547">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.416">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.502">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:36" time="5.281">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.308">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.167">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.021">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:42" time="13.086">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.454">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.225">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.605">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T03:59:55" time="5.367">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.904">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.526">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.151">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:00:00" time="6.937">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.259">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.316">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.572">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:00:07" time="303.027">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.189">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="300.053">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:05:10" time="474.031">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.593">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="470.651">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:04" time="2.636">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.366">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:07" time="0.285">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.012">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:07" time="2.282">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.875">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.873">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:09" time="5.167">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.87">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.76">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.733">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:15" time="6.456">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.309">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.182">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.162">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:21" time="31.707">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="15.619">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="15.551">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:53" time="0.009">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:53" time="0.083">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.021">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.012">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:53" time="0.13">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.006">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.003">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.022">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.028">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.036">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.009">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:53" time="3.154">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.309">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.336">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.299">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.307">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.368">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.258">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.256">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.336">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.35">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.327">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:56" time="2.91">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.287">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.349">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.285">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.299">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.293">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.321">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.36">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.348">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.359">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:59" time="0.002">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:59" time="0.001">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:59" time="0.039">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.016">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:59" time="0.011">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:59" time="0.004">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:13:59" time="10.589">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.015">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.018">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.005">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.013">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.509">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.008">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:10" time="0.015">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.01">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:10" time="0.557">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.016">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.522">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.015">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:10" time="14.606">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.014">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.016">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.59">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="8.984">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:14:25" time="37.213">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="36.943">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2024-01-03T04:15:02" time="4.096">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.023">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.007">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.018">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.014">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.023">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>
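The `#unit Configuration` suite above checks that each `COUCH_*` environment variable is honoured when the matching command-line flag is absent. A sketch of driving the CLI that way (the values are placeholders, and a globally installed `couchbackup` binary is assumed):

'use strict';

// Sketch only: configure the couchbackup CLI via environment variables.
const { spawn } = require('node:child_process');
const fs = require('fs');

const child = spawn('couchbackup', [], {
  env: {
    ...process.env,
    COUCH_URL: 'https://user:pass@host', // placeholder
    COUCH_DATABASE: 'animaldb',          // placeholder
    COUCH_BUFFER_SIZE: '500',
    COUCH_PARALLELISM: '5'
  }
});

// the backup is written to stdout by default
child.stdout.pipe(fs.createWriteStream('animaldb_backup.json'));
child.stderr.pipe(process.stderr);
child.on('close', (code) => console.log(`couchbackup exited with code ${code}`));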
{
"name": "@cloudant/couchbackup",
"version": "2.9.16-SNAPSHOT.181",
"version": "2.9.16-SNAPSHOT.182",
"description": "CouchBackup - command-line backup utility for Cloudant/CouchDB",

@@ -23,3 +23,3 @@ "homepage": "https://github.com/IBM/couchbackup",

"dependencies": {
"@ibm-cloud/cloudant": "0.8.0",
"@ibm-cloud/cloudant": "0.8.1",
"async": "3.2.5",

@@ -26,0 +26,0 @@ "commander": "11.1.0",
