
@cloudant/couchbackup


@cloudant/couchbackup - npm Package Compare versions

Comparing version 2.9.15-SNAPSHOT.174 to 2.9.15-SNAPSHOT.175

.scannerwork/scanner-report/changesets-11.pb

4

.scannerwork/report-task.txt

@@ -6,3 +6,3 @@ projectKey=couchbackup

 dashboardUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/dashboard?id=couchbackup&branch=main
-ceTaskId=AYwTpQVIaQv5QKkt-XEG
-ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYwTpQVIaQv5QKkt-XEG
+ceTaskId=AYwbxp37aQv5QKkt-XIL
+ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYwbxp37aQv5QKkt-XIL

@@ -1,102 +0,190 @@

<testsuites name="test-iam">
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:10" time="4.767">
<testcase classname="test-iam.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="1.094">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.838">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.709">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:14" time="11.301">
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="3.14">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.703">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.657">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:26" time="5.262">
<testcase classname="test-iam.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.24">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.172">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.056">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:31" time="12.7">
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.298">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.298">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.315">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:44" time="5.283">
<testcase classname="test-iam.Compression tests using API" name="should backup animaldb to a compressed file" time="0.837">
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.473">
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.189">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:49" time="7.112">
<testcase classname="test-iam.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.288">
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.246">
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.787">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:56" time="310.728">
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore animaldb" time="2.146">
</testcase>
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="307.776">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:46:07" time="489.886">
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.899">
</testcase>
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="486.209">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:17" time="2.827">
<testcase classname="test-iam.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.554">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:20" time="0.278">
<testcase classname="test-iam.Write error tests" name="calls callback with error set when stream is not writeable" time="0.013">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:20" time="2.537">
<testcase classname="test-iam.Event tests" name="should get a finished event when using stdout" time="1.059">
</testcase>
<testcase classname="test-iam.Event tests" name="should get a finished event when using file output" time="0.913">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:22" time="5.149">
<testcase classname="test-iam.Resume tests using API" name="should create a log file" time="0.87">
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.747">
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.735">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:28" time="6.837">
<testcase classname="test-iam.Resume tests using CLI" name="should create a log file" time="1.322">
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.523">
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.194">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:34" time="32.893">
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m" time="16.278">
</testcase>
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.08">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:07" time="56.987">
<testcase classname="test-iam.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="56.708">
</testcase>
</testsuite>
</testsuites>
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Small script which backs up a Cloudant or CouchDB database to an S3
// bucket, using an intermediary file on disk.
//
// The script generates the backup object name by combining together the path
// part of the database URL and the current time.
'use strict';
const stream = require('stream');
const fs = require('fs');
const url = require('url');
const AWS = require('aws-sdk');
const couchbackup = require('@cloudant/couchbackup');
const debug = require('debug')('s3-backup');
const tmp = require('tmp');
const VError = require('verror').VError;
/*
Main function, run from base of file.
*/
function main() {
const argv = require('yargs')
.usage('Usage: $0 [options]')
.example('$0 -s https://user:pass@host/db -b <bucket>', 'Backup db to bucket')
.options({
source: { alias: 's', nargs: 1, demandOption: true, describe: 'Source database URL' },
bucket: { alias: 'b', nargs: 1, demandOption: true, describe: 'Destination bucket' },
prefix: { alias: 'p', nargs: 1, describe: 'Prefix for backup object key', default: 'couchbackup' },
s3url: { nargs: 1, describe: 'S3 endpoint URL' },
awsprofile: { nargs: 1, describe: 'The profile section to use in the ~/.aws/credentials file', default: 'default' }
})
.help('h').alias('h', 'help')
.epilog('Copyright (C) IBM 2017')
.argv;
const sourceUrl = argv.source;
const backupBucket = argv.bucket;
const backupName = new url.URL(sourceUrl).pathname.split('/').filter(function(x) { return x; }).join('-');
const backupKeyPrefix = `${argv.prefix}-${backupName}`;
const backupKey = `${backupKeyPrefix}-${new Date().toISOString()}`;
const backupTmpFile = tmp.fileSync();
const s3Endpoint = argv.s3url;
const awsProfile = argv.awsprofile;
// Creds are from ~/.aws/credentials, environment etc. (see S3 docs).
const awsOpts = {
signatureVersion: 'v4',
credentials: new AWS.SharedIniFileCredentials({ profile: awsProfile })
};
if (typeof s3Endpoint !== 'undefined') {
awsOpts.endpoint = new AWS.Endpoint(s3Endpoint);
}
const s3 = new AWS.S3(awsOpts);
debug(`Creating a new backup of ${s(sourceUrl)} at ${backupBucket}/${backupKey}...`);
bucketAccessible(s3, backupBucket)
.then(() => {
return createBackupFile(sourceUrl, backupTmpFile.name);
})
.then(() => {
return uploadNewBackup(s3, backupTmpFile.name, backupBucket, backupKey);
})
.then(() => {
debug('Backup successful!');
backupTmpFile.removeCallback();
debug('done.');
})
.catch((reason) => {
debug(`Error: ${reason}`);
});
}
/**
* Return a promise that resolves if the bucket is available and
* rejects if not.
*
* @param {any} s3 S3 client object
* @param {any} bucketName Bucket name
* @returns Promise
*/
function bucketAccessible(s3, bucketName) {
return new Promise(function(resolve, reject) {
const params = {
Bucket: bucketName
};
s3.headBucket(params, function(err, data) {
if (err) {
reject(new VError(err, 'S3 bucket not accessible'));
} else {
resolve();
}
});
});
}
/**
* Use couchbackup to create a backup of the specified database to a file path.
*
* @param {any} sourceUrl Database URL
* @param {any} backupTmpFilePath Path to write file
* @returns Promise
*/
function createBackupFile(sourceUrl, backupTmpFilePath) {
return new Promise((resolve, reject) => {
couchbackup.backup(
sourceUrl,
fs.createWriteStream(backupTmpFilePath),
(err) => {
if (err) {
return reject(new VError(err, 'CouchBackup process failed'));
}
debug('couchbackup to file done; uploading to S3');
resolve('creating backup file complete');
}
);
});
}
/**
* Upload a backup file to an S3 bucket.
*
* @param {any} s3 Object store client
* @param {any} backupTmpFilePath Path of the backup file to upload
* @param {any} bucket Object store bucket name
* @param {any} key Object store key name
* @returns Promise
*/
function uploadNewBackup(s3, backupTmpFilePath, bucket, key) {
return new Promise((resolve, reject) => {
debug(`Uploading from ${backupTmpFilePath} to ${bucket}/${key}`);
function uploadFromStream(s3, bucket, key) {
const pass = new stream.PassThrough();
const params = {
Bucket: bucket,
Key: key,
Body: pass
};
s3.upload(params, function(err, data) {
debug('S3 upload done');
if (err) {
debug(err);
reject(new VError(err, 'Upload failed'));
return;
}
debug('Upload succeeded');
debug(data);
resolve();
}).on('httpUploadProgress', (progress) => {
debug(`S3 upload progress: ${JSON.stringify(progress)}`);
});
return pass;
}
const inputStream = fs.createReadStream(backupTmpFilePath);
const s3Stream = uploadFromStream(s3, bucket, key);
inputStream.pipe(s3Stream);
});
}
/**
* Remove creds from a URL, e.g., before logging
*
* @param {string} originalUrl URL to safen
*/
function s(originalUrl) {
const parts = new url.URL(originalUrl);
return url.format(parts, { auth: false });
}
main();
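A minimal sketch of running this example script, assuming Node.js with the aws-sdk and @cloudant/couchbackup dependencies installed and a default profile in ~/.aws/credentials (the script file name, database URL, and bucket name below are illustrative placeholders, not from this package):
// node s3-backup.js --source 'https://user:pass@host/exampledb' --bucket example-backup-bucket
// With the default --prefix this uploads to an object key such as:
// couchbackup-exampledb-2023-11-28T01:40:10.000Z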

@@ -1,2 +0,2 @@

-// Copyright © 2017, 2023 IBM Corp. All rights reserved.
+// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -15,436 +15,33 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global */
/* global after before describe */
'use strict';
const assert = require('node:assert');
const { once } = require('node:events');
const fs = require('node:fs');
const { PassThrough } = require('node:stream');
const { pipeline } = require('node:stream/promises');
const { createGzip, createGunzip } = require('node:zlib');
const debug = require('debug');
const { Tail } = require('tail');
const app = require('../app.js');
const dbUrl = require('../includes/cliutils.js').databaseUrl;
const compare = require('./compare.js');
const request = require('../includes/request.js');
const { cliBackup, cliDecrypt, cliEncrypt, cliGzip, cliGunzip, cliRestore } = require('./test_process.js');
const testLogger = debug('couchbackup:test:utils');
// Import the common hooks
require('../test/hooks.js');
function scenario(test, params) {
return `${test} ${(params.useApi) ? 'using API' : 'using CLI'}`;
}
const poisons = [
'normal',
'bandwidth-limit',
'latency',
'slow-read',
'rate-limit'
];
function params() {
const p = {};
for (let i = 0; i < arguments.length; i++) {
Object.assign(p, arguments[i]);
}
return p;
}
poisons.forEach(function(poison) {
describe('unreliable network tests (using poison ' + poison + ')', function() {
before('start server', function() {
// Returns the event emitter for API calls, or the child process for CLI calls
async function testBackup(params, databaseName, outputStream) {
const pipelineStreams = [];
const promises = [];
// Configure API key if needed
augmentParamsWithApiKey(params);
let backup;
let backupStream;
let backupPromise;
let tail;
if (params.useApi) {
if (params.useStdOut) {
backupStream = outputStream;
} else {
backupStream = new PassThrough();
}
const backupCallbackPromise = new Promise((resolve, reject) => {
backup = app.backup(
dbUrl(process.env.COUCH_URL, databaseName),
backupStream,
params.opts,
(err, data) => {
if (err) {
testLogger(`API backup callback with ${JSON.stringify(err)}, will reject.`);
reject(err);
} else {
testLogger(`API backup callback with ${JSON.stringify(data)}, will resolve.`);
resolve(data);
}
});
// **************************
// Currently these tests do nothing
// pending resolution of https://github.com/IBM/couchbackup/issues/360
// to add a new toxic server
// **************************
});
const backupFinishedPromise = once(backup, 'finished')
.then((summary) => {
testLogger(`Resolving API backup event promise with ${JSON.stringify(summary)}`);
if (params.resume) {
assertWrittenFewerThan(summary.total, params.exclusiveMaxExpected);
}
})
.catch((err) => {
testLogger(`Rejecting API backup event promise with error ${JSON.stringify(err)}`);
throw err;
});
backupPromise = Promise.all([backupCallbackPromise, backupFinishedPromise])
.then(() => {
testLogger('Both API backup promises resolved.');
});
} else {
backup = cliBackup(databaseName, params);
backupStream = backup.stream;
backupPromise = backup.childProcessPromise;
if (params.abort) {
// Create the log file for abort tests so we can tail it; in other tests
// the log file is created normally by the backup process itself.
const f = fs.openSync(params.opts.log, 'w');
fs.closeSync(f);
// Use tail to watch the log file for a batch to be completed then abort
tail = new Tail(params.opts.log, { useWatchFile: true, fsWatchOptions: { interval: 500 }, follow: false });
tail.on('line', function(data) {
const matches = data.match(/:d batch\d+/);
if (matches !== null) {
// Turn off the tail.
tail.unwatch();
// Abort the backup
backup.childProcess.kill();
}
});
}
if (params.resume) {
const listenerPromise = new Promise((resolve, reject) => {
const listener = function(data) {
const matches = data.toString().match(/.*Finished - Total document revisions written: (\d+).*/);
if (matches !== null) {
try {
assertWrittenFewerThan(matches[1], params.exclusiveMaxExpected);
resolve();
} catch (err) {
reject(err);
}
process.stderr.removeListener('data', listener);
}
};
backup.childProcess.stderr.on('data', listener);
});
promises.push(listenerPromise);
}
}
promises.push(backupPromise);
if (!params.useStdOut) {
pipelineStreams.push(backupStream);
}
if (params.compression) {
if (params.useApi) {
pipelineStreams.push(createGzip());
} else {
const gzipProcess = cliGzip();
pipelineStreams.push(gzipProcess.stream);
promises.push(gzipProcess.childProcessPromise);
}
}
// Pipe via encryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
const encryptProcess = cliEncrypt();
pipelineStreams.push(encryptProcess.stream);
promises.push(encryptProcess.childProcessPromise);
}
}
if (!params.useStdOut) {
// Finally add the outputStream to the list we want to pipeline
pipelineStreams.push(outputStream);
// Create the promisified pipeline and add it to the array of promises we'll wait for
promises.unshift(pipeline(pipelineStreams));
}
// Wait for the promises and then assert
return Promise.all(promises)
.then(() => testLogger('All backup promises resolved.'))
.then(() => {
if (params.expectedBackupError) {
return Promise.reject(new Error('Backup passed when it should have failed.'));
}
})
.catch((err) => {
if (params.expectedBackupError || params.abort) {
if (params.useApi) {
assert.strictEqual(err.name, params.expectedBackupError.name, 'The backup should receive the expected error.');
} else {
if (params.abort) {
// The tail should already be stopped when we match a line and abort,
// but make sure it is stopped in case something went wrong
tail.unwatch();
// Assert that the process was aborted as expected
assert.strictEqual(err.signal, 'SIGTERM', `The backup should have terminated with SIGTERM, but was ${err.signal}.`);
} else if (params.expectedBackupError) {
assert.strictEqual(err.code, params.expectedBackupError.code, `The backup exited with unexpected code ${err.code} and signal ${err.signal}.`);
}
}
} else {
return Promise.reject(err);
}
after('stop server', function() {
});
}
async function testRestore(params, inputStream, databaseName) {
const pipelineStreams = [inputStream];
const promises = [];
// Configure API key if needed
augmentParamsWithApiKey(params);
let restore;
let restoreStream;
let restorePromise;
if (params.useApi) {
restoreStream = new PassThrough();
const restoreCallbackPromise = new Promise((resolve, reject) => {
restore = app.restore(
restoreStream,
dbUrl(process.env.COUCH_URL, databaseName),
params.opts,
(err, data) => {
if (err) {
testLogger(`API restore callback with ${err}, will reject.`);
reject(err);
} else {
resolve(data);
}
});
});
const restoreFinishedPromise = once(restore, 'finished')
.then((summary) => {
testLogger(`Resolving API restore promise with ${summary}`);
})
.catch((err) => {
testLogger(`Handling API restore error event ${JSON.stringify(err)}`);
if (params.expectedRestoreErrorRecoverable) {
testLogger(`Expecting restore error ${params.expectedRestoreErrorRecoverable.name}`);
assert.strictEqual(err.name, params.expectedRestoreErrorRecoverable.name, 'The restore should receive the expected recoverable error.');
} else {
testLogger(`API restore will reject by throwing error event ${JSON.stringify(err)}`);
return Promise.reject(err);
}
});
restorePromise = Promise.all([restoreCallbackPromise, restoreFinishedPromise]);
} else {
restore = cliRestore(databaseName, params);
restoreStream = restore.stream;
restorePromise = restore.childProcessPromise;
}
promises.push(restorePromise);
// Pipe via decompression if requested
if (params.compression) {
if (params.useApi) {
pipelineStreams.push(createGunzip());
} else {
const gunzipProcess = cliGunzip();
pipelineStreams.push(gunzipProcess.stream);
promises.push(gunzipProcess.childProcessPromise);
}
}
// Pipe via decryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
const decryptProcess = cliDecrypt();
pipelineStreams.push(decryptProcess.stream);
promises.push(decryptProcess.childProcessPromise);
}
}
// pipeline everything into the restoreStream
pipelineStreams.push(restoreStream);
// Create the promisified pipeline and add it to the array of promises we'll wait for
promises.unshift(pipeline(pipelineStreams));
// Wait for all the promises to settle and then assert based on the process promise
return Promise.allSettled(promises)
.then(() => { return restorePromise; })
.then((summary) => {
testLogger(`Restore promise resolved with ${summary}.`);
if (params.expectedRestoreError) {
return Promise.reject(new Error('Restore passed when it should have failed.'));
}
})
.catch((err) => {
testLogger(`Restore promise rejected with ${err}.`);
if (params.expectedRestoreError) {
if (params.useApi) {
assert.strictEqual(err.name, params.expectedRestoreError.name, 'The restore should receive the expected error.');
} else {
assert.strictEqual(err.code, params.expectedRestoreError.code, `The restore exited with unexpected code ${err.code} and signal ${err.signal}.`);
}
} else {
return Promise.reject(err);
}
});
}
// Serial backup and restore via a file on disk
async function testBackupAndRestoreViaFile(params, srcDb, backupFile, targetDb) {
return testBackupToFile(params, srcDb, backupFile).then(() => {
return testRestoreFromFile(params, backupFile, targetDb);
delete require.cache[require.resolve('../test/ci_e2e.js')];
require('../test/ci_e2e.js');
});
}
async function testBackupToFile(params, srcDb, backupFile) {
// Open the file for appending if this is a resume
const output = fs.createWriteStream(backupFile, { flags: (params.opts && params.opts.resume) ? 'a' : 'w' });
return once(output, 'open')
.then(() => {
return testBackup(params, srcDb, output);
});
}
async function testRestoreFromFile(params, backupFile, targetDb) {
const input = fs.createReadStream(backupFile);
return once(input, 'open')
.then(() => {
return testRestore(params, input, targetDb);
});
}
async function testDirectBackupAndRestore(params, srcDb, targetDb) {
// Allow a 64 MB highWaterMark for the passthrough during testing
const passthrough = new PassThrough({ highWaterMark: 67108864 });
const backup = testBackup(params, srcDb, passthrough);
const restore = testRestore(params, passthrough, targetDb);
return Promise.all([backup, restore]).then(() => {
return dbCompare(srcDb, targetDb);
});
}
async function testBackupAbortResumeRestore(params, srcDb, backupFile, targetDb) {
return Promise.resolve()
.then(() => {
// First backup with an abort
if (params.opts && params.opts.output) {
return testBackup(params, srcDb, new PassThrough());
} else {
return testBackupToFile(params, srcDb, backupFile);
}
}).then(() => {
// Remove the abort parameter and add the resume parameter
delete params.abort;
params.opts.resume = true;
// Resume the backup
if (params.opts && params.opts.output) {
return testBackup(params, srcDb, new PassThrough());
} else {
return testBackupToFile(params, srcDb, backupFile);
}
}).then(() => {
// Restore the backup
return testRestoreFromFile(params, backupFile, targetDb);
}).then(() => {
// Now compare the restored to the original for validation
return dbCompare(srcDb, targetDb);
});
}
async function dbCompare(db1Name, db2Name) {
const client = request.client(process.env.COUCH_BACKEND_URL, {});
return compare.compare(db1Name, db2Name, client.service)
.then(result => {
return assert.strictEqual(result, true, 'The database comparison should succeed, but failed');
});
}
function sortByIdThenRev(o1, o2) {
if (o1._id < o2._id) return -1;
if (o1._id > o2._id) return 1;
if (o1._rev < o2._rev) return -1;
if (o1._rev > o2._rev) return 1;
return 0;
}
function readSortAndDeepEqual(actualContentPath, expectedContentPath) {
const backupContent = JSON.parse(fs.readFileSync(actualContentPath, 'utf8'));
const expectedContent = JSON.parse(fs.readFileSync(expectedContentPath, 'utf8'));
// Array order of the docs is important for equality, but not for backup
backupContent.sort(sortByIdThenRev);
expectedContent.sort(sortByIdThenRev);
// Assert that the backup matches the expected
assert.deepStrictEqual(backupContent, expectedContent);
}
function setTimeout(context, timeout) {
// Increase timeout using TEST_TIMEOUT_MULTIPLIER
const multiplier = (typeof process.env.TEST_TIMEOUT_MULTIPLIER !== 'undefined') ? parseInt(process.env.TEST_TIMEOUT_MULTIPLIER) : 1;
timeout *= multiplier;
// Set the mocha timeout
context.timeout(timeout * 1000);
}
function assertGzipFile(path) {
// 1f 8b is the gzip magic number
const expectedBytes = Buffer.from([0x1f, 0x8b]);
const buffer = Buffer.alloc(2);
const fd = fs.openSync(path, 'r');
// Read the first two bytes
fs.readSync(fd, buffer, 0, 2, 0);
fs.closeSync(fd);
// Assert the magic number corresponds to gz extension
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be gz compressed.');
}
function assertEncryptedFile(path) {
// Openssl encrypted files start with Salted
const expectedBytes = Buffer.from('Salted');
const buffer = Buffer.alloc(6);
const fd = fs.openSync(path, 'r');
// Read the first six bytes
fs.readSync(fd, buffer, 0, 6, 0);
fs.closeSync(fd);
// Assert first 6 characters of the file are "Salted"
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be encrypted.');
}
function assertWrittenFewerThan(total, number) {
assert(total < number && total > 0, `Saw ${total} but expected between 1 and ${number - 1} documents for the resumed backup.`);
}
function augmentParamsWithApiKey(params) {
if (process.env.COUCHBACKUP_TEST_IAM_API_KEY) {
if (!params.opts) {
params.opts = {};
}
params.opts.iamApiKey = process.env.COUCHBACKUP_TEST_IAM_API_KEY;
params.opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
module.exports = {
scenario,
p: params,
setTimeout,
dbCompare,
readSortAndDeepEqual,
assertGzipFile,
assertEncryptedFile,
testBackup,
testRestore,
testDirectBackupAndRestore,
testBackupToFile,
testRestoreFromFile,
testBackupAndRestoreViaFile,
testBackupAbortResumeRestore
};
});

@@ -15,138 +15,87 @@ // Copyright © 2023 IBM Corp. All rights reserved.

/* global */
'use strict';
const { fork, spawn } = require('node:child_process');
const { once } = require('node:events');
const { Duplex } = require('node:stream');
const debug = require('debug');
const logProcess = debug('couchbackup:test:process');
const chunk = require('lodash/chunk');
const difference = require('lodash/difference');
const forOwn = require('lodash/forOwn');
const isEmpty = require('lodash/isEmpty');
const union = require('lodash/union');
class TestProcess {
constructor(cmd, args, mode) {
this.cmd = cmd;
// Child process stdio [stdin, stdout, stderr, ...extra channels]
const childProcessOptions = { stdio: [] };
switch (mode) {
case 'readable':
// Readable only, no writing to stdin so ignore it
childProcessOptions.stdio = ['ignore', 'pipe', 'inherit'];
break;
case 'writable':
// Writable only, no reading from stdout so ignore it
childProcessOptions.stdio = ['pipe', 'ignore', 'inherit'];
break;
default:
// Default Duplex mode pipe both stdin and stdout
childProcessOptions.stdio = ['pipe', 'pipe', 'inherit'];
break;
}
if (cmd.endsWith('.js')) {
// Add Node fork ipc channel
childProcessOptions.stdio.push('ipc');
logProcess(`Forking Node process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = fork(cmd, args, childProcessOptions);
} else {
logProcess(`Spawning process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = spawn(cmd, args, childProcessOptions);
}
const compare = async function(database1, database2, client) {
// check docs same in both dbs
const allDocs1 = await getAllDocs(client, database1);
const allDocs2 = await getAllDocs(client, database2);
this.childProcessPromise = once(this.childProcess, 'close').then(() => {
const code = this.childProcess.exitCode;
const signal = this.childProcess.signalCode;
logProcess(`Test process ${cmd} closed with code ${code} and signal ${signal}`);
if (code === 0) {
logProcess(`Resolving process promise for ${cmd}`);
return Promise.resolve(code);
} else {
const e = new Error(`Test child process ${cmd} exited with code ${code} and signal ${signal}. This may be normal for error case testing.`);
e.code = code;
e.signal = signal;
logProcess(`Will reject process promise for ${cmd} with ${e}`);
return Promise.reject(e);
}
});
const onlyInDb1 = (difference(allDocs1, allDocs2));
const onlyInDb2 = (difference(allDocs2, allDocs1));
switch (mode) {
case 'readable':
this.duplexFrom = this.childProcess.stdout;
break;
case 'writable':
this.duplexFrom = this.childProcess.stdin;
break;
default:
// Default is duplex
this.duplexFrom = { writable: this.childProcess.stdin, readable: this.childProcess.stdout };
}
let databasesSame = isEmpty(onlyInDb1) && isEmpty(onlyInDb2);
this.stream = Duplex.from(this.duplexFrom);
if (!databasesSame) {
console.log(onlyInDb1.length + ' documents only in db 1.');
console.log('Document IDs only in db 1: ' + onlyInDb1);
console.log(onlyInDb2.length + ' documents only in db 2.');
console.log('Document IDs only in db 2: ' + onlyInDb2);
}
}
module.exports = {
TestProcess,
cliBackup: function(databaseName, params = {}) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.mode) {
args.push('--mode');
args.push(params.opts.mode);
// check revs same in docs common to both dbs
const partitionSize = 500;
const batches = chunk(union(allDocs1, allDocs2), partitionSize);
const missingRevsInDb2 = await getMissingRevs(client, database1, database2, batches);
const missingRevsInDb1 = await getMissingRevs(client, database2, database1, batches);
databasesSame = databasesSame && isEmpty(missingRevsInDb1) && isEmpty(missingRevsInDb2);
if (!databasesSame) {
console.log('Missing revs in db 1:' + JSON.stringify(missingRevsInDb1));
console.log('Missing revs in db 2:' + JSON.stringify(missingRevsInDb2));
}
return databasesSame;
};
const getMissingRevs = async (client, databaseName1, databaseName2, batches) => {
const fakeRevisionId = '9999-a';
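// An artificially high revision number: _revs_diff reports it as missing for
// every document and returns each doc's known leaf revisions as
// possible_ancestors, which are then checked against the other database.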
const missing = {};
// look in db1 - use a fake revision ID to fetch all leaf revisions
for (const batch of batches) {
const documentRevisions = {};
batch.forEach(id => (documentRevisions[id] = [fakeRevisionId]));
const result1 = await client.postRevsDiff({ db: databaseName1, documentRevisions });
const revsDiffRequestDb2 = {};
forOwn(result1.result, (v, k) => (revsDiffRequestDb2[k] = v.possible_ancestors));
// look in db2
const result2 = await client.postRevsDiff({ db: databaseName2, documentRevisions: revsDiffRequestDb2 });
forOwn(result2.result, (v, k) => {
if ('missing' in v) {
missing[k] = v.missing;
}
if (params.opts.output) {
args.push('--output');
args.push(params.opts.output);
}
if (params.opts.log) {
args.push('--log');
args.push(params.opts.log);
}
if (params.opts.resume) {
args.push('--resume');
args.push(params.opts.resume);
}
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchbackup.bin.js', args, 'readable');
},
cliRestore: function(databaseName, params) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.parallelism) {
args.push('--parallelism');
args.push(params.opts.parallelism);
}
if (params.opts.requestTimeout) {
args.push('--request-timeout');
args.push(params.opts.requestTimeout);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchrestore.bin.js', args, 'writable');
},
cliGzip: function() {
return new TestProcess('gzip', []);
},
cliGunzip: function() {
return new TestProcess('gunzip', []);
},
cliEncrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-pass', 'pass:12345']);
},
cliDecrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-d', '-pass', 'pass:12345']);
});
}
return missing;
};
const getAllDocs = async function(client, database) {
let allDocIds = [];
const limit = 2000;
let startKey = '\u0000';
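// Page through _all_docs: '\u0000' sorts before any real document ID, and
// appending '\u0000' to the last ID of a full page gives an exclusive start
// key for the next page.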
do {
const pageOfDocIds = (await client.postAllDocs({ db: database, startKey, limit })).result.rows.map(r => r.id);
allDocIds = allDocIds.concat(pageOfDocIds);
if (pageOfDocIds.length < limit) {
startKey = null;
} else {
startKey = pageOfDocIds[limit - 1] + '\u0000';
}
} while (startKey != null);
return allDocIds;
};
module.exports = {
compare
};

@@ -18,21 +18,171 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

delete require.cache[require.resolve('./citestutils.js')];
const u = require('./citestutils.js');
const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const nock = require('nock');
const util = require('util');
const backupPromise = util.promisify(backup);
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('End to end backup and restore', params), function() {
it('should backup and restore animaldb', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
return u.testDirectBackupAndRestore(params, 'animaldb', this.dbName);
});
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl and reply with a 404, so validation passes and the backup
// then fails with the expected DatabaseNotFound error.
const validateArgs = async function(url, opts, errorValidationForAssertRejects) {
const nullStream = fs.createWriteStream('/dev/null');
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
}
return assert.rejects(backupPromise(url, nullStream, opts), errorValidationForAssertRejects);
};
it('should backup and restore largedb1g #slow', async function() {
// Allow up to 30 m for backup and restore of largedb1g
// This is a long time but when many builds run in parallel it can take a
// while to get this done.
u.setTimeout(this, 30 * 60);
return u.testDirectBackupAndRestore(params, 'largedb1g', this.dbName);
});
const validateShallowModeArgs = async function(url, opts, msg) {
// We pass assertNoValidationError because for these shallow opts
// we are expecting only a stderr warning
return validateArgs(url, opts, assertNoValidationError()).then(() => {
// Assert the warning message was in stderr
assert(capturedStderr.indexOf(msg) > -1, 'Log warning message was not present');
});
};
const stderrWriteFun = process.stderr.write;
let capturedStderr;
function captureStderr() {
process.stderr.write = function(string, encoding, fd) {
capturedStderr += string;
};
}
function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}
// Return a validation object for use with assert.rejects
function assertErrorMessage(msg) {
return { name: 'InvalidOption', message: msg };
}
// For cases where validation should pass we reach a real backup that hits a 404
// mock, so a DatabaseNotFound error is the expected result in the
// assertNoValidationError case.
function assertNoValidationError() { return { name: 'DatabaseNotFound' }; }
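// A minimal standalone sketch of this assert.rejects validation-object
// pattern (the error name and message here are hypothetical examples):
//   const assert = require('node:assert');
//   await assert.rejects(
//     Promise.reject(Object.assign(new Error('Invalid URL'), { name: 'InvalidOption' })),
//     // every property of the validation object must match the rejected error
//     { name: 'InvalidOption', message: 'Invalid URL' }
//   );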
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', async function() {
return validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string'));
});
it('returns no error for valid URL type', async function() {
return validateArgs(goodUrl, {}, assertNoValidationError());
});
it('returns error for invalid (no host) URL', async function() {
return validateArgs('http://', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (no protocol) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (wrong protocol) URL', async function() {
return validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.'));
});
it('returns error for invalid (no path) URL', async function() {
return validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).'));
});
it('returns error for invalid (no protocol, no host) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: 123 }, assertNoValidationError());
});
it('returns error for invalid log type', async function() {
return validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string'));
});
it('returns no error for valid log type', async function() {
return validateArgs(goodUrl, { log: 'log.txt' }, assertNoValidationError());
});
it('returns error for invalid mode type', async function() {
return validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns error for invalid mode string', async function() {
return validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns no error for valid mode type', async function() {
return validateArgs(goodUrl, { mode: 'full' }, assertNoValidationError());
});
it('returns error for invalid output type', async function() {
return validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string'));
});
it('returns no error for valid output type', async function() {
return validateArgs(goodUrl, { output: 'output.txt' }, assertNoValidationError());
});
it('returns error for invalid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: 123 }, assertNoValidationError());
});
it('returns error for invalid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: 123 }, assertNoValidationError());
});
it('returns error for invalid resume type', async function() {
return validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean'));
});
it('returns no error for valid resume type', async function() {
return validateArgs(goodUrl, { resume: false }, assertNoValidationError());
});
it('returns error for invalid key type', async function() {
return validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string'));
});
it('returns error for key and URL credentials supplied', async function() {
return validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.'));
});
it('warns for log arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test' },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for resume arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for parallelism arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', parallelism: 10 },
'the option "parallelism" has no effect when using shallow mode.').finally(
() => {
releaseStderr();
});
});
});

@@ -18,171 +18,13 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const nock = require('nock');
const util = require('util');
const backupPromise = util.promisify(backup);
const u = require('./citestutils.js');
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl and reply with a 404, so validation passes and the backup
// then fails with the expected DatabaseNotFound error.
const validateArgs = async function(url, opts, errorValidationForAssertRejects) {
const nullStream = fs.createWriteStream('/dev/null');
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
}
return assert.rejects(backupPromise(url, nullStream, opts), errorValidationForAssertRejects);
};
const validateShallowModeArgs = async function(url, opts, msg) {
// We pass assertNoValidationError because for these shallow opts
// we are expecting only a stderr warning
return validateArgs(url, opts, assertNoValidationError()).then(() => {
// Assert the warning message was in stderr
assert(capturedStderr.indexOf(msg) > -1, 'Log warning message was not present');
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#slowest End to end backup and restore', params), function() {
// 10 GB is about the largest the CI can handle before getting very upset
// about how long things are taking
it('should backup and restore largedb10g', async function() {
u.setTimeout(this, 350 * 60);
return u.testDirectBackupAndRestore(params, 'largedb10g', this.dbName);
});
});
};
const stderrWriteFun = process.stderr.write;
let capturedStderr;
function captureStderr() {
process.stderr.write = function(string, encoding, fd) {
capturedStderr += string;
};
}
function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}
// Return a validation object for use with assert.rejects
function assertErrorMessage(msg) {
return { name: 'InvalidOption', message: msg };
}
// For cases where validation should pass we reach a real backup that hits a 404
// mock, so a DatabaseNotFound error is the expected result in the
// assertNoValidationError case.
function assertNoValidationError() { return { name: 'DatabaseNotFound' }; }
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', async function() {
return validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string'));
});
it('returns no error for valid URL type', async function() {
return validateArgs(goodUrl, {}, assertNoValidationError());
});
it('returns error for invalid (no host) URL', async function() {
return validateArgs('http://', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (no protocol) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (wrong protocol) URL', async function() {
return validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.'));
});
it('returns error for invalid (no path) URL', async function() {
return validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).'));
});
it('returns error for invalid (no protocol, no host) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: 123 }, assertNoValidationError());
});
it('returns error for invalid log type', async function() {
return validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string'));
});
it('returns no error for valid log type', async function() {
return validateArgs(goodUrl, { log: 'log.txt' }, assertNoValidationError());
});
it('returns error for invalid mode type', async function() {
return validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns error for invalid mode string', async function() {
return validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns no error for valid mode type', async function() {
return validateArgs(goodUrl, { mode: 'full' }, assertNoValidationError());
});
it('returns error for invalid output type', async function() {
return validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string'));
});
it('returns no error for valid output type', async function() {
return validateArgs(goodUrl, { output: 'output.txt' }, assertNoValidationError());
});
it('returns error for invalid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: 123 }, assertNoValidationError());
});
it('returns error for invalid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: 123 }, assertNoValidationError());
});
it('returns error for invalid resume type', async function() {
return validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean'));
});
it('returns no error for valid resume type', async function() {
return validateArgs(goodUrl, { resume: false }, assertNoValidationError());
});
it('returns error for invalid key type', async function() {
return validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string'));
});
it('returns error for key and URL credentials supplied', async function() {
return validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.'));
});
it('warns for log arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test' },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for resume arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for parallelism arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', parallelism: 10 },
'the option "parallelism" has no effect when using shallow mode.').finally(
() => {
releaseStderr();
});
});
});

@@ -15,200 +15,117 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it beforeEach */
/* global describe it before after */
'use strict';
const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const error = require('../includes/error.js');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;
const url = 'http://localhost:7777/testdb';
const db = request.client(url, { parallelism: 1 });
const timeoutDb = request.client(url, { parallelism: 1, requestTimeout: 500 });
const longTestTimeout = 3000;
describe('#unit Configuration', function() {
let processEnvCopy;
beforeEach('Clean nock', function() {
nock.cleanAll();
});
before('Save env', function() {
  // Copy env so we can reset it after the tests
  processEnvCopy = JSON.parse(JSON.stringify(process.env));
});

after('Reset env', function() {
  process.env = processEnvCopy;
});

it('respects the COUCH_URL env variable', function() {
  process.env.COUCH_URL = 'http://user:pass@myurl.com';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.url, 'string');
  assert.strictEqual(config.url, process.env.COUCH_URL);
});

it('respects the COUCH_DATABASE env variable', function() {
  process.env.COUCH_DATABASE = 'mydb';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.db, 'string');
  assert.strictEqual(config.db, process.env.COUCH_DATABASE);
});

it('respects the COUCH_BUFFER_SIZE env variable', function() {
  process.env.COUCH_BUFFER_SIZE = '1000';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.bufferSize, 'number');
  assert.strictEqual(config.bufferSize, 1000);
});

it('respects the COUCH_PARALLELISM env variable', function() {
  process.env.COUCH_PARALLELISM = '20';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.parallelism, 'number');
  assert.strictEqual(config.parallelism, 20);
});

it('respects the COUCH_REQUEST_TIMEOUT env variable', function() {
  process.env.COUCH_REQUEST_TIMEOUT = '10000';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.requestTimeout, 'number');
  assert.strictEqual(config.requestTimeout, 10000);
});

it('respects the CLOUDANT_IAM_API_KEY env variable', function() {
  const key = 'ABC123-ZYX987_cba789-xyz321';
  process.env.CLOUDANT_IAM_API_KEY = key;
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.iamApiKey, 'string');
  assert.strictEqual(config.iamApiKey, key);
});

it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function() {
  const u = 'https://testhost.example:1234/identity/token';
  process.env.CLOUDANT_IAM_TOKEN_URL = u;
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.iamTokenUrl, 'string');
  assert.strictEqual(config.iamTokenUrl, u);
});

it('respects the COUCH_LOG env variable', function() {
  process.env.COUCH_LOG = 'my.log';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.log, 'string');
  assert.strictEqual(config.log, process.env.COUCH_LOG);
});

it('respects the COUCH_RESUME env variable', function() {
  process.env.COUCH_RESUME = 'true';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.resume, 'boolean');
  assert.strictEqual(config.resume, true);
});

it('respects the COUCH_OUTPUT env variable', function() {
  process.env.COUCH_OUTPUT = 'myfile.txt';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.output, 'string');
  assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
});

it('respects the COUCH_MODE env variable', function() {
  process.env.COUCH_MODE = 'shallow';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.mode, 'string');
  assert.strictEqual(config.mode, 'shallow');
});

it('respects the COUCH_QUIET env variable', function() {
  process.env.COUCH_QUIET = 'true';
  const config = {};
  applyEnvVars(config);
  assert.strictEqual(typeof config.quiet, 'boolean');
  assert.strictEqual(config.quiet, true);
});

describe('#unit Check request headers', function() {
  it('should have a couchbackup user-agent', async function() {
    const couch = nock(url)
      .matchHeader('user-agent', /couchbackup-cloudant\/\d+\.\d+\.\d+(?:-SNAPSHOT)? \(Node.js v\d+\.\d+\.\d+\)/)
      .head('/good')
      .reply(200);

    return db.service.headDocument({ db: db.db, docId: 'good' }).then(() => {
      assert.ok(couch.isDone());
    });
  });
});

describe('#unit Check request response error callback', function() {
  it('should not callback with error for 200 response', async function() {
    const couch = nock(url)
      .get('/good')
      .reply(200, { ok: true });

    return db.service.getDocument({ db: db.db, docId: 'good' }).then(response => {
      assert.ok(response.result);
      assert.ok(couch.isDone());
    });
  });

  it('should callback with error after 3 500 responses', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .reply(500, function(uri, requestBody) {
        this.req.response.statusMessage = 'Internal Server Error';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `500 Internal Server Error: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with error after 3 POST 503 responses', async function() {
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .times(3)
      .reply(503, function(uri, requestBody) {
        this.req.response.statusMessage = 'Service Unavailable';
        return { error: 'service_unavailable', reason: 'Service unavailable' };
      });

    return assert.rejects(
      db.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `503 Service Unavailable: post ${url}/_bulk_get - Error: service_unavailable, Reason: Service unavailable`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with error after 3 429 responses', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .reply(429, function(uri, requestBody) {
        this.req.response.statusMessage = 'Too Many Requests';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `429 Too Many Requests: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should callback with fatal error for 404 response', async function() {
    const couch = nock(url)
      .get('/bad')
      .reply(404, function(uri, requestBody) {
        this.req.response.statusMessage = 'Not Found';
        return { error: 'foo', reason: 'bar' };
      });

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        err = error.convertResponseError(err);
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `404 Not Found: get ${url}/bad - Error: foo, Reason: bar`);
        assert.ok(couch.isDone());
        return true;
      });
  });

  it('should callback with same error for no status code error response', async function() {
    const couch = nock(url)
      .get('/bad')
      .times(3)
      .replyWithError('testing badness');

    return assert.rejects(
      db.service.getDocument({ db: db.db, docId: 'bad' }),
      (err) => {
        const err2 = error.convertResponseError(err);
        assert.strictEqual(err, err2);
        assert.ok(couch.isDone());
        return true;
      });
  }).timeout(longTestTimeout);

  it('should retry request if HTTP request gets timed out', async function() {
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .delay(1000)
      .reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }] } })
      .post('/_bulk_get')
      .query(true)
      .reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }, { id: '2', ok: { _id: '2' } }] } });

    return timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then((response) => {
      assert.ok(response);
      assert.ok(response.result);
      assert.ok(response.result.results);
      assert.ok(response.result.results.docs);
      assert.strictEqual(response.result.results.docs.length, 2);
      assert.ok(couch.isDone());
    });
  });

  it('should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out', async function() {
    // Increase the timeout for this test to allow for the delays
    this.timeout(3000);
    const couch = nock(url)
      .post('/_bulk_get')
      .query(true)
      .delay(1000)
      .times(3)
      .reply(200, { ok: true });

    return assert.rejects(
      timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
      (err) => {
        err = error.convertResponseError(err);
        // Note axios returns ECONNABORTED rather than ESOCKETTIMEDOUT
        // See https://github.com/axios/axios/issues/2710 via https://github.com/axios/axios/issues/1543
        assert.strictEqual(err.statusText, 'ECONNABORTED');
        assert.strictEqual(err.message, `timeout of 500ms exceeded: post ${url}/_bulk_get ECONNABORTED`);
        assert.ok(couch.isDone());
        return true;
      });
  });

  describe('#unit Check credentials', async function() {
    it('should properly decode username and password', async function() {
      const username = 'user%123';
      const password = 'colon:at@321';
      const url = `http://${encodeURIComponent(username)}:${encodeURIComponent(password)}@localhost:7777/testdb`;
      const sessionUrl = 'http://localhost:7777';
      const couch = nock(sessionUrl)
        .post('/_session', { username: username, password: password })
        .reply(200, { ok: true }, { 'Set-Cookie': 'AuthSession=ABC123DEF4356;' })
        .get('/')
        .reply(200);
      const db = request.client(url, { parallelism: 1 });
      return db.service.getServerInformation().then(response => {
        assert.ok(response);
        assert.ok(couch.isDone());
      });
    });
  });
});
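The error-path behavior pinned down by these tests can be summarized in a short usage sketch. This is illustrative only: it assumes the db.service client and error.convertResponseError behave exactly as asserted above, and the fetchDocOrFail helper name is hypothetical, not part of the package.

// Hypothetical helper summarizing the behavior asserted above: the client
// retries transient failures (429/5xx/timeouts) up to its attempt limit, and
// any error that survives is converted before being surfaced to the caller.
async function fetchDocOrFail(db, docId) {
  try {
    const response = await db.service.getDocument({ db: db.db, docId });
    return response.result;
  } catch (err) {
    // Terminal HTTP errors become HTTPFatalError-style errors, e.g.
    // '404 Not Found: get <url>/bad - Error: foo, Reason: bar'.
    throw error.convertResponseError(err);
  }
}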

@@ -1,2 +1,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2018, 2023 IBM Corp. All rights reserved.
//

@@ -15,117 +15,62 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it before after */
'use strict';

const assert = require('assert');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;

describe('#unit Configuration', function() {
  let processEnvCopy;

  before('Save env', function() {
    // Copy env so we can reset it after the tests
    processEnvCopy = JSON.parse(JSON.stringify(process.env));
  });

  after('Reset env', function() {
    process.env = processEnvCopy;
  });

  it('respects the COUCH_URL env variable', function() {
    process.env.COUCH_URL = 'http://user:pass@myurl.com';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.url, 'string');
    assert.strictEqual(config.url, process.env.COUCH_URL);
  });

  it('respects the COUCH_DATABASE env variable', function() {
    process.env.COUCH_DATABASE = 'mydb';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.db, 'string');
    assert.strictEqual(config.db, process.env.COUCH_DATABASE);
  });

  it('respects the COUCH_BUFFER_SIZE env variable', function() {
    process.env.COUCH_BUFFER_SIZE = '1000';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.bufferSize, 'number');
    assert.strictEqual(config.bufferSize, 1000);
  });

  it('respects the COUCH_PARALLELISM env variable', function() {
    process.env.COUCH_PARALLELISM = '20';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.parallelism, 'number');
    assert.strictEqual(config.parallelism, 20);
  });

  it('respects the COUCH_REQUEST_TIMEOUT env variable', function() {
    process.env.COUCH_REQUEST_TIMEOUT = '10000';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.requestTimeout, 'number');
    assert.strictEqual(config.requestTimeout, 10000);
  });

  it('respects the CLOUDANT_IAM_API_KEY env variable', function() {
    const key = 'ABC123-ZYX987_cba789-xyz321';
    process.env.CLOUDANT_IAM_API_KEY = key;
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.iamApiKey, 'string');
    assert.strictEqual(config.iamApiKey, key);
  });

  it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function() {
    const u = 'https://testhost.example:1234/identity/token';
    process.env.CLOUDANT_IAM_TOKEN_URL = u;
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.iamTokenUrl, 'string');
    assert.strictEqual(config.iamTokenUrl, u);
  });

  it('respects the COUCH_LOG env variable', function() {
    process.env.COUCH_LOG = 'my.log';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.log, 'string');
    assert.strictEqual(config.log, process.env.COUCH_LOG);
  });

  it('respects the COUCH_RESUME env variable', function() {
    process.env.COUCH_RESUME = 'true';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.resume, 'boolean');
    assert.strictEqual(config.resume, true);
  });

  it('respects the COUCH_OUTPUT env variable', function() {
    process.env.COUCH_OUTPUT = 'myfile.txt';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.output, 'string');
    assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
  });

  it('respects the COUCH_MODE env variable', function() {
    process.env.COUCH_MODE = 'shallow';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.mode, 'string');
    assert.strictEqual(config.mode, 'shallow');
  });

  it('respects the COUCH_QUIET env variable', function() {
    process.env.COUCH_QUIET = 'true';
    const config = {};
    applyEnvVars(config);
    assert.strictEqual(typeof config.quiet, 'boolean');
    assert.strictEqual(config.quiet, true);
  });
});

/* global describe it */
'use strict';

const fs = require('fs');
const { once } = require('node:events');
const readline = require('readline');
const u = require('./citestutils.js');
const uuid = require('uuid').v4;

const params = { useApi: true };

describe(u.scenario('Concurrent database backups', params), function() {
  it('should run concurrent API database backups correctly #slower', async function() {
    // Allow up to 900 s to backup and compare (it should be much faster)!
    u.setTimeout(this, 900);

    const checkForEmptyBatches = async function(fileName) {
      let foundEmptyBatch = false;

      const rd = readline.createInterface({
        input: fs.createReadStream(fileName),
        output: fs.createWriteStream('/dev/null'),
        terminal: false
      });

      rd.on('line', function(line) {
        if (JSON.parse(line).length === 0) {
          // Note: Empty batch arrays indicate that the running backup is
          // incorrectly sharing a log file with another ongoing backup job.
          foundEmptyBatch = true;
        }
      });

      rd.on('close', function() {
        if (foundEmptyBatch) {
          return Promise.reject(new Error(`Log file '${fileName}' contains empty batches`));
        } else {
          return Promise.resolve();
        }
      });
    };

    const backupPromise = async function() {
      const actualBackup = `./${uuid()}`;
      const output = fs.createWriteStream(actualBackup);
      return once(output, 'open').then(() => {
        return u.testBackup(params, 'largedb1g', output);
      }).then(() => {
        return checkForEmptyBatches(actualBackup);
      });
    };

    // [1] Run 'largedb1g' database backup
    const backup1 = backupPromise();

    // [2] Run 'largedb1g' database backup
    const backup2 = backupPromise();

    return Promise.all([backup1, backup2]);
  });
});
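For reference, the env-variable mapping exercised by the configuration tests can be sketched as below. This is a minimal sketch consistent with the assertions above, not the shipped includes/config.js implementation, and it covers only a few representative variables.

// Minimal sketch (assumed shape) of an applyEnvironmentVariables-style
// mapping consistent with the tests above; remaining COUCH_* and
// CLOUDANT_IAM_* variables would follow the same pattern.
function applyEnvironmentVariablesSketch(config) {
  if (process.env.COUCH_URL) {
    config.url = process.env.COUCH_URL; // kept as a string
  }
  if (process.env.COUCH_BUFFER_SIZE) {
    config.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE, 10); // coerced to number
  }
  if (process.env.COUCH_RESUME) {
    config.resume = process.env.COUCH_RESUME === 'true'; // coerced to boolean
  }
}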
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:10" time="0.095">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:16" time="0.098">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.002">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.022">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.025">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0.001">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0.001">
</testcase>

@@ -21,9 +21,9 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0.001">
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.007">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.008">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.004">
</testcase>

@@ -34,3 +34,3 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.004">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.005">
</testcase>

@@ -41,9 +41,9 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.005">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.004">
</testcase>

@@ -56,3 +56,3 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0.001">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.005">
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.004">
</testcase>

@@ -65,108 +65,108 @@ <testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">

</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.005">
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.004">
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.003">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:10" time="4.517">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.93">
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:16" time="4.462">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.918">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.807">
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.794">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.631">
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.62">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:15" time="10.46">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.559">
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:20" time="10.364">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.576">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.463">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.435">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.63">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.557">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:25" time="5.894">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.54">
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:31" time="6.149">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.712">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.343">
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.428">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.206">
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.201">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:31" time="14.629">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.847">
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:37" time="16.111">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.865">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.778">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="6.258">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="5.201">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="5.193">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:46" time="5.369">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.87">
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:53" time="5.327">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.877">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.529">
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.51">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.172">
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.138">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:51" time="8.142">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.416">
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:58" time="7.924">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.521">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.698">
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.776">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="3.226">
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.833">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:40:59" time="303.561">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.153">
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:34:06" time="304.444">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.177">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="300.632">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="299.492">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:46:03" time="492.652">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.898">
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:39:11" time="519.792">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.833">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="488.981">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="516.198">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:15" time="3.032">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.761">
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:50" time="3.106">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.844">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:18" time="0.273">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.008">
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:53" time="0.274">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.011">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:19" time="2.303">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.873">
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:54" time="2.252">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.877">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.893">
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.849">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:21" time="5.235">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.906">
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:56" time="5.146">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.86">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.781">
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.749">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.742">
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.744">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:26" time="7.057">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.5">
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:01" time="8.546">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.58">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.355">
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.778">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.415">
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.64">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:54:33" time="33.886">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.813">
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:10" time="34.51">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="17.097">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.548">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.893">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:07" time="0.01">
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:44" time="0.009">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0.001">

@@ -194,15 +194,15 @@ </testcase>

</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:07" time="0.087">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.011">
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:44" time="0.095">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.009">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.004">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.004">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.016">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.018">
</testcase>

@@ -213,11 +213,11 @@ <testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">

</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.009">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.014">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.014">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.015">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:07" time="0.187">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.008">
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:44" time="0.118">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.007">
</testcase>

@@ -228,66 +228,66 @@ <testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.013">

</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.005">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.003">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.005">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.003">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.01">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.036">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.027">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.021">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.033">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.01">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.014">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:08" time="5.69">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.559">
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:44" time="6.778">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.53">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.517">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.683">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.523">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.576">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.495">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.611">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.559">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.681">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.583">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.528">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.715">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.5">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.584">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="1.137">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.561">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.91">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.586">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.611">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:13" time="5.22">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.507">
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:51" time="4.941">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.497">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.659">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.523">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.788">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.511">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.478">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.531">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.546">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.533">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.512">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.503">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.596">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.59">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.616">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.532">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.622">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:18" time="0.002">
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:56" time="0.002">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:18" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:56" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:18" time="0.031">
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:56" time="0.037">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.011">

@@ -305,11 +305,11 @@ </testcase>

</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0">
</testcase>

@@ -320,15 +320,15 @@ <testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">

</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">
</testcase>

@@ -339,31 +339,31 @@ <testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">

</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:18" time="0.011">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0.001">
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:56" time="0.011">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0.001">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0.001">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
</testcase>

@@ -373,14 +373,14 @@ <testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0.001">

</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:18" time="0.004">
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:56" time="0.004">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:18" time="10.582">
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:56" time="10.602">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.004">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.015">
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.017">
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.014">
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.025">
</testcase>

@@ -391,34 +391,34 @@ <testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.004">

</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.506">
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.512">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.006">
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.009">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:29" time="0.016">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.015">
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:49:07" time="0.013">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.012">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:29" time="0.556">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.017">
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:49:07" time="0.56">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.02">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.522">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.525">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.015">
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:30" time="14.944">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.013">
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:49:07" time="14.795">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.012">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.015">
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.013">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.949">
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.691">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="8.964">
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.076">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:55:45" time="41.548">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="41.279">
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:49:22" time="37.875">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="37.611">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-11-28T01:56:26" time="4.097">
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:50:00" time="4.102">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.007">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.02">
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.019">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.013">
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.017">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.021">
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.022">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Small script which backs up a Cloudant or CouchDB database to an S3
// bucket, using an intermediary file on disk.
//
// The script generates the backup object name by combining together the path
// part of the database URL and the current time.
'use strict';
const stream = require('stream');
const fs = require('fs');
const url = require('url');
const AWS = require('aws-sdk');
const couchbackup = require('@cloudant/couchbackup');
const debug = require('debug')('s3-backup');
const tmp = require('tmp');
const VError = require('verror').VError;
/*
  Main function, run from base of file.
*/
function main() {
  const argv = require('yargs')
    .usage('Usage: $0 [options]')
    .example('$0 -s https://user:pass@host/db -b <bucket>', 'Backup db to bucket')
    .options({
      source: { alias: 's', nargs: 1, demandOption: true, describe: 'Source database URL' },
      bucket: { alias: 'b', nargs: 1, demandOption: true, describe: 'Destination bucket' },
      prefix: { alias: 'p', nargs: 1, describe: 'Prefix for backup object key', default: 'couchbackup' },
      s3url: { nargs: 1, describe: 'S3 endpoint URL' },
      awsprofile: { nargs: 1, describe: 'The profile section to use in the ~/.aws/credentials file', default: 'default' }
    })
    .help('h').alias('h', 'help')
    .epilog('Copyright (C) IBM 2017')
    .argv;

  const sourceUrl = argv.source;
  const backupBucket = argv.bucket;
  const backupName = new url.URL(sourceUrl).pathname.split('/').filter(function(x) { return x; }).join('-');
  const backupKeyPrefix = `${argv.prefix}-${backupName}`;
  const backupKey = `${backupKeyPrefix}-${new Date().toISOString()}`;
  const backupTmpFile = tmp.fileSync();
  const s3Endpoint = argv.s3url;
  const awsProfile = argv.awsprofile;

  // Creds are from ~/.aws/credentials, environment etc. (see S3 docs).
  const awsOpts = {
    signatureVersion: 'v4',
    credentials: new AWS.SharedIniFileCredentials({ profile: awsProfile })
  };
  if (typeof s3Endpoint !== 'undefined') {
    awsOpts.endpoint = new AWS.Endpoint(s3Endpoint);
  }
  const s3 = new AWS.S3(awsOpts);

  debug(`Creating a new backup of ${s(sourceUrl)} at ${backupBucket}/${backupKey}...`);
  bucketAccessible(s3, backupBucket)
    .then(() => {
      return createBackupFile(sourceUrl, backupTmpFile.name);
    })
    .then(() => {
      return uploadNewBackup(s3, backupTmpFile.name, backupBucket, backupKey);
    })
    .then(() => {
      debug('Backup successful!');
      backupTmpFile.removeCallback();
      debug('done.');
    })
    .catch((reason) => {
      debug(`Error: ${reason}`);
    });
}
/**
 * Return a promise that resolves if the bucket is available and
 * rejects if not.
 *
 * @param {any} s3 S3 client object
 * @param {any} bucketName Bucket name
 * @returns Promise
 */
function bucketAccessible(s3, bucketName) {
  return new Promise(function(resolve, reject) {
    const params = {
      Bucket: bucketName
    };
    s3.headBucket(params, function(err, data) {
      if (err) {
        reject(new VError(err, 'S3 bucket not accessible'));
      } else {
        resolve();
      }
    });
  });
}
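// Hedged usage sketch: bucketAccessible resolves with no value, so callers
// only branch on success vs. the VError-wrapped rejection. The helper below
// is illustrative and is not called by this script; the bucket name is an
// assumed example.
async function demoBucketCheck(s3client) {
  try {
    await bucketAccessible(s3client, 'my-backup-bucket');
    debug('bucket reachable');
  } catch (err) {
    debug(`bucket not accessible: ${err.message}`);
  }
}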
/**
 * Use couchbackup to create a backup of the specified database to a file path.
 *
 * @param {any} sourceUrl Database URL
 * @param {any} backupTmpFilePath Path to write file
 * @returns Promise
 */
function createBackupFile(sourceUrl, backupTmpFilePath) {
  return new Promise((resolve, reject) => {
    couchbackup.backup(
      sourceUrl,
      fs.createWriteStream(backupTmpFilePath),
      (err) => {
        if (err) {
          return reject(new VError(err, 'CouchBackup process failed'));
        }
        debug('couchbackup to file done; uploading to S3');
        resolve('creating backup file complete');
      }
    );
  });
}
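// Hedged variant sketch: couchbackup.backup writes to any writable stream,
// so the file can be gzipped in flight (the test report above exercises
// "backup and restore animaldb via a compressed stream"). zlib is Node core;
// this helper is illustrative and is not called by the script.
const zlib = require('zlib');
function createCompressedBackupFile(sourceUrl, backupTmpFilePath) {
  return new Promise((resolve, reject) => {
    const gzip = zlib.createGzip();
    gzip.pipe(fs.createWriteStream(backupTmpFilePath)); // e.g. a .txt.gz path
    couchbackup.backup(sourceUrl, gzip, (err) => {
      if (err) {
        return reject(new VError(err, 'Compressed backup failed'));
      }
      resolve('compressed backup file complete');
    });
  });
}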
/**
 * Upload a backup file to an S3 bucket.
 *
 * @param {any} s3 Object store client
 * @param {any} backupTmpFilePath Path of backup file to upload.
 * @param {any} bucket Object store bucket name
 * @param {any} key Object store key name
 * @returns Promise
 */
function uploadNewBackup(s3, backupTmpFilePath, bucket, key) {
  return new Promise((resolve, reject) => {
    debug(`Uploading from ${backupTmpFilePath} to ${bucket}/${key}`);

    function uploadFromStream(s3, bucket, key) {
      const pass = new stream.PassThrough();
      const params = {
        Bucket: bucket,
        Key: key,
        Body: pass
      };
      s3.upload(params, function(err, data) {
        debug('S3 upload done');
        if (err) {
          debug(err);
          reject(new VError(err, 'Upload failed'));
          return;
        }
        debug('Upload succeeded');
        debug(data);
        resolve();
      }).on('httpUploadProgress', (progress) => {
        // Registering via .on() is required; assigning a httpUploadProgress
        // property on the returned ManagedUpload would silently do nothing.
        debug(`S3 upload progress: ${progress}`);
      });
      return pass;
    }

    const inputStream = fs.createReadStream(backupTmpFilePath);
    const s3Stream = uploadFromStream(s3, bucket, key);
    inputStream.pipe(s3Stream);
  });
}
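// The PassThrough lets s3.upload() consume the file as a stream and manage
// multipart parts itself rather than buffering the whole backup in memory.
// A hedged follow-up sketch: headObject and .promise() are standard
// aws-sdk v2 calls, but this helper is illustrative and is not called.
function demoVerifyUpload(s3, bucket, key) {
  return s3.headObject({ Bucket: bucket, Key: key }).promise()
    .then((head) => debug(`Verified ${bucket}/${key}: ${head.ContentLength} bytes`));
}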
/**
 * Remove creds from a URL, e.g., before logging
 *
 * @param {string} originalUrl URL to safen
 */
function s(originalUrl) {
  const parts = new url.URL(originalUrl);
  return url.format(parts, { auth: false });
}
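// For example, s('https://user:pass@host/db') returns 'https://host/db',
// so debug output never leaks basic-auth credentials from the source URL.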
main();
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:14" time="0.16">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.002">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.032">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.009">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.008">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.008">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.013">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.006">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:14" time="4.547">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.943">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.822">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.63">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:18" time="11.703">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="4.035">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.382">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.505">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:30" time="5.955">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.533">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.39">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.253">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:36" time="15.322">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="4.012">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="5.632">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.898">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:51" time="5.42">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.907">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.495">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.241">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:33:57" time="8.21">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.726">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.799">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.904">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:34:05" time="314.609">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.193">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="311.644">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:39:19" time="504.493">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="3.08">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="500.631">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:44" time="3.209">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.945">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:47" time="0.278">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.013">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:47" time="2.236">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.842">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.868">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:50" time="5.245">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.902">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.824">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.721">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:47:55" time="7.165">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.496">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.462">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.42">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:02" time="35.259">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="18.205">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.523">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:37" time="0.007">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:37" time="0.089">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.019">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.013">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:37" time="0.151">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.007">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.035">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.067">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:38" time="7.186">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.62">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.668">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.588">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.701">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.645">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.988">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.653">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.656">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.744">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.903">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:45" time="6.374">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.646">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.685">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.616">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.652">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.631">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.637">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="1.068">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.809">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.62">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:51" time="0.004">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:51" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:51" time="0.039">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.012">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.002">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:51" time="0.014">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:51" time="0.005">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.005">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:48:51" time="10.59">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.021">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.017">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.01">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.508">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.009">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:49:02" time="0.015">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:49:02" time="0.561">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.019">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.52">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.017">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:49:02" time="15.438">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.014">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.014">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="2.023">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.384">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:49:18" time="44.169">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="43.905">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-11-29T15:50:02" time="4.098">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.023">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.007">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.018">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.018">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.021">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global describe afterEach before after it */
'use strict';
/**
* CouchBackup module.
* @module couchbackup
* @see module:couchbackup
*/
const assert = require('assert');
const parser = require('../includes/parser.js');
const backupFull = require('./includes/backup.js');
const defaults = require('./includes/config.js').apiDefaults;
const error = require('./includes/error.js');
const request = require('./includes/request.js');
const restoreInternal = require('./includes/restore.js');
const backupShallow = require('./includes/shallowbackup.js');
const debug = require('debug')('couchbackup:app');
const events = require('events');
const fs = require('fs');
const URL = require('url').URL;
describe('#unit Default parameters', function() {
let processEnvCopy;
let processArgvCopy;
/**
* Test for a positive, safe integer.
*
* @param {object} x - Object under test.
*/
function isSafePositiveInteger(x) {
// https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
const MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || 9007199254740991;
// Is it a number?
return Object.prototype.toString.call(x) === '[object Number]' &&
// Is it an integer?
x % 1 === 0 &&
// Is it positive?
x > 0 &&
// Is it less than the maximum safe integer?
x <= MAX_SAFE_INTEGER;
}
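// Hedged illustration of the guard's behavior (values are examples):
// isSafePositiveInteger(500) === true
// isSafePositiveInteger(0) === false      (not positive)
// isSafePositiveInteger(1.5) === false    (not an integer)
// isSafePositiveInteger('500') === false  (not a number)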
before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
processArgvCopy = JSON.parse(JSON.stringify(process.argv));
/**
* Validate arguments.
*
* @param {string} url - URL of database.
* @param {object} opts - Options.
* @param {function} cb - Callback to be called on error.
*/
function validateArgs(url, opts, cb) {
if (typeof url !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid URL, must be type string'), null);
return;
}
if (opts && typeof opts.bufferSize !== 'undefined' && !isSafePositiveInteger(opts.bufferSize)) {
cb(new error.BackupError('InvalidOption', 'Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.iamApiKey !== 'undefined' && typeof opts.iamApiKey !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid iamApiKey option, must be type string'), null);
return;
}
if (opts && typeof opts.log !== 'undefined' && typeof opts.log !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid log option, must be type string'), null);
return;
}
if (opts && typeof opts.mode !== 'undefined' && ['full', 'shallow'].indexOf(opts.mode) === -1) {
cb(new error.BackupError('InvalidOption', 'Invalid mode option, must be either "full" or "shallow"'), null);
return;
}
if (opts && typeof opts.output !== 'undefined' && typeof opts.output !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid output option, must be type string'), null);
return;
}
if (opts && typeof opts.parallelism !== 'undefined' && !isSafePositiveInteger(opts.parallelism)) {
cb(new error.BackupError('InvalidOption', 'Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.requestTimeout !== 'undefined' && !isSafePositiveInteger(opts.requestTimeout)) {
cb(new error.BackupError('InvalidOption', 'Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.resume !== 'undefined' && typeof opts.resume !== 'boolean') {
cb(new error.BackupError('InvalidOption', 'Invalid resume option, must be type boolean'), null);
return;
}
// setup environment variables
process.env.COUCH_URL = 'http://user:pass@myurl.com';
process.env.COUCH_DATABASE = 'mydb';
process.env.COUCH_BUFFER_SIZE = '1000';
process.env.COUCH_PARALLELISM = '20';
process.env.COUCH_REQUEST_TIMEOUT = '20000';
process.env.COUCH_LOG = 'my.log';
process.env.COUCH_RESUME = 'true';
process.env.COUCH_OUTPUT = 'myfile.txt';
process.env.COUCH_MODE = 'shallow';
process.env.CLOUDANT_IAM_API_KEY = 'ABC123-ZYX987_cba789-xyz321';
process.env.COUCH_QUIET = 'true';
});
// Validate URL and ensure no auth if using key
try {
const urlObject = new URL(url);
// We require a protocol, host and path (for db), fail if any is missing.
if (urlObject.protocol !== 'https:' && urlObject.protocol !== 'http:') {
cb(new error.BackupError('InvalidOption', 'Invalid URL protocol.'));
return;
}
if (!urlObject.pathname || urlObject.pathname === '/') {
cb(new error.BackupError('InvalidOption', 'Invalid URL, missing path element (no database).'));
return;
}
if (opts && opts.iamApiKey && (urlObject.username || urlObject.password)) {
cb(new error.BackupError('InvalidOption', 'URL user information must not be supplied when using IAM API key.'));
return;
}
} catch (err) {
cb(error.wrapPossibleInvalidUrlError(err));
return;
}
after('Reset process data', function() {
process.env = processEnvCopy;
process.argv = processArgvCopy;
});
// Perform validation of invalid options for shallow mode and WARN
// We don't error for backwards compatibility with scripts that may have been
// written passing complete sets of options through
if (opts && opts.mode === 'shallow') {
if (opts.log || opts.resume) {
console.warn('WARNING: the options "log" and "resume" are invalid when using shallow mode.');
}
if (opts.parallelism) {
console.warn('WARNING: the option "parallelism" has no effect when using shallow mode.');
}
}
afterEach(function() {
delete require.cache[require.resolve('commander')];
});
if (opts && opts.resume) {
if (!opts.log) {
// This is the second place we check for the presence of the log option in conjunction with resume
// It has to be here for the API case
cb(new error.BackupError('NoLogFileName', 'To resume a backup, a log file must be specified'), null);
return;
} else if (!fs.existsSync(opts.log)) {
cb(new error.BackupError('LogDoesNotExist', 'To resume a backup, the log file must exist'), null);
return;
}
}
return true;
}
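// Hedged sketch of how a caller sees a validation failure: the callback
// receives a BackupError whose name distinguishes the InvalidOption case.
// The option values are illustrative and the helper is not called here.
function demoInvalidOption() {
  validateArgs('http://user:pass@myurl.com/mydb', { bufferSize: 0 }, (err) => {
    // err.name === 'InvalidOption'; err.message explains the valid range
    debug(`validation failed: ${err.name}`);
  });
}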
describe('Backup command-line', function() {
it('respects the COUCH_URL env variable if the --url backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
function addEventListener(indicator, emitter, event, f) {
emitter.on(event, function(...args) {
if (!indicator.errored) {
if (event === 'error') indicator.errored = true;
f(...args);
}
});
}
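// Hedged illustration: the shared indicator latches on the first 'error'
// event, after which every listener registered through this helper goes
// quiet, so the completion callback cannot fire twice for one failed run.
// This demo helper is illustrative and is not called.
function demoErrorLatch(emitter) {
  const indicator = { errored: false };
  addEventListener(indicator, emitter, 'error', (err) => debug(`first error only: ${err}`));
  addEventListener(indicator, emitter, 'finished', () => debug('ignored if an error came first'));
}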
it('respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
/*
Check the backup database exists and that the credentials used have
visibility. Callback with a fatal error if there is a problem with the DB.
@param {object} db - database object
@param {function(err)} callback - error is undefined if DB exists
*/
function proceedIfBackupDbValid(db, callback) {
db.service.headDatabase({ db: db.db }).then(() => callback()).catch(err => {
err = error.convertResponseError(err, err => parseIfDbValidResponseError(db, err));
callback(err);
});
}
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
/*
Check that the restore database exists, is new and is empty. Also verify that the credentials used have
visibility. Callback with a fatal error if there is a problem with the DB.
@param {object} db - database object
@param {function(err)} callback - error is undefined if DB exists, new and empty
*/
function proceedIfRestoreDbValid(db, callback) {
db.service.getDatabaseInformation({ db: db.db }).then(response => {
const { doc_count: docCount, doc_del_count: deletedDocCount } = response.result;
// The system databases can have a validation ddoc(s) injected in them on creation.
// This sets the doc count off, so we just completely exclude the system databases from this check.
// The assumption here is that users restoring system databases know what they are doing.
if (!db.db.startsWith('_') && (docCount !== 0 || deletedDocCount !== 0)) {
const notEmptyDBErr = new Error(`Target database ${db.url}${db.db} is not empty.`);
notEmptyDBErr.name = 'DatabaseNotEmpty';
callback(notEmptyDBErr);
} else {
callback();
}
}).catch(err => {
err = error.convertResponseError(err, err => parseIfDbValidResponseError(db, err));
callback(err);
});
}
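// Hedged illustration: a restore target with any existing (or deleted)
// documents is rejected, except for '_'-prefixed system databases. Example
// outcomes for the counts returned by getDatabaseInformation:
//   doc_count: 0, doc_del_count: 0  -> callback() with no error
//   doc_count: 5, doc_del_count: 0  -> DatabaseNotEmpty error (not empty)
//   doc_count: 0, doc_del_count: 2  -> DatabaseNotEmpty error (not new)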
it('respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
/*
Convert the database validation response error to a special DatabaseNotFound error
in case the database is missing. Otherwise delegate to the default error factory.
@param {object} db - database object
@param {object} err - HTTP response error
*/
function parseIfDbValidResponseError(db, err) {
if (err && err.status === 404) {
// Override the error type and message for the DB not found case
const msg = `Database ${db.url}` +
`${db.db} does not exist. ` +
'Check the URL and database name have been specified correctly.';
const noDBErr = new Error(msg);
noDBErr.name = 'DatabaseNotFound';
return noDBErr;
}
// Delegate to the default error factory if it wasn't a 404
return error.convertResponseError(err);
}
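// Hedged sketch of the 404 mapping: the error shape mirrors the response
// errors convertResponseError receives; this helper is illustrative only.
function demoNotFoundMapping(db) {
  const mapped = parseIfDbValidResponseError(db, { status: 404 });
  // mapped.name === 'DatabaseNotFound'
  // mapped.message names the URL and database and how to correct them.
  return mapped;
}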
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
module.exports = {
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
/**
* Backup a Cloudant database to a stream.
*
* @param {string} srcUrl - URL of database to backup.
* @param {stream.Writable} targetStream - Stream to write content to.
* @param {object} opts - Backup options.
* @param {number} [opts.parallelism=5] - Number of parallel HTTP requests to use.
* @param {number} [opts.bufferSize=500] - Number of documents per batch request.
* @param {number} [opts.requestTimeout=120000] - Milliseconds to wait before retrying a HTTP request.
* @param {string} [opts.iamApiKey] - IAM API key to use to access Cloudant database.
* @param {string} [opts.log] - Log file name. Default uses a temporary file.
* @param {boolean} [opts.resume] - Whether to resume from existing log.
* @param {string} [opts.mode=full] - Use `full` or `shallow` mode.
* @param {backupRestoreCallback} callback - Called on completion.
*/
backup: function(srcUrl, targetStream, opts, callback) {
const listenerErrorIndicator = { errored: false };
if (typeof callback === 'undefined' && typeof opts === 'function') {
callback = opts;
opts = {};
}
if (!validateArgs(srcUrl, opts, callback)) {
// bad args, bail
return;
}
it('respects the COUCH_LOG env variable if the --log backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, process.env.COUCH_LOG);
});
// if there is an error writing to the stream, call the completion
// callback with the error set
addEventListener(listenerErrorIndicator, targetStream, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
if (callback) callback(err);
it('respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
opts = Object.assign({}, defaults(), opts);
it('respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, process.env.COUCH_OUTPUT);
});
const ee = new events.EventEmitter();
it('respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, process.env.COUCH_MODE);
});
// Set up the DB client
const backupDB = request.client(srcUrl, opts);
it('respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
// Validate the DB exists, before proceeding to backup
proceedIfBackupDbValid(backupDB, function(err) {
if (err) {
if (err.name === 'DatabaseNotFound') {
err.message = `${err.message} Ensure the backup source database exists.`;
}
// Didn't exist, or another fatal error, exit
callback(err);
return;
}
let backup = null;
if (opts.mode === 'shallow') {
backup = backupShallow;
} else { // full mode
backup = backupFull;
}
it('respects the backup --url command-line parameter', function() {
const url = 'http://user:pass@myurl2.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
// If resuming write a newline as it's possible one would be missing from
// an interruption of the previous backup. If the backup was clean this
// will cause an empty line that will be gracefully handled by the restore.
if (opts.resume) {
targetStream.write('\n');
}
it('respects the backup --db command-line parameter', function() {
const db = 'mydb2';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
// Get the event emitter from the backup process so we can handle events
// before passing them on to the app's event emitter if needed.
const internalEE = backup(backupDB, opts);
addEventListener(listenerErrorIndicator, internalEE, 'changes', function(batch) {
ee.emit('changes', batch);
});
addEventListener(listenerErrorIndicator, internalEE, 'received', function(obj, q, logCompletedBatch) {
// this may be too verbose to have as well as the "backed up" message
// debug(' received batch', obj.batch, ' docs: ', obj.total, 'Time', obj.time);
// Callback to emit the written event when the content is flushed
function writeFlushed() {
ee.emit('written', { total: obj.total, time: obj.time, batch: obj.batch });
if (logCompletedBatch) {
logCompletedBatch(obj.batch);
}
debug(' backed up batch', obj.batch, ' docs: ', obj.total, 'Time', obj.time);
}
// Write the received content to the targetStream
const continueWriting = targetStream.write(JSON.stringify(obj.data) + '\n',
'utf8',
writeFlushed);
if (!continueWriting) {
// The buffer was full, pause the queue to stop the writes until we
// get a drain event
if (q && !q.paused) {
q.pause();
targetStream.once('drain', function() {
q.resume();
});
}
}
});
// For errors we expect, may or may not be fatal
addEventListener(listenerErrorIndicator, internalEE, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
callback(err);
});
addEventListener(listenerErrorIndicator, internalEE, 'finished', function(obj) {
function emitFinished() {
debug('Backup complete - written ' + JSON.stringify(obj));
const summary = { total: obj.total };
ee.emit('finished', summary);
if (callback) callback(null, summary);
}
if (targetStream === process.stdout) {
// stdout cannot emit a finish event so use a final write + callback
targetStream.write('', 'utf8', emitFinished);
} else {
// If we're writing to a file, end the writes and register the
// emitFinished function for a callback when the file stream's finish
// event is emitted.
targetStream.end('', 'utf8', emitFinished);
}
});
it('respects the backup --buffer-size command-line parameter', function() {
const bufferSize = 500;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
return ee;
},
/**
* Restore a backup from a stream.
*
* @param {stream.Readable} srcStream - Stream containing backed up data.
* @param {string} targetUrl - Target database.
* @param {object} opts - Restore options.
* @param {number} opts.parallelism - Number of parallel HTTP requests to use. Default 5.
* @param {number} opts.bufferSize - Number of documents per batch request. Default 500.
* @param {number} opts.requestTimeout - Milliseconds to wait before retrying an HTTP request. Default 120000.
* @param {string} opts.iamApiKey - IAM API key to use to access Cloudant database.
* @param {backupRestoreCallback} callback - Called on completion.
*/
restore: function(srcStream, targetUrl, opts, callback) {
const listenerErrorIndicator = { errored: false };
if (typeof callback === 'undefined' && typeof opts === 'function') {
callback = opts;
opts = {};
}
validateArgs(targetUrl, opts, callback);
opts = Object.assign({}, defaults(), opts);
it('respects the backup --parallelism command-line parameter', function() {
const parallelism = 10;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
const ee = new events.EventEmitter();
it('respects the backup --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
// Set up the DB client
const restoreDB = request.client(targetUrl, opts);
it('respects the backup --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
// Validate that the DB exists before proceeding with the restore
proceedIfRestoreDbValid(restoreDB, function(err) {
if (err) {
if (err.name === 'DatabaseNotFound') {
err.message = `${err.message} Create the target database before restoring.`;
} else if (err.name === 'DatabaseNotEmpty') {
err.message = `${err.message} A target database must be a new and empty database.`;
}
// Didn't exist, or another fatal error, exit
callback(err);
return;
}
it('respects the backup --log command-line parameter', function() {
const filename = 'my2.log';
process.argv = ['node', 'test', '--log', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, filename);
});
restoreInternal(
restoreDB,
opts,
srcStream,
ee,
function(err, writer) {
if (err) {
callback(err, null);
return;
}
if (writer != null) {
addEventListener(listenerErrorIndicator, writer, 'restored', function(obj) {
debug(' restored ', obj.total);
ee.emit('restored', { documents: obj.documents, total: obj.total });
});
addEventListener(listenerErrorIndicator, writer, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
// Only call destroy if it is available on the stream
if (srcStream.destroy && srcStream.destroy instanceof Function) {
srcStream.destroy();
}
callback(err);
});
addEventListener(listenerErrorIndicator, writer, 'finished', function(obj) {
debug('restore complete');
ee.emit('finished', { total: obj.total });
callback(null, obj);
});
}
}
);
it('respects the backup --resume command-line parameter', function() {
process.argv = ['node', 'test', '--resume'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
return ee;
}
};
/**
* Backup/restore callback
* @callback backupRestoreCallback
* @param {Error} err - Error object if operation failed.
* @param {object} data - summary data for backup/restore
*/
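// Illustrative sketch (not part of the module above): wiring up the restore
// API and the backupRestoreCallback. The file name and target URL are assumptions.
//
//   const fs = require('fs');
//   const couchbackup = require('@cloudant/couchbackup');
//   couchbackup.restore(
//     fs.createReadStream('./animaldb.backup'), // srcStream
//     'https://examples.cloudant.com/animaldb-target', // target must exist and be empty
//     { parallelism: 5, bufferSize: 500 },
//     function(err, data) { // backupRestoreCallback
//       if (err) { console.error(err.message); } else { console.log('Restored', data.total, 'docs'); }
//     }
//   ).on('restored', function(obj) { console.log('Restored batch, running total', obj.total); });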
it('respects the backup --output command-line parameter', function() {
const filename = 'myfile2.txt';
process.argv = ['node', 'test', '--output', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, filename);
});
it('respects the backup --mode full command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'full'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'full');
});
it('respects the backup --mode shallow command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'shallow'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'shallow');
});
it('respects the backup --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
describe('Restore command-line', function() {
it('respects the COUCH_URL env variable if the --url restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
it('respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('respects the COUCH_QUIET env variable if the --quiet restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the restore --url command-line parameter', function() {
const url = 'https://a:b@myurl3.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the restore --db command-line parameter', function() {
const db = 'mydb3';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the restore --buffer-size command-line parameter', function() {
const bufferSize = 250;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the restore --parallelism command-line parameter', function() {
const parallelism = 5;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the restore --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the restore --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the restore --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
});

@@ -1,3 +0,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -15,68 +14,20 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const error = require('../includes/error.js');
const fs = require('fs');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const backupDebug = debug('couchbackup:backup');
const backupBatchDebug = debug('couchbackup:backup:batch');
const u = require('./citestutils.js');
backupDebug.enabled = true;
describe('Encryption tests', function() {
// Note: CLI only, since these tests use the openssl command
const p = { useApi: false, encryption: true };
try {
const program = parser.parseBackupArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);
const opts = {
bufferSize: program.bufferSize,
log: program.log,
mode: program.mode,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
resume: program.resume,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};
// log configuration to console
console.error('='.repeat(80));
console.error('Performing backup on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));
backupBatchDebug.enabled = !program.quiet;
let ws = process.stdout;
// open output file
if (program.output) {
let flags = 'w';
if (program.log && program.resume) {
flags = 'a';
}
const fd = fs.openSync(program.output, flags);
ws = fs.createWriteStream(null, { fd });
}
backupDebug('Fetching all database changes...');
return couchbackup.backup(
databaseUrl,
ws,
opts,
error.terminationCallback
).on('changes', function(batch) {
backupBatchDebug('Total batches received:', batch + 1);
}).on('written', function(obj) {
backupBatchDebug('Written batch ID:', obj.batch, 'Total document revisions written:', obj.total, 'Time:', obj.time);
}).on('error', function(e) {
backupDebug('ERROR', e);
}).on('finished', function(obj) {
backupDebug('Finished - Total document revisions written:', obj.total);
it('should backup and restore animaldb via an encrypted file', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const encryptedBackup = `./${this.fileName}`;
return u.testBackupAndRestoreViaFile(p, 'animaldb', encryptedBackup, this.dbName).then(() => {
return u.assertEncryptedFile(encryptedBackup);
});
});
} catch (err) {
error.terminationCallback(err);
}
});
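// Illustrative sketch of the CLI pipeline the encryption test above exercises;
// the exact openssl arguments and passphrase are assumptions, not the test's invocation:
//
//   couchbackup --db animaldb | openssl aes-256-cbc -pass pass:Sup3rS3cret > animaldb.backup.enc
//   openssl aes-256-cbc -d -pass pass:Sup3rS3cret < animaldb.backup.enc | couchrestore --db animaldb-target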

@@ -1,3 +0,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -15,47 +14,437 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global */
'use strict';
const error = require('../includes/error.js');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const assert = require('node:assert');
const { once } = require('node:events');
const fs = require('node:fs');
const { PassThrough } = require('node:stream');
const { pipeline } = require('node:stream/promises');
const { createGzip, createGunzip } = require('node:zlib');
const debug = require('debug');
const restoreDebug = debug('couchbackup:restore');
const restoreBatchDebug = debug('couchbackup:restore:batch');
const { Tail } = require('tail');
const app = require('../app.js');
const dbUrl = require('../includes/cliutils.js').databaseUrl;
const compare = require('./compare.js');
const request = require('../includes/request.js');
const { cliBackup, cliDecrypt, cliEncrypt, cliGzip, cliGunzip, cliRestore } = require('./test_process.js');
const testLogger = debug('couchbackup:test:utils');
restoreDebug.enabled = true;
function scenario(test, params) {
return `${test} ${(params.useApi) ? 'using API' : 'using CLI'}`;
}
try {
const program = parser.parseRestoreArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);
const opts = {
bufferSize: program.bufferSize,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};
function params() {
const p = {};
for (let i = 0; i < arguments.length; i++) {
Object.assign(p, arguments[i]);
}
return p;
}
// log configuration to console
console.error('='.repeat(80));
console.error('Performing restore on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));
// Returns the event emitter for API calls, or the child process for CLI calls
async function testBackup(params, databaseName, outputStream) {
const pipelineStreams = [];
const promises = [];
restoreBatchDebug.enabled = !program.quiet;
// Configure API key if needed
augmentParamsWithApiKey(params);
return couchbackup.restore(
process.stdin, // restore from stdin
databaseUrl,
opts,
error.terminationCallback
).on('restored', function(obj) {
restoreBatchDebug('restored', obj.total);
}).on('error', function(e) {
restoreDebug('ERROR', e);
}).on('finished', function(obj) {
restoreDebug('finished', obj);
let backup;
let backupStream;
let backupPromise;
let tail;
if (params.useApi) {
if (params.useStdOut) {
backupStream = outputStream;
} else {
backupStream = new PassThrough();
}
const backupCallbackPromise = new Promise((resolve, reject) => {
backup = app.backup(
dbUrl(process.env.COUCH_URL, databaseName),
backupStream,
params.opts,
(err, data) => {
if (err) {
testLogger(`API backup callback with ${JSON.stringify(err)}, will reject.`);
reject(err);
} else {
testLogger(`API backup callback with ${JSON.stringify(data)}, will resolve.`);
resolve(data);
}
});
});
const backupFinishedPromise = once(backup, 'finished')
.then(([summary]) => { // events.once resolves with an array of the emitted args
testLogger(`Resolving API backup event promise with ${JSON.stringify(summary)}`);
if (params.resume) {
assertWrittenFewerThan(summary.total, params.exclusiveMaxExpected);
}
})
.catch((err) => {
testLogger(`Rejecting API backup event promise with error ${JSON.stringify(err)}`);
throw err;
});
backupPromise = Promise.all([backupCallbackPromise, backupFinishedPromise])
.then(() => {
testLogger('Both API backup promises resolved.');
});
} else {
backup = cliBackup(databaseName, params);
backupStream = backup.stream;
backupPromise = backup.childProcessPromise;
if (params.abort) {
// Create the log file for abort tests so we can tail it; for other tests
// the log file is created normally by the backup process itself.
const f = fs.openSync(params.opts.log, 'w');
fs.closeSync(f);
// Use tail to watch the log file for a batch to be completed then abort
tail = new Tail(params.opts.log, { useWatchFile: true, fsWatchOptions: { interval: 500 }, follow: false });
tail.on('line', function(data) {
const matches = data.match(/:d batch\d+/);
if (matches !== null) {
// Turn off the tail.
tail.unwatch();
// Abort the backup
backup.childProcess.kill();
}
});
}
if (params.resume) {
const listenerPromise = new Promise((resolve, reject) => {
const listener = function(data) {
const matches = data.toString().match(/.*Finished - Total document revisions written: (\d+).*/);
if (matches !== null) {
try {
assertWrittenFewerThan(matches[1], params.exclusiveMaxExpected);
resolve();
} catch (err) {
reject(err);
}
process.stderr.removeListener('data', listener);
}
};
backup.childProcess.stderr.on('data', listener);
});
promises.push(listenerPromise);
}
}
promises.push(backupPromise);
if (!params.useStdOut) {
pipelineStreams.push(backupStream);
}
if (params.compression) {
if (params.useApi) {
pipelineStreams.push(createGzip());
} else {
const gzipProcess = cliGzip();
pipelineStreams.push(gzipProcess.stream);
promises.push(gzipProcess.childProcessPromise);
}
}
// Pipe via encryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
return Promise.reject(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
const encryptProcess = cliEncrypt();
pipelineStreams.push(encryptProcess.stream);
promises.push(encryptProcess.childProcessPromise);
}
}
if (!params.useStdOut) {
// Finally add the outputStream to the list we want to pipeline
pipelineStreams.push(outputStream);
// Create the promisified pipeline and add it to the array of promises we'll wait for
promises.unshift(pipeline(pipelineStreams));
}
// Wait for the promises and then assert
return Promise.all(promises)
.then(() => testLogger('All backup promises resolved.'))
.then(() => {
if (params.expectedBackupError) {
return Promise.reject(new Error('Backup passed when it should have failed.'));
}
})
.catch((err) => {
if (params.expectedBackupError || params.abort) {
if (params.useApi) {
assert.strictEqual(err.name, params.expectedBackupError.name, 'The backup should receive the expected error.');
} else {
if (params.abort) {
// The tail should be stopped when we match a line and abort, but if
// something didn't work we need to make sure the tail is stopped
tail.unwatch();
// Assert that the process was aborted as expected
assert.strictEqual(err.signal, 'SIGTERM', `The backup should have terminated with SIGTERM, but was ${err.signal}.`);
} else if (params.expectedBackupError) {
assert.strictEqual(err.code, params.expectedBackupError.code, `The backup exited with unexpected code ${err.code} and signal ${err.signal}.`);
}
}
} else {
return Promise.reject(err);
}
});
}
async function testRestore(params, inputStream, databaseName) {
const pipelineStreams = [inputStream];
const promises = [];
// Configure API key if needed
augmentParamsWithApiKey(params);
let restore;
let restoreStream;
let restorePromise;
if (params.useApi) {
restoreStream = new PassThrough();
const restoreCallbackPromise = new Promise((resolve, reject) => {
restore = app.restore(
restoreStream,
dbUrl(process.env.COUCH_URL, databaseName),
params.opts,
(err, data) => {
if (err) {
testLogger(`API restore callback with ${err}, will reject.`);
reject(err);
} else {
resolve(data);
}
});
});
const restoreFinishedPromise = once(restore, 'finished')
.then((summary) => {
testLogger(`Resolving API restore promise with ${summary}`);
})
.catch((err) => {
testLogger(`Handling API restore error event ${JSON.stringify(err)}`);
if (params.expectedRestoreErrorRecoverable) {
testLogger(`Expecting restore error ${params.expectedRestoreErrorRecoverable.name}`);
assert.strictEqual(err.name, params.expectedRestoreErrorRecoverable.name, 'The restore should receive the expected recoverable error.');
} else {
testLogger(`API restore will reject by throwing error event ${JSON.stringify(err)}`);
return Promise.reject(err);
}
});
restorePromise = Promise.all([restoreCallbackPromise, restoreFinishedPromise]);
} else {
restore = cliRestore(databaseName, params);
restoreStream = restore.stream;
restorePromise = restore.childProcessPromise;
}
promises.push(restorePromise);
// Pipe via decompression if requested
if (params.compression) {
if (params.useApi) {
pipelineStreams.push(createGunzip());
} else {
const gunzipProcess = cliGunzip();
pipelineStreams.push(gunzipProcess.stream);
promises.push(gunzipProcess.childProcessPromise);
}
}
// Pipe via decryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
return Promise.reject(new Error('Not implemented: cannot test encrypted API restores at this time.'));
} else {
const decryptProcess = cliDecrypt();
pipelineStreams.push(decryptProcess.stream);
promises.push(decryptProcess.childProcessPromise);
}
}
// pipeline everything into the restoreStream
pipelineStreams.push(restoreStream);
// Create the promisified pipeline and add it to the array of promises we'll wait for
promises.unshift(pipeline(pipelineStreams));
// Wait for the all the promises to settle and then assert based on the process promise
return Promise.allSettled(promises)
.then(() => { return restorePromise; })
.then((summary) => {
testLogger(`Restore promise resolved with ${summary}.`);
if (params.expectedRestoreError) {
return Promise.reject(new Error('Restore passed when it should have failed.'));
}
})
.catch((err) => {
testLogger(`Restore promise rejected with ${err}.`);
if (params.expectedRestoreError) {
if (params.useApi) {
assert.strictEqual(err.name, params.expectedRestoreError.name, 'The restore should receive the expected error.');
} else {
assert.strictEqual(err.code, params.expectedRestoreError.code, `The restore exited with unexpected code ${err.code} and signal ${err.signal}.`);
}
} else {
return Promise.reject(err);
}
});
}
// Serial backup and restore via a file on disk
async function testBackupAndRestoreViaFile(params, srcDb, backupFile, targetDb) {
return testBackupToFile(params, srcDb, backupFile).then(() => {
return testRestoreFromFile(params, backupFile, targetDb);
});
} catch (err) {
error.terminationCallback(err);
}
async function testBackupToFile(params, srcDb, backupFile) {
// Open the file for appending if this is a resume
const output = fs.createWriteStream(backupFile, { flags: (params.opts && params.opts.resume) ? 'a' : 'w' });
return once(output, 'open')
.then(() => {
return testBackup(params, srcDb, output);
});
}
async function testRestoreFromFile(params, backupFile, targetDb) {
const input = fs.createReadStream(backupFile);
return once(input, 'open')
.then(() => {
return testRestore(params, input, targetDb);
});
}
async function testDirectBackupAndRestore(params, srcDb, targetDb) {
// Allow a 64 MB highWaterMark for the passthrough during testing
const passthrough = new PassThrough({ highWaterMark: 67108864 });
const backup = testBackup(params, srcDb, passthrough);
const restore = testRestore(params, passthrough, targetDb);
return Promise.all([backup, restore]).then(() => {
return dbCompare(srcDb, targetDb);
});
}
async function testBackupAbortResumeRestore(params, srcDb, backupFile, targetDb) {
return Promise.resolve()
.then(() => {
// First backup with an abort
if (params.opts && params.opts.output) {
return testBackup(params, srcDb, new PassThrough());
} else {
return testBackupToFile(params, srcDb, backupFile);
}
}).then(() => {
// Remove the abort parameter and add the resume parameter
delete params.abort;
params.opts.resume = true;
// Resume the backup
if (params.opts && params.opts.output) {
return testBackup(params, srcDb, new PassThrough());
} else {
return testBackupToFile(params, srcDb, backupFile);
}
}).then(() => {
// Restore the backup
return testRestoreFromFile(params, backupFile, targetDb);
}).then(() => {
// Now compare the restored to the original for validation
return dbCompare(srcDb, targetDb);
});
}
async function dbCompare(db1Name, db2Name) {
const client = request.client(process.env.COUCH_BACKEND_URL, {});
return compare.compare(db1Name, db2Name, client.service)
.then(result => {
return assert.strictEqual(result, true, 'The database comparison should succeed, but failed');
});
}
function sortByIdThenRev(o1, o2) {
if (o1._id < o2._id) return -1;
if (o1._id > o2._id) return 1;
if (o1._rev < o2._rev) return -1;
if (o1._rev > o2._rev) return 1;
return 0;
}
function readSortAndDeepEqual(actualContentPath, expectedContentPath) {
const backupContent = JSON.parse(fs.readFileSync(actualContentPath, 'utf8'));
const expectedContent = JSON.parse(fs.readFileSync(expectedContentPath, 'utf8'));
// Array order of the docs is important for equality, but not for backup
backupContent.sort(sortByIdThenRev);
expectedContent.sort(sortByIdThenRev);
// Assert that the backup matches the expected
assert.deepStrictEqual(backupContent, expectedContent);
}
function setTimeout(context, timeout) {
// Increase timeout using TEST_TIMEOUT_MULTIPLIER
const multiplier = (typeof process.env.TEST_TIMEOUT_MULTIPLIER !== 'undefined') ? parseInt(process.env.TEST_TIMEOUT_MULTIPLIER) : 1;
timeout *= multiplier;
// Set the mocha timeout
context.timeout(timeout * 1000);
}
function assertGzipFile(path) {
// 1f 8b is the gzip magic number
const expectedBytes = Buffer.from([0x1f, 0x8b]);
const buffer = Buffer.alloc(2);
const fd = fs.openSync(path, 'r');
// Read the first two bytes
fs.readSync(fd, buffer, 0, 2, 0);
fs.closeSync(fd);
// Assert the magic number corresponds to gz extension
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be gz compressed.');
}
function assertEncryptedFile(path) {
// OpenSSL salted-encryption output starts with the magic bytes 'Salted__';
// checking the first six bytes ('Salted') is sufficient here
const expectedBytes = Buffer.from('Salted');
const buffer = Buffer.alloc(6);
const fd = fs.openSync(path, 'r');
// Read the first six bytes
fs.readSync(fd, buffer, 0, 6, 0);
fs.closeSync(fd);
// Assert first 6 characters of the file are "Salted"
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be encrypted.');
}
function assertWrittenFewerThan(total, number) {
assert(total < number && total > 0, `Saw ${total} but expected between 1 and ${number - 1} documents for the resumed backup.`);
}
function augmentParamsWithApiKey(params) {
if (process.env.COUCHBACKUP_TEST_IAM_API_KEY) {
if (!params.opts) {
params.opts = {};
}
params.opts.iamApiKey = process.env.COUCHBACKUP_TEST_IAM_API_KEY;
params.opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
module.exports = {
scenario,
p: params,
setTimeout,
dbCompare,
readSortAndDeepEqual,
assertGzipFile,
assertEncryptedFile,
testBackup,
testRestore,
testDirectBackupAndRestore,
testBackupToFile,
testRestoreFromFile,
testBackupAndRestoreViaFile,
testBackupAbortResumeRestore
};
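// Illustrative sketch of how a test file might consume these utilities; the
// database name and scenario parameters are assumptions:
//
//   const u = require('./citestutils.js');
//   const params = u.p({ useApi: true }, { compression: true });
//   describe(u.scenario('Compressed backup and restore', params), function() {
//     it('should round-trip animaldb', async function() {
//       u.setTimeout(this, 60); // up to 60 s (scaled by TEST_TIMEOUT_MULTIPLIER)
//       return u.testDirectBackupAndRestore(params, 'animaldb', this.dbName);
//     });
//   });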

@@ -1,2 +0,2 @@

// Copyright © 2017, 2022 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -14,68 +14,53 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global beforeEach afterEach */
'use strict';
const async = require('async');
const error = require('./error.js');
const events = require('events');
const { CloudantV1 } = require('@ibm-cloud/cloudant');
const url = new URL((process.env.COUCH_BACKEND_URL) ? process.env.COUCH_BACKEND_URL : 'https://no-couch-backend-url-set.test');
const { BasicAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const authenticator = (url.username) ? new BasicAuthenticator({ username: url.username, password: decodeURIComponent(url.password) }) : new NoAuthAuthenticator();
const serviceOpts = {
authenticator: authenticator
};
const cloudant = new CloudantV1(serviceOpts);
// Remove auth from URL before using for service
cloudant.setServiceUrl(new URL(url.pathname, url.origin).toString());
const uuid = require('uuid').v4;
const fs = require('fs');
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime();
let batch = 0;
let hasErrored = false;
let startKey = null;
let total = 0;
// Mocha hooks registered at the root context, so they run for all tests
async.doUntil(
function(callback) {
// Note, include_docs: true is set automatically when using the
// fetch function.
const opts = { db: db.db, limit: options.bufferSize, includeDocs: true };
beforeEach('Create test database', async function() {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to create the DB
this.timeout(10 * 1000);
const unique = uuid();
this.fileName = `${unique}`;
this.dbName = 'couchbackup_test_' + unique;
// To avoid double fetching a document solely for the purposes of getting
// the next ID to use as a startKey for the next page we instead use the
// last ID of the current page and append the lowest unicode sort
// character.
if (startKey) opts.startKey = `${startKey}\0`;
db.service.postAllDocs(opts).then(response => {
const body = response.result;
if (!body.rows) {
ee.emit('error', new error.BackupError(
'AllDocsError', 'ERROR: Invalid all docs response'));
callback();
} else {
if (body.rows.length < opts.limit) {
startKey = null; // last batch
} else {
startKey = body.rows[opts.limit - 1].id;
}
return cloudant.putDatabase({ db: this.dbName });
}
});
const docs = [];
body.rows.forEach(function(doc) {
docs.push(doc.doc);
});
afterEach('Delete test database', async function() {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to delete the DB
this.timeout(10 * 1000);
deleteIfExists(this.fileName);
deleteIfExists(`${this.fileName}.log`);
return cloudant.deleteDatabase({ db: this.dbName });
}
});
if (docs.length > 0) {
ee.emit('received', {
batch: batch++,
data: docs,
length: docs.length,
time: (new Date().getTime() - start) / 1000,
total: total += docs.length
});
}
callback();
}
}).catch(err => {
err = error.convertResponseError(err);
ee.emit('error', err);
hasErrored = true;
callback();
});
},
function(callback) { callback(null, hasErrored || startKey == null); },
function() { ee.emit('finished', { total: total }); }
);
return ee;
};
function deleteIfExists(fileName) {
fs.unlink(fileName, function(err) {
if (err) {
if (err.code !== 'ENOENT') {
console.error(`${err.code} ${err.message}`);
}
}
});
}

@@ -16,150 +16,104 @@ // Copyright © 2017, 2021 IBM Corp. All rights reserved.

const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');
const path = require('path');
const tmp = require('tmp');
module.exports = function(db, bufferSize, parallelism, ee) {
const writer = new stream.Transform({ objectMode: true });
let buffer = [];
let written = 0;
let linenumber = 0;
/**
Return API default settings.
*/
function apiDefaults() {
return {
parallelism: 5,
bufferSize: 500,
requestTimeout: 120000,
log: tmp.tmpNameSync(),
resume: false,
mode: 'full'
};
}
// this is the queue of chunks that are written to the database
// the queue's payload will be an array of documents to be written,
// the size of the array will be bufferSize. The variable parallelism
// determines how many HTTP requests will occur at any one time.
const q = async.queue(function(payload, cb) {
// if we are restoring known revisions, we need to supply new_edits=false
if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
payload.new_edits = false;
debug('Using new_edits false mode.');
}
/**
Return CLI default settings.
*/
function cliDefaults() {
const defaults = apiDefaults();
if (!didError) {
db.service.postBulkDocs({
db: db.db,
bulkDocs: payload
}).then(response => {
if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
` and ${response.result ? response.result.length : 'unavailable'} items`);
}
written += payload.docs.length;
writer.emit('restored', { documents: payload.docs.length, total: written });
cb();
}).catch(err => {
err = error.convertResponseError(err);
debug(`Error writing docs ${err.name} ${err.message}`);
cb(err, payload);
});
}
}, parallelism);
// add additional legacy settings
defaults.db = 'test';
defaults.url = 'http://localhost:5984';
let didError = false;
// add CLI only option
defaults.quiet = false;
// write the contents of the buffer to CouchDB in blocks of bufferSize
function processBuffer(flush, callback) {
function taskCallback(err, payload) {
if (err && !didError) {
debug(`Queue task failed with error ${err.name}`);
didError = true;
q.kill();
writer.emit('error', err);
}
}
return defaults;
}
if (flush || buffer.length >= bufferSize) {
// work through the buffer to break off bufferSize chunks
// and feed the chunks to the queue
do {
// split the buffer into bufferSize chunks
const toSend = buffer.splice(0, bufferSize);
/**
Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
// if we have a custom CouchDB url
if (typeof process.env.COUCH_URL !== 'undefined') {
opts.url = process.env.COUCH_URL;
}
// and add the chunk to the queue
debug(`Adding ${toSend.length} to the write queue.`);
q.push({ docs: toSend }, taskCallback);
} while (buffer.length >= bufferSize);
// if we have a specified database
if (typeof process.env.COUCH_DATABASE !== 'undefined') {
opts.db = process.env.COUCH_DATABASE;
}
// send any leftover documents to the queue
if (flush && buffer.length > 0) {
debug(`Adding remaining ${buffer.length} to the write queue.`);
q.push({ docs: buffer }, taskCallback);
}
// if we have a specified buffer size
if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
}
// wait until the queue size falls to a reasonable level
async.until(
// wait until the queue length drops to twice the parallelism
// or until empty on the last write
function(callback) {
// if we encountered an error, stop this until loop
if (didError) {
return callback(null, true);
}
if (flush) {
callback(null, q.idle() && q.length() === 0);
} else {
callback(null, q.length() <= parallelism * 2);
}
},
function(cb) {
setTimeout(cb, 20);
},
// if we have a specified parallelism
if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
}
function() {
if (flush && !didError) {
writer.emit('finished', { total: written });
}
// callback when we're happy with the queue size
callback();
});
} else {
callback();
}
// if we have a specified request timeout
if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
}
// take an object
writer._transform = function(obj, encoding, done) {
// each obj that arrives here is a line from the backup file
// it should contain an array of objects. The length of the array
// depends on the bufferSize at backup time.
linenumber++;
if (!didError && obj !== '') {
// see if it parses as JSON
try {
const arr = JSON.parse(obj);
// if we have a specified log file
if (typeof process.env.COUCH_LOG !== 'undefined') {
opts.log = path.normalize(process.env.COUCH_LOG);
}
// if it's an array with a length
if (typeof arr === 'object' && arr.length > 0) {
// push each document into a buffer
buffer = buffer.concat(arr);
// if we are instructed to resume
if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
opts.resume = true;
}
// pause the stream
// it's likely that the speed with which data can be read from disk
// may exceed the rate it can be written to CouchDB. To prevent
// the whole file being buffered in memory, we pause the stream here.
// it is resumed, when processBuffer calls back and we call done()
this.pause();
// if we are given an output filename
if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
opts.output = path.normalize(process.env.COUCH_OUTPUT);
}
// break the buffer into bufferSize chunks to be written to the database
processBuffer(false, done);
} else {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
done();
}
} catch (e) {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
// Could be an incomplete write that was subsequently resumed
done();
}
} else {
done();
}
};
// if we only want a shallow copy
if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
opts.mode = 'shallow';
}
// called when we need to flush everything
writer._flush = function(done) {
processBuffer(true, done);
};
return writer;
// if we are instructed to be quiet
if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
opts.quiet = true;
}
// if we have a specified API key
if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
}
// if we have a specified IAM token endpoint
if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
module.exports = {
apiDefaults: apiDefaults,
cliDefaults: cliDefaults,
applyEnvironmentVariables: applyEnvironmentVariables
};
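// Illustrative sketch of the intended precedence: defaults first, then
// environment variables override in-place (explicit CLI flags are applied
// later by the parse functions, not shown here). The env value is an example.
//
//   const opts = cliDefaults(); // e.g. opts.bufferSize === 500
//   process.env.COUCH_BUFFER_SIZE = '250';
//   applyEnvironmentVariables(opts); // opts.bufferSize is now 250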

@@ -16,27 +16,78 @@ // Copyright © 2017 IBM Corp. All rights reserved.

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');
module.exports = function(onChange) {
const onLine = function(onCommand, getDocs) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
let obj = null;
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
// one change per line - remove the trailing comma
line = line.trim().replace(/,$/, '');
let matches;
// extract the last_seq at the end of the changes feed
if (line.match(/^"last_seq":/)) {
line = '{' + line;
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
// extract doc ids
if (getDocs && obj.command === 't') {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
}
onCommand(obj);
}
try {
obj = JSON.parse(line);
} catch (e) {
}
onChange(obj);
done();
};
return change;
};
/**
* Generate a list of remaining batches from a backup log file.
*
* @param {string} log - log file name
* @param {function} callback - callback with err, {changesComplete: N, batches: N}.
* changesComplete signifies whether the log file appeared to
* have completed reading the changes feed (contains :changes_complete).
* batches are remaining batch IDs for download.
*/
module.exports = function(log, callback) {
// our sense of state
const state = {
};
let changesComplete = false;
// called with each line from the log file
const onCommand = function(obj) {
if (obj.command === 't') {
state[obj.batch] = true;
} else if (obj.command === 'd') {
delete state[obj.batch];
} else if (obj.command === 'changes_complete') {
changesComplete = true;
}
};
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, false))
.on('finish', function() {
const obj = { changesComplete: changesComplete, batches: state };
callback(null, obj);
});
};
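// Illustrative usage sketch; the log file name is an assumption:
//
//   const logfilesummary = require('./logfilesummary.js');
//   logfilesummary('backup.log', function(err, summary) {
//     // summary.changesComplete - whether the changes feed was fully spooled
//     // summary.batches - object whose keys are the batch IDs still to fetch
//     console.log(summary.changesComplete, Object.keys(summary.batches));
//   });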

@@ -1,2 +0,2 @@

// Copyright © 2017, 2018 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,17 +16,150 @@ // Licensed under the Apache License, Version 2.0 (the "License");

module.exports = function(db, options, readstream, ee, callback) {
const liner = require('../includes/liner.js')();
const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);
const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');
// pipe the input to the output, via transformation functions
readstream
.pipe(liner) // transform the input stream into per-line
.on('error', function(err) {
// Forward the error to the writer event emitter where we already have
// listeners on for handling errors
writer.emit('error', err);
})
.pipe(writer); // transform the data
module.exports = function(db, bufferSize, parallelism, ee) {
const writer = new stream.Transform({ objectMode: true });
let buffer = [];
let written = 0;
let linenumber = 0;
callback(null, writer);
// this is the queue of chunks that are written to the database
// the queue's payload will be an array of documents to be written,
// the size of the array will be bufferSize. The variable parallelism
// determines how many HTTP requests will occur at any one time.
const q = async.queue(function(payload, cb) {
// if we are restoring known revisions, we need to supply new_edits=false
if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
payload.new_edits = false;
debug('Using new_edits false mode.');
}
if (!didError) {
db.service.postBulkDocs({
db: db.db,
bulkDocs: payload
}).then(response => {
if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
` and ${response.result ? response.result.length : 'unavailable'} items`);
}
written += payload.docs.length;
writer.emit('restored', { documents: payload.docs.length, total: written });
cb();
}).catch(err => {
err = error.convertResponseError(err);
debug(`Error writing docs ${err.name} ${err.message}`);
cb(err, payload);
});
}
}, parallelism);
let didError = false;
// write the contents of the buffer to CouchDB in blocks of bufferSize
function processBuffer(flush, callback) {
function taskCallback(err, payload) {
if (err && !didError) {
debug(`Queue task failed with error ${err.name}`);
didError = true;
q.kill();
writer.emit('error', err);
}
}
if (flush || buffer.length >= bufferSize) {
// work through the buffer to break off bufferSize chunks
// and feed the chunks to the queue
do {
// split the buffer into bufferSize chunks
const toSend = buffer.splice(0, bufferSize);
// and add the chunk to the queue
debug(`Adding ${toSend.length} to the write queue.`);
q.push({ docs: toSend }, taskCallback);
} while (buffer.length >= bufferSize);
// send any leftover documents to the queue
if (flush && buffer.length > 0) {
debug(`Adding remaining ${buffer.length} to the write queue.`);
q.push({ docs: buffer }, taskCallback);
}
// wait until the queue size falls to a reasonable level
async.until(
// wait until the queue length drops to twice the parallelism
// or until empty on the last write
function(callback) {
// if we encountered an error, stop this until loop
if (didError) {
return callback(null, true);
}
if (flush) {
callback(null, q.idle() && q.length() === 0);
} else {
callback(null, q.length() <= parallelism * 2);
}
},
function(cb) {
setTimeout(cb, 20);
},
function() {
if (flush && !didError) {
writer.emit('finished', { total: written });
}
// callback when we're happy with the queue size
callback();
});
} else {
callback();
}
}
// take an object
writer._transform = function(obj, encoding, done) {
// each obj that arrives here is a line from the backup file
// it should contain an array of objects. The length of the array
// depends on the bufferSize at backup time.
linenumber++;
if (!didError && obj !== '') {
// see if it parses as JSON
try {
const arr = JSON.parse(obj);
// if it's an array with a length
if (typeof arr === 'object' && arr.length > 0) {
// push each document into a buffer
buffer = buffer.concat(arr);
// pause the stream
// it's likely that the speed with which data can be read from disk
// may exceed the rate it can be written to CouchDB. To prevent
// the whole file being buffered in memory, we pause the stream here.
// it is resumed, when processBuffer calls back and we call done()
this.pause();
// break the buffer into bufferSize chunks to be written to the database
processBuffer(false, done);
} else {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
done();
}
} catch (e) {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
// Could be an incomplete write that was subsequently resumed
done();
}
} else {
done();
}
};
// called when we need to flush everything
writer._flush = function(done) {
processBuffer(true, done);
};
return writer;
};
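// Illustrative usage sketch; the db client and backing event emitter are assumptions:
//
//   const events = require('events');
//   const ee = new events.EventEmitter();
//   const writer = require('./writer.js')(db, 500, 5, ee); // db from request.client(...)
//   writer.on('restored', function(obj) { console.log('restored', obj.documents, 'total', obj.total); });
//   writer.on('finished', function(obj) { console.log('done, total', obj.total); });
//   // each line written to the writer is one backup-file line: a JSON array of docs
//   writer.write(JSON.stringify([{ _id: 'doc1' }, { _id: 'doc2' }]));
//   writer.end();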

@@ -1,2 +0,3 @@

// Copyright © 2017, 2018 IBM Corp. All rights reserved.
#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,163 +15,68 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
// Small script which backs up a Cloudant or CouchDB database to an S3
// bucket via a stream rather than on-disk file.
//
// The script generates the backup object name by combining the path
// part of the database URL and the current time.
'use strict';
const stream = require('stream');
const url = require('url');
const error = require('../includes/error.js');
const fs = require('fs');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const backupDebug = debug('couchbackup:backup');
const backupBatchDebug = debug('couchbackup:backup:batch');
const AWS = require('aws-sdk');
const couchbackup = require('@cloudant/couchbackup');
const debug = require('debug')('s3-backup');
const VError = require('verror').VError;
backupDebug.enabled = true;
/*
Main function, invoked at the bottom of the file.
*/
function main() {
const argv = require('yargs')
.usage('Usage: $0 [options]')
.example('$0 -s https://user:pass@host/db -b <bucket>', 'Backup db to bucket')
.options({
source: { alias: 's', nargs: 1, demandOption: true, describe: 'Source database URL' },
bucket: { alias: 'b', nargs: 1, demandOption: true, describe: 'Destination bucket' },
prefix: { alias: 'p', nargs: 1, describe: 'Prefix for backup object key', default: 'couchbackup' },
s3url: { nargs: 1, describe: 'S3 endpoint URL' },
awsprofile: { nargs: 1, describe: 'The profile section to use in the ~/.aws/credentials file', default: 'default' }
})
.help('h').alias('h', 'help')
.epilog('Copyright (C) IBM 2017')
.argv;
try {
const program = parser.parseBackupArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);
const sourceUrl = argv.source;
const backupName = new url.URL(sourceUrl).pathname.split('/').filter(function(x) { return x; }).join('-');
const backupBucket = argv.bucket;
const backupKeyPrefix = `${argv.prefix}-${backupName}`;
const shallow = argv.shallow;
const backupKey = `${backupKeyPrefix}-${new Date().toISOString()}`;
const s3Endpoint = argv.s3url;
const awsProfile = argv.awsprofile;
// Creds are from ~/.aws/credentials, environment etc. (see S3 docs).
const awsOpts = {
signatureVersion: 'v4',
credentials: new AWS.SharedIniFileCredentials({ profile: awsProfile })
const opts = {
bufferSize: program.bufferSize,
log: program.log,
mode: program.mode,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
resume: program.resume,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};
if (typeof s3Endpoint !== 'undefined') {
awsOpts.endpoint = new AWS.Endpoint(s3Endpoint);
}
const s3 = new AWS.S3(awsOpts);
debug(`Creating a new backup of ${s(sourceUrl)} at ${backupBucket}/${backupKey}...`);
bucketAccessible(s3, backupBucket)
.then(() => {
return backupToS3(sourceUrl, s3, backupBucket, backupKey, shallow);
})
.then(() => {
debug('done.');
})
.catch((reason) => {
debug(`Error: ${reason}`);
process.exit(1);
});
}
// log configuration to console
console.error('='.repeat(80));
console.error('Performing backup on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));
/**
* Return a promise that resolves if the bucket is available and
* rejects if not.
*
* @param {any} s3 S3 client object
* @param {any} bucketName Bucket name
* @returns Promise
*/
function bucketAccessible(s3, bucketName) {
return new Promise(function(resolve, reject) {
const params = {
Bucket: bucketName
};
s3.headBucket(params, function(err, data) {
if (err) {
reject(new VError(err, 'S3 bucket not accessible'));
} else {
resolve();
}
});
});
}
backupBatchDebug.enabled = !program.quiet;
/**
* Backup directly from Cloudant to an object store object via a stream.
*
* @param {any} sourceUrl URL of database
* @param {any} s3Client Object store client
* @param {any} s3Bucket Backup destination bucket
* @param {any} s3Key Backup destination key name (shouldn't exist)
* @param {any} shallow Whether to use the couchbackup `shallow` mode
* @returns Promise
*/
function backupToS3(sourceUrl, s3Client, s3Bucket, s3Key, shallow) {
return new Promise((resolve, reject) => {
debug(`Setting up S3 upload to ${s3Bucket}/${s3Key}`);
let ws = process.stdout;
// A pass through stream that has couchbackup's output
// written to it and is then read by the S3 upload client.
// It has a 64MB highwater mark to allow for fairly
// uneven network connectivity.
const streamToUpload = new stream.PassThrough({ highWaterMark: 67108864 });
// open output file
if (program.output) {
let flags = 'w';
if (program.log && program.resume) {
flags = 'a';
}
const fd = fs.openSync(program.output, flags);
ws = fs.createWriteStream(null, { fd });
}
// Set up S3 upload.
const params = {
Bucket: s3Bucket,
Key: s3Key,
Body: streamToUpload
};
s3Client.upload(params, function(err, data) {
debug('Object store upload done');
if (err) {
debug(err);
reject(new VError(err, 'Object store upload failed'));
return;
}
debug('Object store upload succeeded');
debug(data);
resolve();
}).on('httpUploadProgress', (progress) => {
debug(`Object store upload progress: ${JSON.stringify(progress)}`);
});
backupDebug('Fetching all database changes...');
debug(`Starting streaming data from ${s(sourceUrl)}`);
couchbackup.backup(
sourceUrl,
streamToUpload,
(err, obj) => {
if (err) {
debug(err);
reject(new VError(err, 'CouchBackup failed with an error'));
return;
}
debug(`Download from ${s(sourceUrl)} complete.`);
streamToUpload.end(); // must call end() to complete upload.
// resolve() is called by the upload
}
);
return couchbackup.backup(
databaseUrl,
ws,
opts,
error.terminationCallback
).on('changes', function(batch) {
backupBatchDebug('Total batches received:', batch + 1);
}).on('written', function(obj) {
backupBatchDebug('Written batch ID:', obj.batch, 'Total document revisions written:', obj.total, 'Time:', obj.time);
}).on('error', function(e) {
backupDebug('ERROR', e);
}).on('finished', function(obj) {
backupDebug('Finished - Total document revisions written:', obj.total);
});
} catch (err) {
error.terminationCallback(err);
}
/**
* Remove creds from a URL, e.g., before logging
*
* @param {string} url URL to safen
*/
function s(originalUrl) {
const parts = new url.URL(originalUrl);
return url.format(parts, { auth: false });
}
main();

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -16,99 +16,61 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// fatal errors
const codes = {
Error: 1,
InvalidOption: 2,
DatabaseNotFound: 10,
Unauthorized: 11,
Forbidden: 12,
DatabaseNotEmpty: 13,
NoLogFileName: 20,
LogDoesNotExist: 21,
IncompleteChangesInLogFile: 22,
SpoolChangesError: 30,
HTTPFatalError: 40,
BulkGetError: 50
};
const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');
class BackupError extends Error {
constructor(name, message) {
super(message);
this.name = name;
}
}
const onLine = function(onCommand, batches) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
class HTTPError extends BackupError {
constructor(responseError, name) {
// Special case some names for more useful error messages
switch (responseError.status) {
case 401:
name = 'Unauthorized';
break;
case 403:
name = 'Forbidden';
break;
default:
name = name || 'HTTPFatalError';
}
super(name, responseError.message);
}
}
let matches;
// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
if (err) {
// Construct an HTTPError if there is request information on the error
// Codes < 400 are considered OK
if (err.status >= 400) {
return new HTTPError(err);
} else {
// Send it back again if there was no status code, e.g. a connection error
return augmentMessage(err);
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
// if this is one we want
if (obj.command === 't' && batches.indexOf(obj.batch) > -1) {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
onCommand(obj);
}
}
}
}
done();
};
return change;
};
function convertResponseError(responseError, errorFactory) {
if (!errorFactory) {
errorFactory = checkResponse;
}
return errorFactory(responseError);
}
module.exports = function(log, batches, callback) {
// our sense of state
const retval = { };
function augmentMessage(err) {
// For errors that don't have a status code, we are likely looking at a
// connection error.
// Try to augment the message with more detail (core puts the code in statusText)
if (err && err.statusText) {
err.message = `${err.message} ${err.statusText}`;
}
if (err && err.description) {
err.message = `${err.message} ${err.description}`;
}
return err;
}
// called with each line from the log file
const onCommand = function(obj) {
retval[obj.batch] = obj;
};
function wrapPossibleInvalidUrlError(err) {
if (err.code === 'ERR_INVALID_URL') {
// Wrap ERR_INVALID_URL in our own InvalidOption
return new BackupError('InvalidOption', err.message);
}
return err;
}
module.exports = {
BackupError,
HTTPError,
wrapPossibleInvalidUrlError,
convertResponseError,
terminationCallback: function terminationCallback(err, data) {
if (err) {
console.error(`ERROR: ${err.message}`);
process.exitCode = codes[err.name] || 1;
process.exit();
}
}
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, batches))
.on('error', function(err) {
callback(err);
})
.on('finish', function() {
callback(null, retval);
});
};
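// Illustrative usage sketch; the log name and batch IDs are assumptions:
//
//   const logfilegetbatches = require('./logfilegetbatches.js');
//   logfilegetbatches('backup.log', [1, 3], function(err, batches) {
//     // batches is keyed by batch ID, e.g. batches[1].docs holds that batch's doc IDs
//     console.log(Object.keys(batches));
//   });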

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -16,267 +16,45 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');

/**
 * Read documents from a database to be backed up.
 *
 * @param {string} db - `@cloudant/cloudant` DB object for source database.
 * @param {number} blocksize - number of documents to download in single request
 * @param {number} parallelism - number of concurrent downloads
 * @param {string} log - path to log file to use
 * @param {boolean} resume - whether to resume from an existing log file
 * @returns EventEmitter with following events:
 *  - `received` - called with a block of documents to write to backup
 *  - `error` - on error
 *  - `finished` - when backup process is finished (either complete or errored)
 */
module.exports = function(db, options) {
  const ee = new events.EventEmitter();
  const start = new Date().getTime(); // backup start time
  const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)

  function proceedWithBackup() {
    if (options.resume) {
      // pick up from existing log file from previous run
      downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
    } else {
      // create new log file and process
      spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
        if (err) {
          ee.emit('error', err);
        } else {
          downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
        }
      });
    }
  }

  validateBulkGetSupport(db, function(err) {
    if (err) {
      return ee.emit('error', err);
    } else {
      proceedWithBackup();
    }
  });

  return ee;
};

/**
 * Validate /_bulk_get support for a specified database.
 *
 * @param {string} db - nodejs-cloudant db
 * @param {function} callback - called on completion with signature (err)
 */
function validateBulkGetSupport(db, callback) {
  db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
    err = error.convertResponseError(err, function(err) {
      switch (err.status) {
        case undefined:
          // There was no status code on the error
          return err;
        case 404:
          return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
        default:
          return new error.HTTPError(err);
      }
    });
    callback(err);
  });
}

/**
 * Download remaining batches in a log file, splitting batches into sets
 * to avoid enqueueing too many in one go.
 *
 * @param {string} log - log file name to maintain download state
 * @param {string} db - nodejs-cloudant db
 * @param {events.EventEmitter} ee - event emitter to emit received events on
 * @param {time} startTime - start time for backup process
 * @param {number} batchesPerDownloadSession - max batches to enqueue for
 *  download at a time. As batches contain many doc IDs, this helps avoid
 *  exhausting memory.
 * @param {number} parallelism - number of concurrent downloads
 * @returns function to call to download remaining batches with signature
 *  (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
 */
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
  let total = 0; // running total of documents downloaded so far
  let noRemainingBatches = false;

  // Generate a set of batches (up to batchesPerDownloadSession) to download from the
  // log file and download them. Set noRemainingBatches to `true` for last batch.
  function downloadSingleBatchSet(done) {
    // Fetch the doc IDs for the batches in the current set to
    // download them.
    function batchSetComplete(err, data) {
      if (!err) {
        total = data.total;
      }
      done(err);
    }
    function processRetrievedBatches(err, batches) {
      if (!err) {
        // process them in parallelised queue
        processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
      } else {
        batchSetComplete(err);
      }
    }

    readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
      if (err) {
        ee.emit('error', err);
        // Stop processing changes file for fatal errors
        noRemainingBatches = true;
        done();
      } else {
        if (batchSetIds.length === 0) {
          noRemainingBatches = true;
          return done();
        }
        logfilegetbatches(log, batchSetIds, processRetrievedBatches);
      }
    });
  }

  // Return true if all batches in log file have been downloaded
  function isFinished(callback) { callback(null, noRemainingBatches); }

  function onComplete() {
    ee.emit('finished', { total: total });
  }

  async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}

/**
 * Return a set of uncompleted download batch IDs from the log file.
 *
 * @param {string} log - log file path
 * @param {number} batchesPerDownloadSession - maximum IDs to return
 * @param {function} callback - sign (err, batchSetIds array)
 */
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
  logfilesummary(log, function processSummary(err, summary) {
    if (!err) {
      if (!summary.changesComplete) {
        callback(new error.BackupError('IncompleteChangesInLogFile',
          'WARNING: Changes did not finish spooling'));
        return;
      }
      if (Object.keys(summary.batches).length === 0) {
        return callback(null, []);
      }
      // batch IDs are the property names of summary.batches
      const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
      callback(null, batchSetIds);
    } else {
      callback(err);
    }
  });
}

/**
 * Download a set of batches retrieved from a log file. When a download is
 * complete, add a line to the logfile indicating such.
 *
 * @param {any} db - nodejs-cloudant database
 * @param {any} parallelism - number of concurrent requests to make
 * @param {any} log - log file to drive downloads from
 * @param {any} batches - batches to download
 * @param {any} ee - event emitter for progress. This function emits
 *  received and error events.
 * @param {any} start - time backup started, to report deltas
 * @param {any} grandtotal - count of documents downloaded prior to this set
 *  of batches
 * @param {any} callback - completion callback, (err, {total: number}).
 */
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
  let hasErrored = false;
  let total = grandtotal;

  // queue to process the fetch requests in an orderly fashion using _bulk_get
  const q = async.queue(function(payload, done) {
    const output = [];
    const thisBatch = payload.batch;
    delete payload.batch;
    delete payload.command;

    function logCompletedBatch(batch) {
      if (log) {
        fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
      } else {
        done();
      }
    }

    // do the /db/_bulk_get request
    db.service.postBulkGet({
      db: db.db,
      revs: true,
      docs: payload.docs
    }).then(response => {
      // create an output array with the docs returned
      response.result.results.forEach(function(d) {
        if (d.docs) {
          d.docs.forEach(function(doc) {
            if (doc.ok) {
              output.push(doc.ok);
            }
          });
        }
      });
      total += output.length;
      const t = (new Date().getTime() - start) / 1000;
      ee.emit('received', {
        batch: thisBatch,
        data: output,
        length: output.length,
        time: t,
        total: total
      }, q, logCompletedBatch);
    }).catch(err => {
      if (!hasErrored) {
        hasErrored = true;
        err = error.convertResponseError(err);
        // Kill the queue for fatal errors
        q.kill();
        ee.emit('error', err);
      }
      done();
    });
  }, parallelism);

  for (const i in batches) {
    q.push(batches[i]);
  }

  q.drain(function() {
    callback(null, { total: total });
  });
}

/**
 * Returns first N properties on an object.
 *
 * @param {object} obj - object with properties
 * @param {number} count - number of properties to return
 */
function getPropertyNames(obj, count) {
  // decide which batch numbers to deal with
  const batchestofetch = [];
  let j = 0;
  for (const i in obj) {
    batchestofetch.push(parseInt(i));
    j++;
    if (j >= count) break;
  }
  return batchestofetch;
}

/**
 * Utility methods for the command line interface.
 * @module cliutils
 * @see module:cliutils
 */

const url = require('url');
const error = require('./error.js');

module.exports = {
  /**
   * Combine a base URL and a database name, ensuring at least single slash
   * between root and database name. This allows users to have Couch behind
   * proxies that mount Couch's / endpoint at some other mount point.
   * @param {string} root - root URL
   * @param {string} databaseName - database name
   * @return concatenated URL.
   *
   * @private
   */
  databaseUrl: function databaseUrl(root, databaseName) {
    if (!root.endsWith('/')) {
      root = root + '/';
    }
    try {
      return new url.URL(encodeURIComponent(databaseName), root).toString();
    } catch (err) {
      throw error.wrapPossibleInvalidUrlError(err);
    }
  },

  /**
   * Generate CLI argument usage text.
   *
   * @param {string} description - argument description.
   * @param {string} defaultValue - default argument value.
   *
   * @private
   */
  getUsage: function getUsage(description, defaultValue) {
    return `${description} ${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
  }
};

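A short sketch of how the databaseUrl helper above (an internal utility) resolves proxy-mounted roots and percent-encodes database names; the URLs are hypothetical and the require path is an assumption:

// a minimal sketch, assuming cliutils.js is at this relative path
const cliutils = require('./includes/cliutils.js');

// a Couch mounted behind a proxy keeps its mount point
console.log(cliutils.databaseUrl('http://example.com/couch', 'animaldb'));
// http://example.com/couch/animaldb

// special characters in the database name are percent-encoded
console.log(cliutils.databaseUrl('http://example.com', 'my/db'));
// http://example.com/my%2Fdb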
@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -16,164 +16,32 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');

const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
  process.version + ')';

// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
  constructor(options) {
    super(options);
    this.data = [];
  }

  _write(chunk, encoding, callback) {
    this.data.push(chunk);
    callback();
  }

  stringBody() {
    return Buffer.concat(this.data).toString();
  }
}

// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
  let method;
  let requestUrl;
  if (err.response) {
    if (err.response.config.url) {
      requestUrl = err.response.config.url;
      method = err.response.config.method;
    }
    // Override the status text with an improved message
    let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
      `${method} ${requestUrl}`;
    if (err.response.data) {
      // Check if we have a JSON response and try to get the error/reason
      if (err.response.headers['content-type'] === 'application/json') {
        if (!err.response.data.error && err.response.data.pipe) {
          // If we didn't find a JSON object with `error` then we might have a stream response.
          // Detect the stream by the presence of `pipe` and use it to get the body and parse
          // the error information.
          const p = new Promise((resolve, reject) => {
            const errorBody = new ResponseWriteable();
            err.response.data.pipe(errorBody)
              .on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
              .on('error', () => { reject(err); });
          });
          // Replace the stream on the response with the parsed object
          err.response.data = await p;
        }
        // Append the error/reason if available
        if (err.response.data.error) {
          // Override the status text with our more complete message
          errorMsg += ` - Error: ${err.response.data.error}`;
          if (err.response.data.reason) {
            errorMsg += `, Reason: ${err.response.data.reason}`;
          }
        }
      } else {
        errorMsg += err.response.data;
      }
      // Set a new message for use by the node-sdk-core
      // We use the errors array because it gets processed
      // ahead of all other service errors.
      err.response.data.errors = [{ message: errorMsg }];
    }
  } else if (err.request) {
    if (!err.message.includes(err.config.url)) {
      // Augment the message with the URL and method
      // but don't do it again if we already have the URL.
      err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
    }
  }
  return Promise.reject(err);
};

module.exports = {
  client: function(rawUrl, opts) {
    const url = new URL(rawUrl);
    // Split the URL to separate service from database
    // Use origin as the "base" to remove auth elements
    const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
    const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
    let authenticator;
    // Default to cookieauth unless an IAM key is provided
    if (opts.iamApiKey) {
      const iamAuthOpts = { apikey: opts.iamApiKey };
      if (opts.iamTokenUrl) {
        iamAuthOpts.url = opts.iamTokenUrl;
      }
      authenticator = new IamAuthenticator(iamAuthOpts);
    } else if (url.username) {
      authenticator = new CouchdbSessionAuthenticator({
        username: decodeURIComponent(url.username),
        password: decodeURIComponent(url.password)
      });
    } else {
      authenticator = new NoAuthAuthenticator();
    }
    const serviceOpts = {
      authenticator: authenticator,
      timeout: opts.requestTimeout,
      // Axios performance options
      maxContentLength: -1
    };
    const service = new CloudantV1(serviceOpts);
    // Configure retries
    const maxRetries = 2; // for 3 total attempts
    service.getHttpClient().defaults.raxConfig = {
      // retries for status codes
      retry: maxRetries,
      // retries for non-response e.g. ETIMEDOUT
      noResponseRetries: maxRetries,
      backoffType: 'exponential',
      httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
      statusCodesToRetry: [
        [429, 429],
        [500, 599]
      ],
      shouldRetry: err => {
        const cfg = retryPlugin.getConfig(err);
        // cap at max retries regardless of response/non-response type
        if (cfg.currentRetryAttempt >= maxRetries) {
          return false;
        } else {
          return retryPlugin.shouldRetryRequest(err);
        }
      },
      instance: service.getHttpClient()
    };
    retryPlugin.attach(service.getHttpClient());
    service.setServiceUrl(actUrl.toString());
    if (authenticator instanceof CouchdbSessionAuthenticator) {
      // Awkward workaround for known Couch issue with compression on _session requests
      // It is not feasible to disable compression on all requests with the amount of
      // data this lib needs to move, so override the property in the tokenManager instance.
      authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
    }
    if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
      authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
    }
    // Add error interceptors to put URLs in error messages
    service.getHttpClient().interceptors.response.use(null, errorHelper);
    // Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
    service.getHttpClient().interceptors.request.use(function(requestConfig) {
      requestConfig.headers['User-Agent'] = userAgent;
      return requestConfig;
    }, null);
    return { service: service, db: dbName, url: actUrl.toString() };
  }
};

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function() {
  const liner = new stream.Transform({ objectMode: true });

  liner._transform = function(chunk, encoding, done) {
    let data = chunk.toString();
    if (this._lastLineData) {
      data = this._lastLineData + data;
    }
    const lines = data.split('\n');
    this._lastLineData = lines.splice(lines.length - 1, 1)[0];
    for (const i in lines) {
      this.push(lines[i]);
    }
    done();
  };

  liner._flush = function(done) {
    if (this._lastLineData) {
      this.push(this._lastLineData);
    }
    this._lastLineData = null;
    done();
  };

  return liner;
};

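A minimal sketch of the client factory above; the URL is a placeholder, the require path is an assumption, and getServerInformation is just one example call on the returned CloudantV1 service:

// a minimal sketch, assuming request.js is at this relative path
const request = require('./includes/request.js');

const client = request.client('https://user:pass@host.example/mydb', { requestTimeout: 120000 });
console.log(client.db);  // 'mydb' - database name split off the path
console.log(client.url); // service URL with credentials and database name removed
client.service.getServerInformation()
  .then(r => console.log(r.result.couchdb))
  .catch(console.error);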
@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -16,104 +16,17 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const path = require('path');
const tmp = require('tmp');

/**
  Return API default settings.
*/
function apiDefaults() {
  return {
    parallelism: 5,
    bufferSize: 500,
    requestTimeout: 120000,
    log: tmp.tmpNameSync(),
    resume: false,
    mode: 'full'
  };
}

/**
  Return CLI default settings.
*/
function cliDefaults() {
  const defaults = apiDefaults();

  // add additional legacy settings
  defaults.db = 'test';
  defaults.url = 'http://localhost:5984';

  // add CLI only option
  defaults.quiet = false;

  return defaults;
}

/**
  Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
  // if we have a custom CouchDB url
  if (typeof process.env.COUCH_URL !== 'undefined') {
    opts.url = process.env.COUCH_URL;
  }

  // if we have a specified database
  if (typeof process.env.COUCH_DATABASE !== 'undefined') {
    opts.db = process.env.COUCH_DATABASE;
  }

  // if we have a specified buffer size
  if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
    opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
  }

  // if we have a specified parallelism
  if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
    opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
  }

  // if we have a specified request timeout
  if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
    opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
  }

  // if we have a specified log file
  if (typeof process.env.COUCH_LOG !== 'undefined') {
    opts.log = path.normalize(process.env.COUCH_LOG);
  }

  // if we are instructed to resume
  if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
    opts.resume = true;
  }

  // if we are given an output filename
  if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
    opts.output = path.normalize(process.env.COUCH_OUTPUT);
  }

  // if we only want a shallow copy
  if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
    opts.mode = 'shallow';
  }

  // if we are instructed to be quiet
  if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
    opts.quiet = true;
  }

  // if we have a specified API key
  if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
    opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
  }

  // if we have a specified IAM token endpoint
  if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
    opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
  }
}

module.exports = {
  apiDefaults: apiDefaults,
  cliDefaults: cliDefaults,
  applyEnvironmentVariables: applyEnvironmentVariables
};

module.exports = function(db, options, readstream, ee, callback) {
  const liner = require('../includes/liner.js')();
  const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);

  // pipe the input to the output, via transformation functions
  readstream
    .pipe(liner) // transform the input stream into per-line
    .on('error', function(err) {
      // Forward the error to the writer event emitter where we already have
      // listeners on for handling errors
      writer.emit('error', err);
    })
    .pipe(writer); // transform the data

  callback(null, writer);
};

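A minimal sketch of the defaults and environment-variable flow above; the require path is an assumption:

// a minimal sketch, assuming config.js is at this relative path
const config = require('./includes/config.js');

process.env.COUCH_URL = 'http://localhost:5984';
process.env.COUCH_BUFFER_SIZE = '250';

const opts = config.cliDefaults();
config.applyEnvironmentVariables(opts);
console.log(opts.url);        // http://localhost:5984
console.log(opts.bufferSize); // 250 (parsed to a number)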
@@ -1,2 +0,2 @@

// Copyright © 2017, 2018 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,45 +16,267 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/**
 * Utility methods for the command line interface.
 * @module cliutils
 * @see module:cliutils
 */

const url = require('url');
const error = require('./error.js');

module.exports = {
  /**
   * Combine a base URL and a database name, ensuring at least single slash
   * between root and database name. This allows users to have Couch behind
   * proxies that mount Couch's / endpoint at some other mount point.
   * @param {string} root - root URL
   * @param {string} databaseName - database name
   * @return concatenated URL.
   *
   * @private
   */
  databaseUrl: function databaseUrl(root, databaseName) {
    if (!root.endsWith('/')) {
      root = root + '/';
    }
    try {
      return new url.URL(encodeURIComponent(databaseName), root).toString();
    } catch (err) {
      throw error.wrapPossibleInvalidUrlError(err);
    }
  },

  /**
   * Generate CLI argument usage text.
   *
   * @param {string} description - argument description.
   * @param {string} defaultValue - default argument value.
   *
   * @private
   */
  getUsage: function getUsage(description, defaultValue) {
    return `${description} ${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
  }
};

const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');

/**
 * Read documents from a database to be backed up.
 *
 * @param {string} db - `@cloudant/cloudant` DB object for source database.
 * @param {number} blocksize - number of documents to download in single request
 * @param {number} parallelism - number of concurrent downloads
 * @param {string} log - path to log file to use
 * @param {boolean} resume - whether to resume from an existing log file
 * @returns EventEmitter with following events:
 *  - `received` - called with a block of documents to write to backup
 *  - `error` - on error
 *  - `finished` - when backup process is finished (either complete or errored)
 */
module.exports = function(db, options) {
  const ee = new events.EventEmitter();
  const start = new Date().getTime(); // backup start time
  const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)

  function proceedWithBackup() {
    if (options.resume) {
      // pick up from existing log file from previous run
      downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
    } else {
      // create new log file and process
      spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
        if (err) {
          ee.emit('error', err);
        } else {
          downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
        }
      });
    }
  }

  validateBulkGetSupport(db, function(err) {
    if (err) {
      return ee.emit('error', err);
    } else {
      proceedWithBackup();
    }
  });

  return ee;
};

/**
 * Validate /_bulk_get support for a specified database.
 *
 * @param {string} db - nodejs-cloudant db
 * @param {function} callback - called on completion with signature (err)
 */
function validateBulkGetSupport(db, callback) {
  db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
    err = error.convertResponseError(err, function(err) {
      switch (err.status) {
        case undefined:
          // There was no status code on the error
          return err;
        case 404:
          return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
        default:
          return new error.HTTPError(err);
      }
    });
    callback(err);
  });
}

/**
 * Download remaining batches in a log file, splitting batches into sets
 * to avoid enqueueing too many in one go.
 *
 * @param {string} log - log file name to maintain download state
 * @param {string} db - nodejs-cloudant db
 * @param {events.EventEmitter} ee - event emitter to emit received events on
 * @param {time} startTime - start time for backup process
 * @param {number} batchesPerDownloadSession - max batches to enqueue for
 *  download at a time. As batches contain many doc IDs, this helps avoid
 *  exhausting memory.
 * @param {number} parallelism - number of concurrent downloads
 * @returns function to call to download remaining batches with signature
 *  (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
 */
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
  let total = 0; // running total of documents downloaded so far
  let noRemainingBatches = false;

  // Generate a set of batches (up to batchesPerDownloadSession) to download from the
  // log file and download them. Set noRemainingBatches to `true` for last batch.
  function downloadSingleBatchSet(done) {
    // Fetch the doc IDs for the batches in the current set to
    // download them.
    function batchSetComplete(err, data) {
      if (!err) {
        total = data.total;
      }
      done(err);
    }
    function processRetrievedBatches(err, batches) {
      if (!err) {
        // process them in parallelised queue
        processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
      } else {
        batchSetComplete(err);
      }
    }

    readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
      if (err) {
        ee.emit('error', err);
        // Stop processing changes file for fatal errors
        noRemainingBatches = true;
        done();
      } else {
        if (batchSetIds.length === 0) {
          noRemainingBatches = true;
          return done();
        }
        logfilegetbatches(log, batchSetIds, processRetrievedBatches);
      }
    });
  }

  // Return true if all batches in log file have been downloaded
  function isFinished(callback) { callback(null, noRemainingBatches); }

  function onComplete() {
    ee.emit('finished', { total: total });
  }

  async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}

/**
 * Return a set of uncompleted download batch IDs from the log file.
 *
 * @param {string} log - log file path
 * @param {number} batchesPerDownloadSession - maximum IDs to return
 * @param {function} callback - sign (err, batchSetIds array)
 */
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
  logfilesummary(log, function processSummary(err, summary) {
    if (!err) {
      if (!summary.changesComplete) {
        callback(new error.BackupError('IncompleteChangesInLogFile',
          'WARNING: Changes did not finish spooling'));
        return;
      }
      if (Object.keys(summary.batches).length === 0) {
        return callback(null, []);
      }
      // batch IDs are the property names of summary.batches
      const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
      callback(null, batchSetIds);
    } else {
      callback(err);
    }
  });
}

/**
 * Download a set of batches retrieved from a log file. When a download is
 * complete, add a line to the logfile indicating such.
 *
 * @param {any} db - nodejs-cloudant database
 * @param {any} parallelism - number of concurrent requests to make
 * @param {any} log - log file to drive downloads from
 * @param {any} batches - batches to download
 * @param {any} ee - event emitter for progress. This function emits
 *  received and error events.
 * @param {any} start - time backup started, to report deltas
 * @param {any} grandtotal - count of documents downloaded prior to this set
 *  of batches
 * @param {any} callback - completion callback, (err, {total: number}).
 */
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
  let hasErrored = false;
  let total = grandtotal;

  // queue to process the fetch requests in an orderly fashion using _bulk_get
  const q = async.queue(function(payload, done) {
    const output = [];
    const thisBatch = payload.batch;
    delete payload.batch;
    delete payload.command;

    function logCompletedBatch(batch) {
      if (log) {
        fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
      } else {
        done();
      }
    }

    // do the /db/_bulk_get request
    db.service.postBulkGet({
      db: db.db,
      revs: true,
      docs: payload.docs
    }).then(response => {
      // create an output array with the docs returned
      response.result.results.forEach(function(d) {
        if (d.docs) {
          d.docs.forEach(function(doc) {
            if (doc.ok) {
              output.push(doc.ok);
            }
          });
        }
      });
      total += output.length;
      const t = (new Date().getTime() - start) / 1000;
      ee.emit('received', {
        batch: thisBatch,
        data: output,
        length: output.length,
        time: t,
        total: total
      }, q, logCompletedBatch);
    }).catch(err => {
      if (!hasErrored) {
        hasErrored = true;
        err = error.convertResponseError(err);
        // Kill the queue for fatal errors
        q.kill();
        ee.emit('error', err);
      }
      done();
    });
  }, parallelism);

  for (const i in batches) {
    q.push(batches[i]);
  }

  q.drain(function() {
    callback(null, { total: total });
  });
}

/**
 * Returns first N properties on an object.
 *
 * @param {object} obj - object with properties
 * @param {number} count - number of properties to return
 */
function getPropertyNames(obj, count) {
  // decide which batch numbers to deal with
  const batchestofetch = [];
  let j = 0;
  for (const i in obj) {
    batchestofetch.push(parseInt(i));
    j++;
    if (j >= count) break;
  }
  return batchestofetch;
}

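A hypothetical sketch of consuming the backup EventEmitter above; the require paths and URL are assumptions, and the extra 'received' arguments follow the emit call in processBatchSet:

// a minimal sketch, assuming the module paths and a local CouchDB
const request = require('./includes/request.js');
const backup = require('./includes/backup.js');

const dbClient = request.client('http://localhost:5984/animaldb', { requestTimeout: 120000 });
const ee = backup(dbClient, { log: 'backup.log', bufferSize: 500, parallelism: 5, resume: false });

// 'received' is emitted with the block, the queue and a completion callback
// that appends the ':d batchN' line to the log file
ee.on('received', function(block, q, logCompletedBatch) {
  console.log(`batch ${block.batch}: ${block.length} docs in ${block.time}s`);
  logCompletedBatch(block.batch);
});
ee.on('error', function(err) { console.error(err.message); });
ee.on('finished', function(summary) { console.log(`total ${summary.total} docs`); });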
@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,78 +16,164 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');

const onLine = function(onCommand, getDocs) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    if (line && line[0] === ':') {
      const obj = {
        command: null,
        batch: null,
        docs: []
      };

      let matches;

      // extract command
      matches = line.match(/^:([a-z_]+) ?/);
      if (matches) {
        obj.command = matches[1];
      }

      // extract batch
      matches = line.match(/ batch([0-9]+)/);
      if (matches) {
        obj.batch = parseInt(matches[1]);
      }

      // extract doc ids
      if (getDocs && obj.command === 't') {
        const json = line.replace(/^.* batch[0-9]+ /, '').trim();
        obj.docs = JSON.parse(json);
      }
      onCommand(obj);
    }
    done();
  };
  return change;
};

/**
 * Generate a list of remaining batches from a download file.
 *
 * @param {string} log - log file name
 * @param {function} callback - callback with err, {changesComplete: N, batches: N}.
 *  changesComplete signifies whether the log file appeared to
 *  have completed reading the changes feed (contains :changes_complete).
 *  batches are remaining batch IDs for download.
 */
module.exports = function(log, callback) {
  // our sense of state
  const state = { };
  let changesComplete = false;

  // called with each line from the log file
  const onCommand = function(obj) {
    if (obj.command === 't') {
      state[obj.batch] = true;
    } else if (obj.command === 'd') {
      delete state[obj.batch];
    } else if (obj.command === 'changes_complete') {
      changesComplete = true;
    }
  };

  // stream through the previous log file
  fs.createReadStream(log)
    .pipe(liner())
    .pipe(onLine(onCommand, false))
    .on('finish', function() {
      const obj = { changesComplete: changesComplete, batches: state };
      callback(null, obj);
    });
};

const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');

const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
  process.version + ')';

// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
  constructor(options) {
    super(options);
    this.data = [];
  }

  _write(chunk, encoding, callback) {
    this.data.push(chunk);
    callback();
  }

  stringBody() {
    return Buffer.concat(this.data).toString();
  }
}

// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
  let method;
  let requestUrl;
  if (err.response) {
    if (err.response.config.url) {
      requestUrl = err.response.config.url;
      method = err.response.config.method;
    }
    // Override the status text with an improved message
    let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
      `${method} ${requestUrl}`;
    if (err.response.data) {
      // Check if we have a JSON response and try to get the error/reason
      if (err.response.headers['content-type'] === 'application/json') {
        if (!err.response.data.error && err.response.data.pipe) {
          // If we didn't find a JSON object with `error` then we might have a stream response.
          // Detect the stream by the presence of `pipe` and use it to get the body and parse
          // the error information.
          const p = new Promise((resolve, reject) => {
            const errorBody = new ResponseWriteable();
            err.response.data.pipe(errorBody)
              .on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
              .on('error', () => { reject(err); });
          });
          // Replace the stream on the response with the parsed object
          err.response.data = await p;
        }
        // Append the error/reason if available
        if (err.response.data.error) {
          // Override the status text with our more complete message
          errorMsg += ` - Error: ${err.response.data.error}`;
          if (err.response.data.reason) {
            errorMsg += `, Reason: ${err.response.data.reason}`;
          }
        }
      } else {
        errorMsg += err.response.data;
      }
      // Set a new message for use by the node-sdk-core
      // We use the errors array because it gets processed
      // ahead of all other service errors.
      err.response.data.errors = [{ message: errorMsg }];
    }
  } else if (err.request) {
    if (!err.message.includes(err.config.url)) {
      // Augment the message with the URL and method
      // but don't do it again if we already have the URL.
      err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
    }
  }
  return Promise.reject(err);
};

module.exports = {
  client: function(rawUrl, opts) {
    const url = new URL(rawUrl);
    // Split the URL to separate service from database
    // Use origin as the "base" to remove auth elements
    const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
    const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
    let authenticator;
    // Default to cookieauth unless an IAM key is provided
    if (opts.iamApiKey) {
      const iamAuthOpts = { apikey: opts.iamApiKey };
      if (opts.iamTokenUrl) {
        iamAuthOpts.url = opts.iamTokenUrl;
      }
      authenticator = new IamAuthenticator(iamAuthOpts);
    } else if (url.username) {
      authenticator = new CouchdbSessionAuthenticator({
        username: decodeURIComponent(url.username),
        password: decodeURIComponent(url.password)
      });
    } else {
      authenticator = new NoAuthAuthenticator();
    }
    const serviceOpts = {
      authenticator: authenticator,
      timeout: opts.requestTimeout,
      // Axios performance options
      maxContentLength: -1
    };
    const service = new CloudantV1(serviceOpts);
    // Configure retries
    const maxRetries = 2; // for 3 total attempts
    service.getHttpClient().defaults.raxConfig = {
      // retries for status codes
      retry: maxRetries,
      // retries for non-response e.g. ETIMEDOUT
      noResponseRetries: maxRetries,
      backoffType: 'exponential',
      httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
      statusCodesToRetry: [
        [429, 429],
        [500, 599]
      ],
      shouldRetry: err => {
        const cfg = retryPlugin.getConfig(err);
        // cap at max retries regardless of response/non-response type
        if (cfg.currentRetryAttempt >= maxRetries) {
          return false;
        } else {
          return retryPlugin.shouldRetryRequest(err);
        }
      },
      instance: service.getHttpClient()
    };
    retryPlugin.attach(service.getHttpClient());
    service.setServiceUrl(actUrl.toString());
    if (authenticator instanceof CouchdbSessionAuthenticator) {
      // Awkward workaround for known Couch issue with compression on _session requests
      // It is not feasible to disable compression on all requests with the amount of
      // data this lib needs to move, so override the property in the tokenManager instance.
      authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
    }
    if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
      authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
    }
    // Add error interceptors to put URLs in error messages
    service.getHttpClient().interceptors.response.use(null, errorHelper);
    // Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
    service.getHttpClient().interceptors.request.use(function(requestConfig) {
      requestConfig.headers['User-Agent'] = userAgent;
      return requestConfig;
    }, null);
    return { service: service, db: dbName, url: actUrl.toString() };
  }
};

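To illustrate the log format consumed above, a hypothetical log file and the summary it produces; the file name, contents and require path are assumptions:

// a minimal sketch; ':t' spools a batch, ':d' marks it downloaded
const fs = require('fs');
const logfilesummary = require('./includes/logfilesummary.js');

fs.writeFileSync('demo.log',
  ':t batch0 [{"id":"a"}]\n' +
  ':t batch1 [{"id":"b"}]\n' +
  ':changes_complete\n' +
  ':d batch0\n');

logfilesummary('demo.log', function(err, summary) {
  console.log(summary.changesComplete);      // true - ':changes_complete' was seen
  console.log(Object.keys(summary.batches)); // [ '1' ] - batch1 has no ':d' line yet
});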
@@ -16,61 +16,27 @@ // Copyright © 2017 IBM Corp. All rights reserved.

const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');

const onLine = function(onCommand, batches) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    if (line && line[0] === ':') {
      const obj = {
        command: null,
        batch: null,
        docs: []
      };

      let matches;

      // extract command
      matches = line.match(/^:([a-z_]+) ?/);
      if (matches) {
        obj.command = matches[1];
      }

      // extract batch
      matches = line.match(/ batch([0-9]+)/);
      if (matches) {
        obj.batch = parseInt(matches[1]);
      }

      // if this is one we want
      if (obj.command === 't' && batches.indexOf(obj.batch) > -1) {
        const json = line.replace(/^.* batch[0-9]+ /, '').trim();
        obj.docs = JSON.parse(json);
        onCommand(obj);
      }
    }
    done();
  };
  return change;
};

module.exports = function(log, batches, callback) {
  // our sense of state
  const retval = { };
  // called with each line from the log file
  const onCommand = function(obj) {
    retval[obj.batch] = obj;
  };
  // stream through the previous log file
  fs.createReadStream(log)
    .pipe(liner())
    .pipe(onLine(onCommand, batches))
    .on('error', function(err) {
      callback(err);
    })
    .on('finish', function() {
      callback(null, retval);
    });
};

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function(onChange) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    let obj = null;
    // one change per line - remove the trailing comma
    line = line.trim().replace(/,$/, '');
    // extract the last_seq at the end of the changes feed
    if (line.match(/^"last_seq":/)) {
      line = '{' + line;
    }
    try {
      obj = JSON.parse(line);
    } catch (e) {
    }
    onChange(obj);
    done();
  };
  return change;
};

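A minimal sketch of retrieving the document IDs for selected batches with the module above; the file name, contents and require path are assumptions:

// a minimal sketch; only batches listed in the second argument are returned
const fs = require('fs');
const logfilegetbatches = require('./includes/logfilegetbatches.js');

fs.writeFileSync('demo.log',
  ':t batch0 [{"id":"a"}]\n' +
  ':t batch1 [{"id":"b"}]\n');

// fetch only batch 1; the callback receives an object keyed by batch number
logfilegetbatches('demo.log', [1], function(err, batches) {
  console.log(batches[1].docs); // [ { id: 'b' } ]
});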
@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2022 IBM Corp. All rights reserved.
//

@@ -16,32 +16,66 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function() {
  const liner = new stream.Transform({ objectMode: true });

  liner._transform = function(chunk, encoding, done) {
    let data = chunk.toString();
    if (this._lastLineData) {
      data = this._lastLineData + data;
    }
    const lines = data.split('\n');
    this._lastLineData = lines.splice(lines.length - 1, 1)[0];
    for (const i in lines) {
      this.push(lines[i]);
    }
    done();
  };

  liner._flush = function(done) {
    if (this._lastLineData) {
      this.push(this._lastLineData);
    }
    this._lastLineData = null;
    done();
  };

  return liner;
};

const async = require('async');
const error = require('./error.js');
const events = require('events');

module.exports = function(db, options) {
  const ee = new events.EventEmitter();
  const start = new Date().getTime();
  let batch = 0;
  let hasErrored = false;
  let startKey = null;
  let total = 0;

  async.doUntil(
    function(callback) {
      // Note, include_docs: true is set automatically when using the
      // fetch function.
      const opts = { db: db.db, limit: options.bufferSize, includeDocs: true };
      // To avoid double fetching a document solely for the purposes of getting
      // the next ID to use as a startKey for the next page we instead use the
      // last ID of the current page and append the lowest unicode sort
      // character.
      if (startKey) opts.startKey = `${startKey}\0`;
      db.service.postAllDocs(opts).then(response => {
        const body = response.result;
        if (!body.rows) {
          ee.emit('error', new error.BackupError(
            'AllDocsError', 'ERROR: Invalid all docs response'));
          callback();
        } else {
          if (body.rows.length < opts.limit) {
            startKey = null; // last batch
          } else {
            startKey = body.rows[opts.limit - 1].id;
          }
          const docs = [];
          body.rows.forEach(function(doc) {
            docs.push(doc.doc);
          });
          if (docs.length > 0) {
            ee.emit('received', {
              batch: batch++,
              data: docs,
              length: docs.length,
              time: (new Date().getTime() - start) / 1000,
              total: total += docs.length
            });
          }
          callback();
        }
      }).catch(err => {
        err = error.convertResponseError(err);
        ee.emit('error', err);
        hasErrored = true;
        callback();
      });
    },
    function(callback) { callback(null, hasErrored || startKey == null); },
    function() { ee.emit('finished', { total: total }); }
  );

  return ee;
};
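The '\0' suffix on startKey above avoids re-fetching the last document of each page; a short illustration with hypothetical ids:

// a minimal sketch of the paging rule used above (ids are hypothetical)
const page = [{ id: 'doc01' }, { id: 'doc02' }, { id: 'doc03' }];
const limit = 3;
// a full page means there may be more rows: start the next page just after
// the last id by appending '\0', the lowest unicode sort character
const startKey = page.length < limit ? null : `${page[limit - 1].id}\0`;
console.log(JSON.stringify(startKey)); // "doc03\u0000"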
{
"name": "@cloudant/couchbackup",
"version": "2.9.15-SNAPSHOT.174",
"version": "2.9.15-SNAPSHOT.175",
"description": "CouchBackup - command-line backup utility for Cloudant/CouchDB",

@@ -20,3 +20,3 @@ "homepage": "https://github.com/IBM/couchbackup",

"engines": {
"node": "^18"
"node": "^18 || ^20"
},

@@ -23,0 +23,0 @@ "dependencies": {

@@ -36,3 +36,3 @@ # CouchBackup

### Requirements
* Node.js LTS version 18.
* Node.js LTS version 18 or 20.
* The minimum required CouchDB version is 2.0.0.

@@ -39,0 +39,0 @@

Sorry, the diff of this file is not supported yet
