@cloudant/couchbackup - npm Package Compare versions

Comparing version 2.9.15-SNAPSHOT.167 to 2.9.15-SNAPSHOT.168

.scannerwork/scanner-report/changesets-10.pb

.scannerwork/report-task.txt

@@ -6,3 +6,3 @@ projectKey=couchbackup
 dashboardUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/dashboard?id=couchbackup&branch=main
-ceTaskId=AYvdzGEBDXQaJHzPkRnb
-ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYvdzGEBDXQaJHzPkRnb
+ceTaskId=AYvsJGEhDXQaJHzPkRrz
+ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYvsJGEhDXQaJHzPkRrz

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2018 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
 //

@@ -14,178 +14,166 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Removed (2.9.15-SNAPSHOT.167):

// limitations under the License.

// Small script which backs up a Cloudant or CouchDB database to an S3
// bucket, using an intermediary file on disk.
//
// The script generates the backup object name by combining together the path
// part of the database URL and the current time.

'use strict';

const stream = require('stream');
const fs = require('fs');
const url = require('url');

const AWS = require('aws-sdk');
const couchbackup = require('@cloudant/couchbackup');
const debug = require('debug')('s3-backup');
const tmp = require('tmp');
const VError = require('verror').VError;

/*
  Main function, run from base of file.
*/
function main() {
  const argv = require('yargs')
    .usage('Usage: $0 [options]')
    .example('$0 -s https://user:pass@host/db -b <bucket>', 'Backup db to bucket')
    .options({
      source: { alias: 's', nargs: 1, demandOption: true, describe: 'Source database URL' },
      bucket: { alias: 'b', nargs: 1, demandOption: true, describe: 'Destination bucket' },
      prefix: { alias: 'p', nargs: 1, describe: 'Prefix for backup object key', default: 'couchbackup' },
      s3url: { nargs: 1, describe: 'S3 endpoint URL' },
      awsprofile: { nargs: 1, describe: 'The profile section to use in the ~/.aws/credentials file', default: 'default' }
    })
    .help('h').alias('h', 'help')
    .epilog('Copyright (C) IBM 2017')
    .argv;

  const sourceUrl = argv.source;
  const backupBucket = argv.bucket;
  const backupName = new url.URL(sourceUrl).pathname.split('/').filter(function(x) { return x; }).join('-');
  const backupKeyPrefix = `${argv.prefix}-${backupName}`;
  const backupKey = `${backupKeyPrefix}-${new Date().toISOString()}`;
  const backupTmpFile = tmp.fileSync();

  const s3Endpoint = argv.s3url;
  const awsProfile = argv.awsprofile;

  // Creds are from ~/.aws/credentials, environment etc. (see S3 docs).
  const awsOpts = {
    signatureVersion: 'v4',
    credentials: new AWS.SharedIniFileCredentials({ profile: awsProfile })
  };
  if (typeof s3Endpoint !== 'undefined') {
    awsOpts.endpoint = new AWS.Endpoint(s3Endpoint);
  }
  const s3 = new AWS.S3(awsOpts);

  debug(`Creating a new backup of ${s(sourceUrl)} at ${backupBucket}/${backupKey}...`);
  bucketAccessible(s3, backupBucket)
    .then(() => {
      return createBackupFile(sourceUrl, backupTmpFile.name);
    })
    .then(() => {
      return uploadNewBackup(s3, backupTmpFile.name, backupBucket, backupKey);
    })
    .then(() => {
      debug('Backup successful!');
      backupTmpFile.removeCallback();
      debug('done.');
    })
    .catch((reason) => {
      debug(`Error: ${reason}`);
    });
}

/**
 * Return a promise that resolves if the bucket is available and
 * rejects if not.
 *
 * @param {any} s3 S3 client object
 * @param {any} bucketName Bucket name
 * @returns Promise
 */
function bucketAccessible(s3, bucketName) {
  return new Promise(function(resolve, reject) {
    const params = {
      Bucket: bucketName
    };
    s3.headBucket(params, function(err, data) {
      if (err) {
        reject(new VError(err, 'S3 bucket not accessible'));
      } else {
        resolve();
      }
    });
  });
}

/**
 * Use couchbackup to create a backup of the specified database to a file path.
 *
 * @param {any} sourceUrl Database URL
 * @param {any} backupTmpFilePath Path to write file
 * @returns Promise
 */
function createBackupFile(sourceUrl, backupTmpFilePath) {
  return new Promise((resolve, reject) => {
    couchbackup.backup(
      sourceUrl,
      fs.createWriteStream(backupTmpFilePath),
      (err) => {
        if (err) {
          return reject(new VError(err, 'CouchBackup process failed'));
        }
        debug('couchbackup to file done; uploading to S3');
        resolve('creating backup file complete');
      }
    );
  });
}

/**
 * Upload a backup file to an S3 bucket.
 *
 * @param {any} s3 Object store client
 * @param {any} backupTmpFilePath Path of backup file to write.
 * @param {any} bucket Object store bucket name
 * @param {any} key Object store key name
 * @returns Promise
 */
function uploadNewBackup(s3, backupTmpFilePath, bucket, key) {
  return new Promise((resolve, reject) => {
    debug(`Uploading from ${backupTmpFilePath} to ${bucket}/${key}`);

    function uploadFromStream(s3, bucket, key) {
      const pass = new stream.PassThrough();
      const params = {
        Bucket: bucket,
        Key: key,
        Body: pass
      };
      s3.upload(params, function(err, data) {
        debug('S3 upload done');
        if (err) {
          debug(err);
          reject(new VError(err, 'Upload failed'));
          return;
        }
        debug('Upload succeeded');
        debug(data);
        resolve();
      }).httpUploadProgress = (progress) => {
        debug(`S3 upload progress: ${progress}`);
      };
      return pass;
    }

    const inputStream = fs.createReadStream(backupTmpFilePath);
    const s3Stream = uploadFromStream(s3, bucket, key);
    inputStream.pipe(s3Stream);
  });
}

/**
 * Remove creds from a URL, e.g., before logging
 *
 * @param {string} url URL to safen
 */
function s(originalUrl) {
  const parts = new url.URL(originalUrl);
  return url.format(parts, { auth: false });
}

main();

Added (2.9.15-SNAPSHOT.168):

// limitations under the License.
'use strict';

const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');

const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
  process.version + ')';

// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
  constructor(options) {
    super(options);
    this.data = [];
  }

  _write(chunk, encoding, callback) {
    this.data.push(chunk);
    callback();
  }

  stringBody() {
    return Buffer.concat(this.data).toString();
  }
}

// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
  let method;
  let requestUrl;
  if (err.response) {
    if (err.response.config.url) {
      requestUrl = err.response.config.url;
      method = err.response.config.method;
    }
    // Override the status text with an improved message
    let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
      `${method} ${requestUrl}`;
    if (err.response.data) {
      // Check if we have a JSON response and try to get the error/reason
      if (err.response.headers['content-type'] === 'application/json') {
        if (!err.response.data.error && err.response.data.pipe) {
          // If we didn't find a JSON object with `error` then we might have a stream response.
          // Detect the stream by the presence of `pipe` and use it to get the body and parse
          // the error information.
          const p = new Promise((resolve, reject) => {
            const errorBody = new ResponseWriteable();
            err.response.data.pipe(errorBody)
              .on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
              .on('error', () => { reject(err); });
          });
          // Replace the stream on the response with the parsed object
          err.response.data = await p;
        }
        // Append the error/reason if available
        if (err.response.data.error) {
          // Override the status text with our more complete message
          errorMsg += ` - Error: ${err.response.data.error}`;
          if (err.response.data.reason) {
            errorMsg += `, Reason: ${err.response.data.reason}`;
          }
        }
      } else {
        errorMsg += err.response.data;
      }
      // Set a new message for use by the node-sdk-core
      // We use the errors array because it gets processed
      // ahead of all other service errors.
      err.response.data.errors = [{ message: errorMsg }];
    }
  } else if (err.request) {
    if (!err.message.includes(err.config.url)) {
      // Augment the message with the URL and method
      // but don't do it again if we already have the URL.
      err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
    }
  }
  return Promise.reject(err);
};

module.exports = {
  client: function(rawUrl, opts) {
    const url = new URL(rawUrl);
    // Split the URL to separate service from database
    // Use origin as the "base" to remove auth elements
    const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
    const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
    let authenticator;
    // Default to cookieauth unless an IAM key is provided
    if (opts.iamApiKey) {
      const iamAuthOpts = { apikey: opts.iamApiKey };
      if (opts.iamTokenUrl) {
        iamAuthOpts.url = opts.iamTokenUrl;
      }
      authenticator = new IamAuthenticator(iamAuthOpts);
    } else if (url.username) {
      authenticator = new CouchdbSessionAuthenticator({
        username: decodeURIComponent(url.username),
        password: decodeURIComponent(url.password)
      });
    } else {
      authenticator = new NoAuthAuthenticator();
    }
    const serviceOpts = {
      authenticator: authenticator,
      timeout: opts.requestTimeout,
      // Axios performance options
      maxContentLength: -1
    };
    const service = new CloudantV1(serviceOpts);
    // Configure retries
    const maxRetries = 2; // for 3 total attempts
    service.getHttpClient().defaults.raxConfig = {
      // retries for status codes
      retry: maxRetries,
      // retries for non-response e.g. ETIMEDOUT
      noResponseRetries: maxRetries,
      backoffType: 'exponential',
      httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
      statusCodesToRetry: [
        [429, 429],
        [500, 599]
      ],
      shouldRetry: err => {
        const cfg = retryPlugin.getConfig(err);
        // cap at max retries regardless of response/non-response type
        if (cfg.currentRetryAttempt >= maxRetries) {
          return false;
        } else {
          return retryPlugin.shouldRetryRequest(err);
        }
      },
      instance: service.getHttpClient()
    };
    retryPlugin.attach(service.getHttpClient());
    service.setServiceUrl(actUrl.toString());
    if (authenticator instanceof CouchdbSessionAuthenticator) {
      // Awkward workaround for known Couch issue with compression on _session requests
      // It is not feasible to disable compression on all requests with the amount of
      // data this lib needs to move, so override the property in the tokenManager instance.
      authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
    }
    if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
      authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
    }
    // Add error interceptors to put URLs in error messages
    service.getHttpClient().interceptors.response.use(null, errorHelper);
    // Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
    service.getHttpClient().interceptors.request.use(function(requestConfig) {
      requestConfig.headers['User-Agent'] = userAgent;
      return requestConfig;
    }, null);
    return { service: service, db: dbName, url: actUrl.toString() };
  }
};
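Usage note (not from the diff): a minimal sketch of calling the client factory above, assuming the added module is saved as request.js; the database URL and option values here are invented for illustration.

const { client } = require('./request.js');

// user:pass in the URL selects CouchdbSessionAuthenticator above.
const db = client('https://user:pass@couch.example.com/exampledb', {
  requestTimeout: 120000 // same default the package uses elsewhere
});

// db.service is a CloudantV1 instance scoped to the server URL;
// db.db is the database name split off the path.
db.service.getDatabaseInformation({ db: db.db })
  .then(response => console.log(response.result.doc_count))
  .catch(err => console.error(err.message)); // message augmented by errorHelper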

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2018 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
 //

@@ -14,163 +14,117 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Removed (2.9.15-SNAPSHOT.167):

// limitations under the License.

// Small script which backs up a Cloudant or CouchDB database to an S3
// bucket via a stream rather than on-disk file.
//
// The script generates the backup object name by combining together the path
// part of the database URL and the current time.

'use strict';

const stream = require('stream');
const url = require('url');

const AWS = require('aws-sdk');
const couchbackup = require('@cloudant/couchbackup');
const debug = require('debug')('s3-backup');
const VError = require('verror').VError;

/*
  Main function, run from base of file.
*/
function main() {
  const argv = require('yargs')
    .usage('Usage: $0 [options]')
    .example('$0 -s https://user:pass@host/db -b <bucket>', 'Backup db to bucket')
    .options({
      source: { alias: 's', nargs: 1, demandOption: true, describe: 'Source database URL' },
      bucket: { alias: 'b', nargs: 1, demandOption: true, describe: 'Destination bucket' },
      prefix: { alias: 'p', nargs: 1, describe: 'Prefix for backup object key', default: 'couchbackup' },
      s3url: { nargs: 1, describe: 'S3 endpoint URL' },
      awsprofile: { nargs: 1, describe: 'The profile section to use in the ~/.aws/credentials file', default: 'default' }
    })
    .help('h').alias('h', 'help')
    .epilog('Copyright (C) IBM 2017')
    .argv;

  const sourceUrl = argv.source;
  const backupName = new url.URL(sourceUrl).pathname.split('/').filter(function(x) { return x; }).join('-');
  const backupBucket = argv.bucket;
  const backupKeyPrefix = `${argv.prefix}-${backupName}`;
  const shallow = argv.shallow;
  const backupKey = `${backupKeyPrefix}-${new Date().toISOString()}`;
  const s3Endpoint = argv.s3url;
  const awsProfile = argv.awsprofile;

  // Creds are from ~/.aws/credentials, environment etc. (see S3 docs).
  const awsOpts = {
    signatureVersion: 'v4',
    credentials: new AWS.SharedIniFileCredentials({ profile: awsProfile })
  };
  if (typeof s3Endpoint !== 'undefined') {
    awsOpts.endpoint = new AWS.Endpoint(s3Endpoint);
  }
  const s3 = new AWS.S3(awsOpts);

  debug(`Creating a new backup of ${s(sourceUrl)} at ${backupBucket}/${backupKey}...`);
  bucketAccessible(s3, backupBucket)
    .then(() => {
      return backupToS3(sourceUrl, s3, backupBucket, backupKey, shallow);
    })
    .then(() => {
      debug('done.');
    })
    .catch((reason) => {
      debug(`Error: ${reason}`);
      process.exit(1);
    });
}

/**
 * Return a promise that resolves if the bucket is available and
 * rejects if not.
 *
 * @param {any} s3 S3 client object
 * @param {any} bucketName Bucket name
 * @returns Promise
 */
function bucketAccessible(s3, bucketName) {
  return new Promise(function(resolve, reject) {
    const params = {
      Bucket: bucketName
    };
    s3.headBucket(params, function(err, data) {
      if (err) {
        reject(new VError(err, 'S3 bucket not accessible'));
      } else {
        resolve();
      }
    });
  });
}

/**
 * Backup directly from Cloudant to an object store object via a stream.
 *
 * @param {any} sourceUrl URL of database
 * @param {any} s3Client Object store client
 * @param {any} s3Bucket Backup destination bucket
 * @param {any} s3Key Backup destination key name (shouldn't exist)
 * @param {any} shallow Whether to use the couchbackup `shallow` mode
 * @returns Promise
 */
function backupToS3(sourceUrl, s3Client, s3Bucket, s3Key, shallow) {
  return new Promise((resolve, reject) => {
    debug(`Setting up S3 upload to ${s3Bucket}/${s3Key}`);

    // A pass through stream that has couchbackup's output
    // written to it and it then read by the S3 upload client.
    // It has a 64MB highwater mark to allow for fairly
    // uneven network connectivity.
    const streamToUpload = new stream.PassThrough({ highWaterMark: 67108864 });

    // Set up S3 upload.
    const params = {
      Bucket: s3Bucket,
      Key: s3Key,
      Body: streamToUpload
    };
    s3Client.upload(params, function(err, data) {
      debug('Object store upload done');
      if (err) {
        debug(err);
        reject(new VError(err, 'Object store upload failed'));
        return;
      }
      debug('Object store upload succeeded');
      debug(data);
      resolve();
    }).httpUploadProgress = (progress) => {
      debug(`Object store upload progress: ${progress}`);
    };

    debug(`Starting streaming data from ${s(sourceUrl)}`);
    couchbackup.backup(
      sourceUrl,
      streamToUpload,
      (err, obj) => {
        if (err) {
          debug(err);
          reject(new VError(err, 'CouchBackup failed with an error'));
          return;
        }
        debug(`Download from ${s(sourceUrl)} complete.`);
        streamToUpload.end(); // must call end() to complete upload.
        // resolve() is called by the upload
      }
    );
  });
}

/**
 * Remove creds from a URL, e.g., before logging
 *
 * @param {string} url URL to safen
 */
function s(originalUrl) {
  const parts = new url.URL(originalUrl);
  return url.format(parts, { auth: false });
}

main();

Added (2.9.15-SNAPSHOT.168):

// limitations under the License.
'use strict';

const cliutils = require('./cliutils.js');
const config = require('./config.js');
const error = require('./error.js');
const path = require('path');
const pkg = require('../package.json');

function parseBackupArgs() {
  const program = require('commander');

  // Option CLI defaults
  const defaults = config.cliDefaults();

  // Options set by environment variables
  const envVarOptions = {};
  config.applyEnvironmentVariables(envVarOptions);

  program
    .version(pkg.version)
    .description('Backup a CouchDB/Cloudant database to a backup text file.')
    .usage('[options...]')
    .option('-b, --buffer-size <n>',
      cliutils.getUsage('number of documents fetched at once', defaults.bufferSize),
      Number)
    .option('-d, --db <db>',
      cliutils.getUsage('name of the database to backup', defaults.db))
    .option('-k, --iam-api-key <API key>',
      cliutils.getUsage('IAM API key to access the Cloudant server'))
    .option('-l, --log <file>',
      cliutils.getUsage('file to store logging information during backup; invalid in "shallow" mode', 'a temporary file'),
      path.normalize)
    .option('-m, --mode <mode>',
      cliutils.getUsage('"shallow" if only a superficial backup is done (ignoring conflicts and revision tokens), else "full" for complete backup', defaults.mode),
      (mode) => { return mode.toLowerCase(); })
    .option('-o, --output <file>',
      cliutils.getUsage('file name to store the backup data', 'stdout'),
      path.normalize)
    .option('-p, --parallelism <n>',
      cliutils.getUsage('number of HTTP requests to perform in parallel when performing a backup; ignored in "shallow" mode', defaults.parallelism),
      Number)
    .option('-q, --quiet',
      cliutils.getUsage('suppress batch messages', defaults.quiet))
    .option('-r, --resume',
      cliutils.getUsage('continue a previous backup from its last known position; invalid in "shallow" mode', defaults.resume))
    .option('-t, --request-timeout <n>',
      cliutils.getUsage('milliseconds to wait for a response to an HTTP request before retrying the request', defaults.requestTimeout),
      Number)
    .option('-u, --url <url>',
      cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
    .parse(process.argv);

  // Remove defaults that don't apply when using shallow mode
  if (program.opts().mode === 'shallow' || envVarOptions.mode === 'shallow') {
    delete defaults.parallelism;
    delete defaults.log;
    delete defaults.resume;
  }

  // Apply the options in order so that the CLI overrides env vars and env variables
  // override defaults.
  const opts = Object.assign({}, defaults, envVarOptions, program.opts());

  if (opts.resume && (opts.log === defaults.log)) {
    // If resuming and the log file arg is the newly generated tmp name from defaults then we know that --log wasn't specified.
    // We have to do this check here for the CLI case because of the default.
    error.terminationCallback(new error.BackupError('NoLogFileName', 'To resume a backup, a log file must be specified'));
  }

  return opts;
}

function parseRestoreArgs() {
  const program = require('commander');

  // Option CLI defaults
  const defaults = config.cliDefaults();

  // Options set by environment variables
  const envVarOptions = {};
  config.applyEnvironmentVariables(envVarOptions);

  program
    .version(pkg.version)
    .description('Restore a CouchDB/Cloudant database from a backup text file.')
    .usage('[options...]')
    .option('-b, --buffer-size <n>',
      cliutils.getUsage('number of documents restored at once', defaults.bufferSize),
      Number)
    .option('-d, --db <db>',
      cliutils.getUsage('name of the new, existing database to restore to', defaults.db))
    .option('-k, --iam-api-key <API key>',
      cliutils.getUsage('IAM API key to access the Cloudant server'))
    .option('-p, --parallelism <n>',
      cliutils.getUsage('number of HTTP requests to perform in parallel when restoring a backup', defaults.parallelism),
      Number)
    .option('-q, --quiet',
      cliutils.getUsage('suppress batch messages', defaults.quiet))
    .option('-t, --request-timeout <n>',
      cliutils.getUsage('milliseconds to wait for a response to an HTTP request before retrying the request', defaults.requestTimeout),
      Number)
    .option('-u, --url <url>',
      cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
    .parse(process.argv);

  // Apply the options in order so that the CLI overrides env vars and env variables
  // override defaults.
  const opts = Object.assign({}, defaults, envVarOptions, program.opts());

  return opts;
}

module.exports = {
  parseBackupArgs: parseBackupArgs,
  parseRestoreArgs: parseRestoreArgs
};
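Usage note (not from the diff): the Object.assign ordering above is what makes CLI flags override environment variables, and environment variables override defaults; later sources win. A small illustration with invented values:

const defaults = { parallelism: 5, bufferSize: 500 };
const envVarOptions = { bufferSize: 250 }; // e.g. parsed from COUCH_BUFFER_SIZE
const cliOptions = { parallelism: 2 };     // e.g. parsed from --parallelism 2

const opts = Object.assign({}, defaults, envVarOptions, cliOptions);
// => { parallelism: 2, bufferSize: 250 }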

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2022 IBM Corp. All rights reserved.
+// Copyright © 2017 IBM Corp. All rights reserved.
 //

@@ -17,96 +17,60 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Removed (2.9.15-SNAPSHOT.167):

const fs = require('fs');
const liner = require('./liner.js');
const change = require('./change.js');
const error = require('./error.js');
const debug = require('debug')('couchbackup:spoolchanges');

/**
 * Write log file for all changes from a database, ready for downloading
 * in batches.
 *
 * @param {any} db - database client object
 * @param {string} log - path to log file to use
 * @param {number} bufferSize - the number of changes per batch/log line
 * @param {function(err)} callback - a callback to run on completion
 */
module.exports = function(db, log, bufferSize, ee, callback) {
  // list of document ids to process
  const buffer = [];
  let batch = 0;
  let lastSeq = null;
  const logStream = fs.createWriteStream(log);
  let pending = 0;
  // The number of changes to fetch per request
  const limit = 100000;

  // send document ids to the queue in batches of bufferSize + the last batch
  const processBuffer = function(lastOne) {
    if (buffer.length >= bufferSize || (lastOne && buffer.length > 0)) {
      debug('writing', buffer.length, 'changes to the backup file');
      const b = { docs: buffer.splice(0, bufferSize), batch: batch };
      logStream.write(':t batch' + batch + ' ' + JSON.stringify(b.docs) + '\n');
      ee.emit('changes', batch);
      batch++;
    }
  };

  // called once per received change
  const onChange = function(c) {
    if (c) {
      if (c.error) {
        ee.emit('error', new error.BackupError('InvalidChange', `Received invalid change: ${c}`));
      } else if (c.changes) {
        const obj = { id: c.id };
        buffer.push(obj);
        processBuffer(false);
      } else if (c.last_seq) {
        lastSeq = c.last_seq;
        pending = c.pending;
      }
    }
  };

  function getChanges(since = 0) {
    debug('making changes request since ' + since);
    return db.service.postChangesAsStream({ db: db.db, since: since, limit: limit, seqInterval: limit })
      .then(response => {
        response.result.pipe(liner())
          .on('error', function(err) {
            logStream.end();
            callback(err);
          })
          .pipe(change(onChange))
          .on('error', function(err) {
            logStream.end();
            callback(err);
          })
          .on('finish', function() {
            processBuffer(true);
            if (!lastSeq) {
              logStream.end();
              debug('changes request terminated before last_seq was sent');
              callback(new error.BackupError('SpoolChangesError', 'Changes request terminated before last_seq was sent'));
            } else {
              debug(`changes request completed with last_seq: ${lastSeq} and ${pending} changes pending.`);
              if (pending > 0) {
                // Return the next promise
                return getChanges(lastSeq);
              } else {
                debug('finished streaming database changes');
                logStream.end(':changes_complete ' + lastSeq + '\n', 'utf8', callback);
              }
            }
          });
      })
      .catch(err => {
        logStream.end();
        if (err.status && err.status >= 400) {
          callback(error.convertResponseError(err));
        } else if (err.name !== 'SpoolChangesError') {
          callback(new error.BackupError('SpoolChangesError', `Failed changes request - ${err.message}`));
        }
      });
  }

  getChanges();
};

Added (2.9.15-SNAPSHOT.168):

const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');

const onLine = function(onCommand, batches) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    if (line && line[0] === ':') {
      const obj = {
        command: null,
        batch: null,
        docs: []
      };

      let matches;

      // extract command
      matches = line.match(/^:([a-z_]+) ?/);
      if (matches) {
        obj.command = matches[1];
      }

      // extract batch
      matches = line.match(/ batch([0-9]+)/);
      if (matches) {
        obj.batch = parseInt(matches[1]);
      }

      // if this is one we want
      if (obj.command === 't' && batches.indexOf(obj.batch) > -1) {
        const json = line.replace(/^.* batch[0-9]+ /, '').trim();
        obj.docs = JSON.parse(json);
        onCommand(obj);
      }
    }
    done();
  };
  return change;
};

module.exports = function(log, batches, callback) {
  // our sense of state
  const retval = { };

  // called with each line from the log file
  const onCommand = function(obj) {
    retval[obj.batch] = obj;
  };

  // stream through the previous log file
  fs.createReadStream(log)
    .pipe(liner())
    .pipe(onLine(onCommand, batches))
    .on('error', function(err) {
      callback(err);
    })
    .on('finish', function() {
      callback(null, retval);
    });
};
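Format note (not from the diff): the spoolchanges module writes, and the batch reader above later re-reads, a simple line-oriented log. Based on the `:t`, `:d` and `:changes_complete` markers in the code, a hypothetical log (doc IDs and sequence token invented) looks like:

:t batch0 [{"id":"doc1"},{"id":"doc2"}]
:t batch1 [{"id":"doc3"}]
:d batch0
:changes_complete 3-g1AAAA

`:t` lines record batches of changed document ids, `:d` marks a batch whose documents have been downloaded, and `:changes_complete` records the final sequence once the changes feed has been fully spooled.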

@@ -1,2 +1,2 @@
-// Copyright © 2017 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
 //

@@ -16,27 +16,99 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Removed (2.9.15-SNAPSHOT.167):

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function(onChange) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    let obj = null;

    // one change per line - remove the trailing comma
    line = line.trim().replace(/,$/, '');

    // extract the last_seq at the end of the changes feed
    if (line.match(/^"last_seq":/)) {
      line = '{' + line;
    }

    try {
      obj = JSON.parse(line);
    } catch (e) {
    }

    onChange(obj);
    done();
  };
  return change;
};

Added (2.9.15-SNAPSHOT.168):

// fatal errors
const codes = {
  Error: 1,
  InvalidOption: 2,
  DatabaseNotFound: 10,
  Unauthorized: 11,
  Forbidden: 12,
  DatabaseNotEmpty: 13,
  NoLogFileName: 20,
  LogDoesNotExist: 21,
  IncompleteChangesInLogFile: 22,
  SpoolChangesError: 30,
  HTTPFatalError: 40,
  BulkGetError: 50
};

class BackupError extends Error {
  constructor(name, message) {
    super(message);
    this.name = name;
  }
}

class HTTPError extends BackupError {
  constructor(responseError, name) {
    // Special case some names for more useful error messages
    switch (responseError.status) {
      case 401:
        name = 'Unauthorized';
        break;
      case 403:
        name = 'Forbidden';
        break;
      default:
        name = name || 'HTTPFatalError';
    }
    super(name, responseError.message);
  }
}

// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
  if (err) {
    // Construct an HTTPError if there is request information on the error
    // Codes < 400 are considered OK
    if (err.status >= 400) {
      return new HTTPError(err);
    } else {
      // Send it back again if there was no status code, e.g. a cxn error
      return augmentMessage(err);
    }
  }
}

function convertResponseError(responseError, errorFactory) {
  if (!errorFactory) {
    errorFactory = checkResponse;
  }
  return errorFactory(responseError);
}

function augmentMessage(err) {
  // For errors that don't have a status code, we are likely looking at a cxn
  // error.
  // Try to augment the message with more detail (core puts the code in statusText)
  if (err && err.statusText) {
    err.message = `${err.message} ${err.statusText}`;
  }
  if (err && err.description) {
    err.message = `${err.message} ${err.description}`;
  }
  return err;
}

function wrapPossibleInvalidUrlError(err) {
  if (err.code === 'ERR_INVALID_URL') {
    // Wrap ERR_INVALID_URL in our own InvalidOption
    return new BackupError('InvalidOption', err.message);
  }
  return err;
}

module.exports = {
  BackupError,
  HTTPError,
  wrapPossibleInvalidUrlError,
  convertResponseError,
  terminationCallback: function terminationCallback(err, data) {
    if (err) {
      console.error(`ERROR: ${err.message}`);
      process.exitCode = codes[err.name] || 1;
      process.exit();
    }
  }
};
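Usage note (not from the diff): a sketch of how these pieces compose, assuming the added module is saved as error.js; the response error object is fabricated for illustration.

const error = require('./error.js');

// A fabricated HTTP-layer error with a status code.
const responseError = { status: 404, message: 'Database does not exist.' };

const err = error.convertResponseError(responseError);
console.log(err instanceof error.HTTPError); // true
console.log(err.name); // 'HTTPFatalError' (404 is not special-cased like 401/403)

// error.terminationCallback(err) would then exit the process with
// codes['HTTPFatalError'], i.e. exit code 40.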

@@ -1,2 +1,2 @@
-// Copyright © 2017 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
 //

@@ -16,78 +16,150 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Removed (2.9.15-SNAPSHOT.167):

const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');

const onLine = function(onCommand, getDocs) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    if (line && line[0] === ':') {
      const obj = {
        command: null,
        batch: null,
        docs: []
      };

      let matches;

      // extract command
      matches = line.match(/^:([a-z_]+) ?/);
      if (matches) {
        obj.command = matches[1];
      }

      // extract batch
      matches = line.match(/ batch([0-9]+)/);
      if (matches) {
        obj.batch = parseInt(matches[1]);
      }

      // extract doc ids
      if (getDocs && obj.command === 't') {
        const json = line.replace(/^.* batch[0-9]+ /, '').trim();
        obj.docs = JSON.parse(json);
      }
      onCommand(obj);
    }
    done();
  };
  return change;
};

/**
 * Generate a list of remaining batches from a download file.
 *
 * @param {string} log - log file name
 * @param {function} callback - callback with err, {changesComplete: N, batches: N}.
 *  changesComplete signifies whether the log file appeared to
 *  have completed reading the changes feed (contains :changes_complete).
 *  batches are remaining batch IDs for download.
 */
module.exports = function(log, callback) {
  // our sense of state
  const state = {
  };
  let changesComplete = false;

  // called with each line from the log file
  const onCommand = function(obj) {
    if (obj.command === 't') {
      state[obj.batch] = true;
    } else if (obj.command === 'd') {
      delete state[obj.batch];
    } else if (obj.command === 'changes_complete') {
      changesComplete = true;
    }
  };

  // stream through the previous log file
  fs.createReadStream(log)
    .pipe(liner())
    .pipe(onLine(onCommand, false))
    .on('finish', function() {
      const obj = { changesComplete: changesComplete, batches: state };
      callback(null, obj);
    });
};

Added (2.9.15-SNAPSHOT.168):

const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');

module.exports = function(db, bufferSize, parallelism, ee) {
  const writer = new stream.Transform({ objectMode: true });
  let buffer = [];
  let written = 0;
  let linenumber = 0;

  // this is the queue of chunks that are written to the database
  // the queue's payload will be an array of documents to be written,
  // the size of the array will be bufferSize. The variable parallelism
  // determines how many HTTP requests will occur at any one time.
  const q = async.queue(function(payload, cb) {
    // if we are restoring known revisions, we need to supply new_edits=false
    if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
      payload.new_edits = false;
      debug('Using new_edits false mode.');
    }

    if (!didError) {
      db.service.postBulkDocs({
        db: db.db,
        bulkDocs: payload
      }).then(response => {
        if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
          throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
            ` and ${response.result ? response.result.length : 'unavailable'} items`);
        }
        written += payload.docs.length;
        writer.emit('restored', { documents: payload.docs.length, total: written });
        cb();
      }).catch(err => {
        err = error.convertResponseError(err);
        debug(`Error writing docs ${err.name} ${err.message}`);
        cb(err, payload);
      });
    }
  }, parallelism);

  let didError = false;

  // write the contents of the buffer to CouchDB in blocks of bufferSize
  function processBuffer(flush, callback) {
    function taskCallback(err, payload) {
      if (err && !didError) {
        debug(`Queue task failed with error ${err.name}`);
        didError = true;
        q.kill();
        writer.emit('error', err);
      }
    }

    if (flush || buffer.length >= bufferSize) {
      // work through the buffer to break off bufferSize chunks
      // and feed the chunks to the queue
      do {
        // split the buffer into bufferSize chunks
        const toSend = buffer.splice(0, bufferSize);

        // and add the chunk to the queue
        debug(`Adding ${toSend.length} to the write queue.`);
        q.push({ docs: toSend }, taskCallback);
      } while (buffer.length >= bufferSize);

      // send any leftover documents to the queue
      if (flush && buffer.length > 0) {
        debug(`Adding remaining ${buffer.length} to the write queue.`);
        q.push({ docs: buffer }, taskCallback);
      }

      // wait until the queue size falls to a reasonable level
      async.until(
        // wait until the queue length drops to twice the parallelism
        // or until empty on the last write
        function(callback) {
          // if we encountered an error, stop this until loop
          if (didError) {
            return callback(null, true);
          }
          if (flush) {
            callback(null, q.idle() && q.length() === 0);
          } else {
            callback(null, q.length() <= parallelism * 2);
          }
        },
        function(cb) {
          setTimeout(cb, 20);
        },

        function() {
          if (flush && !didError) {
            writer.emit('finished', { total: written });
          }
          // callback when we're happy with the queue size
          callback();
        });
    } else {
      callback();
    }
  }

  // take an object
  writer._transform = function(obj, encoding, done) {
    // each obj that arrives here is a line from the backup file
    // it should contain an array of objects. The length of the array
    // depends on the bufferSize at backup time.
    linenumber++;
    if (!didError && obj !== '') {
      // see if it parses as JSON
      try {
        const arr = JSON.parse(obj);

        // if it's an array with a length
        if (typeof arr === 'object' && arr.length > 0) {
          // push each document into a buffer
          buffer = buffer.concat(arr);

          // pause the stream
          // it's likely that the speed with which data can be read from disk
          // may exceed the rate it can be written to CouchDB. To prevent
          // the whole file being buffered in memory, we pause the stream here.
          // it is resumed, when processBuffer calls back and we call done()
          this.pause();

          // break the buffer into bufferSize chunks to be written to the database
          processBuffer(false, done);
        } else {
          ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
          done();
        }
      } catch (e) {
        ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
        // Could be an incomplete write that was subsequently resumed
        done();
      }
    } else {
      done();
    }
  };

  // called when we need to flush everything
  writer._flush = function(done) {
    processBuffer(true, done);
  };
  return writer;
};
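Usage note (not from the diff): a sketch of wiring the writer above into a restore, assuming sibling liner.js and writer.js modules and a `db` client object of the shape returned by the request module earlier in this diff; the file name and tuning values are invented.

const fs = require('fs');
const events = require('events');

const liner = require('./liner.js')();
const ee = new events.EventEmitter();
const writer = require('./writer.js')(db, 500, 5, ee); // bufferSize 500, parallelism 5

writer.on('restored', (info) => console.log(`restored ${info.total} docs so far`));
writer.on('finished', (info) => console.log(`restore complete: ${info.total} docs`));
writer.on('error', (err) => console.error(err.message));
ee.on('error', (err) => console.error(err.message)); // BackupFileJsonError arrives here

// one JSON array of documents per line, as produced by a backup
fs.createReadStream('backup.txt').pipe(liner).pipe(writer);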

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2021 IBM Corp. All rights reserved.
+// Copyright © 2017 IBM Corp. All rights reserved.
 //

@@ -16,267 +16,32 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Removed (2.9.15-SNAPSHOT.167):

const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');

/**
 * Read documents from a database to be backed up.
 *
 * @param {string} db - `@cloudant/cloudant` DB object for source database.
 * @param {number} blocksize - number of documents to download in single request
 * @param {number} parallelism - number of concurrent downloads
 * @param {string} log - path to log file to use
 * @param {boolean} resume - whether to resume from an existing log file
 * @returns EventEmitter with following events:
 *  - `received` - called with a block of documents to write to backup
 *  - `error` - on error
 *  - `finished` - when backup process is finished (either complete or errored)
 */
module.exports = function(db, options) {
  const ee = new events.EventEmitter();
  const start = new Date().getTime(); // backup start time
  const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)

  function proceedWithBackup() {
    if (options.resume) {
      // pick up from existing log file from previous run
      downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
    } else {
      // create new log file and process
      spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
        if (err) {
          ee.emit('error', err);
        } else {
          downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
        }
      });
    }
  }

  validateBulkGetSupport(db, function(err) {
    if (err) {
      return ee.emit('error', err);
    } else {
      proceedWithBackup();
    }
  });

  return ee;
};

/**
 * Validate /_bulk_get support for a specified database.
 *
 * @param {string} db - nodejs-cloudant db
 * @param {function} callback - called on completion with signature (err)
 */
function validateBulkGetSupport(db, callback) {
  db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
    err = error.convertResponseError(err, function(err) {
      switch (err.status) {
        case undefined:
          // There was no status code on the error
          return err;
        case 404:
          return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
        default:
          return new error.HTTPError(err);
      }
    });
    callback(err);
  });
}

/**
 * Download remaining batches in a log file, splitting batches into sets
 * to avoid enqueueing too many in one go.
 *
 * @param {string} log - log file name to maintain download state
 * @param {string} db - nodejs-cloudant db
 * @param {events.EventEmitter} ee - event emitter to emit received events on
 * @param {time} startTime - start time for backup process
 * @param {number} batchesPerDownloadSession - max batches to enqueue for
 *  download at a time. As batches contain many doc IDs, this helps avoid
 *  exhausting memory.
 * @param {number} parallelism - number of concurrent downloads
 * @returns function to call to download remaining batches with signature
 *  (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
 */
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
  let total = 0; // running total of documents downloaded so far
  let noRemainingBatches = false;

  // Generate a set of batches (up to batchesPerDownloadSession) to download from the
  // log file and download them. Set noRemainingBatches to `true` for last batch.
  function downloadSingleBatchSet(done) {
    // Fetch the doc IDs for the batches in the current set to
    // download them.
    function batchSetComplete(err, data) {
      if (!err) {
        total = data.total;
      }
      done(err);
    }
    function processRetrievedBatches(err, batches) {
      if (!err) {
        // process them in parallelised queue
        processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
      } else {
        batchSetComplete(err);
      }
    }

    readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
      if (err) {
        ee.emit('error', err);
        // Stop processing changes file for fatal errors
        noRemainingBatches = true;
        done();
      } else {
        if (batchSetIds.length === 0) {
          noRemainingBatches = true;
          return done();
        }
        logfilegetbatches(log, batchSetIds, processRetrievedBatches);
      }
    });
  }

  // Return true if all batches in log file have been downloaded
  function isFinished(callback) { callback(null, noRemainingBatches); }

  function onComplete() {
    ee.emit('finished', { total: total });
  }

  async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}

/**
 * Return a set of uncompleted download batch IDs from the log file.
 *
 * @param {string} log - log file path
 * @param {number} batchesPerDownloadSession - maximum IDs to return
 * @param {function} callback - sign (err, batchSetIds array)
 */
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
  logfilesummary(log, function processSummary(err, summary) {
    if (!err) {
      if (!summary.changesComplete) {
        callback(new error.BackupError('IncompleteChangesInLogFile',
          'WARNING: Changes did not finish spooling'));
        return;
      }
      if (Object.keys(summary.batches).length === 0) {
        return callback(null, []);
      }

      // batch IDs are the property names of summary.batches
      const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
      callback(null, batchSetIds);
    } else {
      callback(err);
    }
  });
}

/**
 * Download a set of batches retrieved from a log file. When a download is
 * complete, add a line to the logfile indicating such.
 *
 * @param {any} db - nodejs-cloudant database
 * @param {any} parallelism - number of concurrent requests to make
 * @param {any} log - log file to drive downloads from
 * @param {any} batches - batches to download
 * @param {any} ee - event emitter for progress. This function emits
 *  received and error events.
 * @param {any} start - time backup started, to report deltas
 * @param {any} grandtotal - count of documents downloaded prior to this set
 *  of batches
 * @param {any} callback - completion callback, (err, {total: number}).
 */
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
  let hasErrored = false;
  let total = grandtotal;

  // queue to process the fetch requests in an orderly fashion using _bulk_get
  const q = async.queue(function(payload, done) {
    const output = [];
    const thisBatch = payload.batch;
    delete payload.batch;
    delete payload.command;

    function logCompletedBatch(batch) {
      if (log) {
        fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
      } else {
        done();
      }
    }

    // do the /db/_bulk_get request
    db.service.postBulkGet({
      db: db.db,
      revs: true,
      docs: payload.docs
    }).then(response => {
      // create an output array with the docs returned
      response.result.results.forEach(function(d) {
        if (d.docs) {
          d.docs.forEach(function(doc) {
            if (doc.ok) {
              output.push(doc.ok);
            }
          });
        }
      });
      total += output.length;
      const t = (new Date().getTime() - start) / 1000;
      ee.emit('received', {
        batch: thisBatch,
        data: output,
        length: output.length,
        time: t,
        total: total
      }, q, logCompletedBatch);
    }).catch(err => {
      if (!hasErrored) {
        hasErrored = true;
        err = error.convertResponseError(err);
        // Kill the queue for fatal errors
        q.kill();
        ee.emit('error', err);
      }
      done();
    });
  }, parallelism);

  for (const i in batches) {
    q.push(batches[i]);
  }

  q.drain(function() {
    callback(null, { total: total });
  });
}

/**
 * Returns first N properties on an object.
 *
 * @param {object} obj - object with properties
 * @param {number} count - number of properties to return
 */
function getPropertyNames(obj, count) {
  // decide which batch numbers to deal with
  const batchestofetch = [];
  let j = 0;
  for (const i in obj) {
    batchestofetch.push(parseInt(i));
    j++;
    if (j >= count) break;
  }
  return batchestofetch;
}

Added (2.9.15-SNAPSHOT.168):

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function() {
  const liner = new stream.Transform({ objectMode: true });

  liner._transform = function(chunk, encoding, done) {
    let data = chunk.toString();
    if (this._lastLineData) {
      data = this._lastLineData + data;
    }

    const lines = data.split('\n');
    this._lastLineData = lines.splice(lines.length - 1, 1)[0];

    for (const i in lines) {
      this.push(lines[i]);
    }
    done();
  };

  liner._flush = function(done) {
    if (this._lastLineData) {
      this.push(this._lastLineData);
    }
    this._lastLineData = null;
    done();
  };

  return liner;
};
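Usage note (not from the diff): the liner transform above in isolation, assuming it is saved as liner.js; the input strings are invented. Chunks need not align with line boundaries: the trailing partial line is held back until the next chunk, or pushed on flush.

const liner = require('./liner.js')();

liner.on('data', (line) => console.log(`line: ${line}`));

liner.write('foo\nba');
liner.write('r\nbaz');
liner.end();
// => line: foo
// => line: bar
// => line: baz (pushed by _flush)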

@@ -1,2 +1,2 @@
-// Copyright © 2017, 2018 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
 //

@@ -16,17 +16,104 @@ // Licensed under the Apache License, Version 2.0 (the "License");

Removed (2.9.15-SNAPSHOT.167):

module.exports = function(db, options, readstream, ee, callback) {
  const liner = require('../includes/liner.js')();
  const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);

  // pipe the input to the output, via transformation functions
  readstream
    .pipe(liner) // transform the input stream into per-line
    .on('error', function(err) {
      // Forward the error to the writer event emitter where we already have
      // listeners on for handling errors
      writer.emit('error', err);
    })
    .pipe(writer); // transform the data

  callback(null, writer);
};

Added (2.9.15-SNAPSHOT.168):

const path = require('path');
const tmp = require('tmp');

/**
  Return API default settings.
*/
function apiDefaults() {
  return {
    parallelism: 5,
    bufferSize: 500,
    requestTimeout: 120000,
    log: tmp.tmpNameSync(),
    resume: false,
    mode: 'full'
  };
}

/**
  Return CLI default settings.
*/
function cliDefaults() {
  const defaults = apiDefaults();

  // add additional legacy settings
  defaults.db = 'test';
  defaults.url = 'http://localhost:5984';

  // add CLI only option
  defaults.quiet = false;

  return defaults;
}

/**
  Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
  // if we have a custom CouchDB url
  if (typeof process.env.COUCH_URL !== 'undefined') {
    opts.url = process.env.COUCH_URL;
  }

  // if we have a specified database
  if (typeof process.env.COUCH_DATABASE !== 'undefined') {
    opts.db = process.env.COUCH_DATABASE;
  }

  // if we have a specified buffer size
  if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
    opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
  }

  // if we have a specified parallelism
  if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
    opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
  }

  // if we have a specified request timeout
  if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
    opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
  }

  // if we have a specified log file
  if (typeof process.env.COUCH_LOG !== 'undefined') {
    opts.log = path.normalize(process.env.COUCH_LOG);
  }

  // if we are instructed to resume
  if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
    opts.resume = true;
  }

  // if we are given an output filename
  if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
    opts.output = path.normalize(process.env.COUCH_OUTPUT);
  }

  // if we only want a shallow copy
  if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
    opts.mode = 'shallow';
  }

  // if we are instructed to be quiet
  if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
    opts.quiet = true;
  }

  // if we have a specified API key
  if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
    opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
  }

  // if we have a specified IAM token endpoint
  if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
    opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
  }
}

module.exports = {
  apiDefaults: apiDefaults,
  cliDefaults: cliDefaults,
  applyEnvironmentVariables: applyEnvironmentVariables
};
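Usage note (not from the diff): how the layering above behaves, assuming the added module is saved as config.js; the environment values are invented.

process.env.COUCH_URL = 'http://localhost:5984';
process.env.COUCH_BUFFER_SIZE = '250';

const config = require('./config.js');

const opts = config.cliDefaults();      // { parallelism: 5, bufferSize: 500, ... }
config.applyEnvironmentVariables(opts); // mutates opts in place
console.log(opts.bufferSize);           // 250 (note the parseInt in the code above)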

@@ -16,45 +16,17 @@ // Copyright © 2017, 2018 IBM Corp. All rights reserved.

Removed (2.9.15-SNAPSHOT.167):

/**
 * Utility methods for the command line interface.
 * @module cliutils
 * @see module:cliutils
 */

const url = require('url');
const error = require('./error.js');

module.exports = {
  /**
   * Combine a base URL and a database name, ensuring at least single slash
   * between root and database name. This allows users to have Couch behind
   * proxies that mount Couch's / endpoint at some other mount point.
   * @param {string} root - root URL
   * @param {string} databaseName - database name
   * @return concatenated URL.
   *
   * @private
   */
  databaseUrl: function databaseUrl(root, databaseName) {
    if (!root.endsWith('/')) {
      root = root + '/';
    }
    try {
      return new url.URL(encodeURIComponent(databaseName), root).toString();
    } catch (err) {
      throw error.wrapPossibleInvalidUrlError(err);
    }
  },

  /**
   * Generate CLI argument usage text.
   *
   * @param {string} description - argument description.
   * @param {string} defaultValue - default argument value.
   *
   * @private
   */
  getUsage: function getUsage(description, defaultValue) {
    return `${description} ${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
  }
};

Added (2.9.15-SNAPSHOT.168):

module.exports = function(db, options, readstream, ee, callback) {
  const liner = require('../includes/liner.js')();
  const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);

  // pipe the input to the output, via transformation functions
  readstream
    .pipe(liner) // transform the input stream into per-line
    .on('error', function(err) {
      // Forward the error to the writer event emitter where we already have
      // listeners on for handling errors
      writer.emit('error', err);
    })
    .pipe(writer); // transform the data

  callback(null, writer);
};
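Usage note (not from the diff): the databaseUrl helper from the removed side above, assuming it is saved as cliutils.js; hosts and database names are invented.

const cliutils = require('./cliutils.js');

// A slash is ensured between root and database name.
cliutils.databaseUrl('http://localhost:5984', 'animaldb');
// => 'http://localhost:5984/animaldb'

// The database name is URL-encoded, so names containing '/' survive.
cliutils.databaseUrl('http://localhost:5984/couch', 'my/db');
// => 'http://localhost:5984/couch/my%2Fdb'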

@@ -16,61 +16,27 @@ // Copyright © 2017 IBM Corp. All rights reserved.

Removed (2.9.15-SNAPSHOT.167):

const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');

const onLine = function(onCommand, batches) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    if (line && line[0] === ':') {
      const obj = {
        command: null,
        batch: null,
        docs: []
      };

      let matches;

      // extract command
      matches = line.match(/^:([a-z_]+) ?/);
      if (matches) {
        obj.command = matches[1];
      }

      // extract batch
      matches = line.match(/ batch([0-9]+)/);
      if (matches) {
        obj.batch = parseInt(matches[1]);
      }

      // if this is one we want
      if (obj.command === 't' && batches.indexOf(obj.batch) > -1) {
        const json = line.replace(/^.* batch[0-9]+ /, '').trim();
        obj.docs = JSON.parse(json);
        onCommand(obj);
      }
    }
    done();
  };
  return change;
};

module.exports = function(log, batches, callback) {
  // our sense of state
  const retval = { };

  // called with each line from the log file
  const onCommand = function(obj) {
    retval[obj.batch] = obj;
  };

  // stream through the previous log file
  fs.createReadStream(log)
    .pipe(liner())
    .pipe(onLine(onCommand, batches))
    .on('error', function(err) {
      callback(err);
    })
    .on('finish', function() {
      callback(null, retval);
    });
};

Added (2.9.15-SNAPSHOT.168):

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function(onChange) {
  const change = new stream.Transform({ objectMode: true });

  change._transform = function(line, encoding, done) {
    let obj = null;

    // one change per line - remove the trailing comma
    line = line.trim().replace(/,$/, '');

    // extract the last_seq at the end of the changes feed
    if (line.match(/^"last_seq":/)) {
      line = '{' + line;
    }

    try {
      obj = JSON.parse(line);
    } catch (e) {
    }

    onChange(obj);
    done();
  };
  return change;
};
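Usage note (not from the diff): the change transform above parses one change per line from a _changes response body; the lines below are fabricated examples, and the module path is assumed.

const change = require('./change.js');

const parser = change((obj) => {
  if (obj) console.log(obj); // obj is null for unparseable lines
});

parser.write('{"seq":"1-abc","id":"doc1","changes":[{"rev":"1-x"}]},');
parser.write('"last_seq":"1-abc","pending":0}');
parser.end();
// => { seq: '1-abc', id: 'doc1', changes: [ { rev: '1-x' } ] }
// => { last_seq: '1-abc', pending: 0 }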

@@ -16,115 +16,267 @@ // Copyright © 2017, 2021 IBM Corp. All rights reserved.

const cliutils = require('./cliutils.js');
const config = require('./config.js');
const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const path = require('path');
const pkg = require('../package.json');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');
function parseBackupArgs() {
const program = require('commander');
/**
* Read documents from a database to be backed up.
*
* @param {string} db - `@cloudant/cloudant` DB object for source database.
* @param {number} blocksize - number of documents to download in single request
* @param {number} parallelism - number of concurrent downloads
* @param {string} log - path to log file to use
* @param {boolean} resume - whether to resume from an existing log file
* @returns EventEmitter with following events:
* - `received` - called with a block of documents to write to backup
* - `error` - on error
* - `finished` - when backup process is finished (either complete or errored)
*/
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime(); // backup start time
const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)
// Option CLI defaults
const defaults = config.cliDefaults();
function proceedWithBackup() {
if (options.resume) {
// pick up from existing log file from previous run
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
} else {
// create new log file and process
spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
if (err) {
ee.emit('error', err);
} else {
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
}
});
}
}
// Options set by environment variables
const envVarOptions = {};
config.applyEnvironmentVariables(envVarOptions);
validateBulkGetSupport(db, function(err) {
if (err) {
return ee.emit('error', err);
} else {
proceedWithBackup();
}
});
program
.version(pkg.version)
.description('Backup a CouchDB/Cloudant database to a backup text file.')
.usage('[options...]')
.option('-b, --buffer-size <n>',
cliutils.getUsage('number of documents fetched at once', defaults.bufferSize),
Number)
.option('-d, --db <db>',
cliutils.getUsage('name of the database to backup', defaults.db))
.option('-k, --iam-api-key <API key>',
cliutils.getUsage('IAM API key to access the Cloudant server'))
.option('-l, --log <file>',
cliutils.getUsage('file to store logging information during backup; invalid in "shallow" mode', 'a temporary file'),
path.normalize)
.option('-m, --mode <mode>',
cliutils.getUsage('"shallow" if only a superficial backup is done (ignoring conflicts and revision tokens), else "full" for complete backup', defaults.mode),
(mode) => { return mode.toLowerCase(); })
.option('-o, --output <file>',
cliutils.getUsage('file name to store the backup data', 'stdout'),
path.normalize)
.option('-p, --parallelism <n>',
cliutils.getUsage('number of HTTP requests to perform in parallel when performing a backup; ignored in "shallow" mode', defaults.parallelism),
Number)
.option('-q, --quiet',
cliutils.getUsage('suppress batch messages', defaults.quiet))
.option('-r, --resume',
cliutils.getUsage('continue a previous backup from its last known position; invalid in "shallow" mode', defaults.resume))
.option('-t, --request-timeout <n>',
cliutils.getUsage('milliseconds to wait for a response to a HTTP request before retrying the request', defaults.requestTimeout),
Number)
.option('-u, --url <url>',
cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
.parse(process.argv);
return ee;
};
// Remove defaults that don't apply when using shallow mode
if (program.opts().mode === 'shallow' || envVarOptions.mode === 'shallow') {
delete defaults.parallelism;
delete defaults.log;
delete defaults.resume;
/**
* Validate /_bulk_get support for a specified database.
*
* @param {string} db - nodejs-cloudant db
* @param {function} callback - called on completion with signature (err)
*/
function validateBulkGetSupport(db, callback) {
db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
err = error.convertResponseError(err, function(err) {
switch (err.status) {
case undefined:
// There was no status code on the error
return err;
case 404:
return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
default:
return new error.HTTPError(err);
}
});
callback(err);
});
}
/**
* Download remaining batches in a log file, splitting batches into sets
* to avoid enqueueing too many in one go.
*
* @param {string} log - log file name to maintain download state
* @param {string} db - nodejs-cloudant db
* @param {events.EventEmitter} ee - event emitter to emit received events on
* @param {time} startTime - start time for backup process
* @param {number} batchesPerDownloadSession - max batches to enqueue for
* download at a time. As batches contain many doc IDs, this helps avoid
* exhausting memory.
* @param {number} parallelism - number of concurrent downloads
* @returns function to call do download remaining batches with signature
* (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
*/
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
let total = 0; // running total of documents downloaded so far
let noRemainingBatches = false;
// Generate a set of batches (up to batchesPerDownloadSession) to download from the
// log file and download them. Set noRemainingBatches to `true` for last batch.
function downloadSingleBatchSet(done) {
// Fetch the doc IDs for the batches in the current set to
// download them.
function batchSetComplete(err, data) {
if (!err) {
total = data.total;
}
done(err);
}
function processRetrievedBatches(err, batches) {
if (!err) {
// process them in parallelised queue
processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
} else {
batchSetComplete(err);
}
}
readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
if (err) {
ee.emit('error', err);
// Stop processing changes file for fatal errors
noRemainingBatches = true;
done();
} else {
if (batchSetIds.length === 0) {
noRemainingBatches = true;
return done();
}
logfilegetbatches(log, batchSetIds, processRetrievedBatches);
}
});
}
  // Return true if all batches in log file have been downloaded
  function isFinished(callback) { callback(null, noRemainingBatches); }

  function onComplete() {
    ee.emit('finished', { total: total });
  }

  async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}

/**
 * Return a set of uncompleted download batch IDs from the log file.
 *
 * @param {string} log - log file path
 * @param {number} batchesPerDownloadSession - maximum IDs to return
 * @param {function} callback - signature (err, batchSetIds array)
 */
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
  logfilesummary(log, function processSummary(err, summary) {
    if (!err) {
      if (!summary.changesComplete) {
        callback(new error.BackupError('IncompleteChangesInLogFile',
          'WARNING: Changes did not finish spooling'));
        return;
      }
      if (Object.keys(summary.batches).length === 0) {
        return callback(null, []);
      }

      // batch IDs are the property names of summary.batches
      const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
      callback(null, batchSetIds);
    } else {
      callback(err);
    }
  });
}

/**
 * Download a set of batches retrieved from a log file. When a download is
 * complete, add a line to the logfile indicating such.
 *
 * @param {any} db - nodejs-cloudant database
 * @param {any} parallelism - number of concurrent requests to make
 * @param {any} log - log file to drive downloads from
 * @param {any} batches - batches to download
 * @param {any} ee - event emitter for progress. This function emits
 * received and error events.
 * @param {any} start - time backup started, to report deltas
 * @param {any} grandtotal - count of documents downloaded prior to this set
 * of batches
 * @param {any} callback - completion callback, (err, {total: number}).
 */
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
  let hasErrored = false;
  let total = grandtotal;

  // queue to process the fetch requests in an orderly fashion using _bulk_get
  const q = async.queue(function(payload, done) {
    const output = [];
    const thisBatch = payload.batch;
    delete payload.batch;
    delete payload.command;

    function logCompletedBatch(batch) {
      if (log) {
        fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
      } else {
        done();
      }
    }

    // do the /db/_bulk_get request
    db.service.postBulkGet({
      db: db.db,
      revs: true,
      docs: payload.docs
    }).then(response => {
      // create an output array with the docs returned
      response.result.results.forEach(function(d) {
        if (d.docs) {
          d.docs.forEach(function(doc) {
            if (doc.ok) {
              output.push(doc.ok);
            }
          });
        }
      });
      total += output.length;
      const t = (new Date().getTime() - start) / 1000;
      ee.emit('received', {
        batch: thisBatch,
        data: output,
        length: output.length,
        time: t,
        total: total
      }, q, logCompletedBatch);
    }).catch(err => {
      if (!hasErrored) {
        hasErrored = true;
        err = error.convertResponseError(err);
        // Kill the queue for fatal errors
        q.kill();
        ee.emit('error', err);
      }
      done();
    });
  }, parallelism);

  for (const i in batches) {
    q.push(batches[i]);
  }

  q.drain(function() {
    callback(null, { total: total });
  });
}

/**
 * Returns first N properties on an object.
 *
 * @param {object} obj - object with properties
 * @param {number} count - number of properties to return
 */
function getPropertyNames(obj, count) {
  // decide which batch numbers to deal with
  const batchestofetch = [];
  let j = 0;
  for (const i in obj) {
    batchestofetch.push(parseInt(i));
    j++;
    if (j >= count) break;
  }
  return batchestofetch;
}

  // Apply the options in order so that the CLI overrides env vars and env variables
  // override defaults.
  const opts = Object.assign({}, defaults, envVarOptions, program.opts());

  if (opts.resume && (opts.log === defaults.log)) {
    // If resuming and the log file arg is the newly generated tmp name from defaults then we know that --log wasn't specified.
    // We have to do this check here for the CLI case because of the default.
    error.terminationCallback(new error.BackupError('NoLogFileName', 'To resume a backup, a log file must be specified'));
  }

  return opts;
}

function parseRestoreArgs() {
  const program = require('commander');

  // Option CLI defaults
  const defaults = config.cliDefaults();

  // Options set by environment variables
  const envVarOptions = {};
  config.applyEnvironmentVariables(envVarOptions);

  program
    .version(pkg.version)
    .description('Restore a CouchDB/Cloudant database from a backup text file.')
    .usage('[options...]')
    .option('-b, --buffer-size <n>',
      cliutils.getUsage('number of documents restored at once', defaults.bufferSize),
      Number)
    .option('-d, --db <db>',
      cliutils.getUsage('name of the new, existing database to restore to', defaults.db))
    .option('-k, --iam-api-key <API key>',
      cliutils.getUsage('IAM API key to access the Cloudant server'))
    .option('-p, --parallelism <n>',
      cliutils.getUsage('number of HTTP requests to perform in parallel when restoring a backup', defaults.parallelism),
      Number)
    .option('-q, --quiet',
      cliutils.getUsage('suppress batch messages', defaults.quiet))
    .option('-t, --request-timeout <n>',
      cliutils.getUsage('milliseconds to wait for a response to a HTTP request before retrying the request', defaults.requestTimeout),
      Number)
    .option('-u, --url <url>',
      cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
    .parse(process.argv);

  // Apply the options in order so that the CLI overrides env vars and env variables
  // override defaults.
  const opts = Object.assign({}, defaults, envVarOptions, program.opts());

  return opts;
}

module.exports = {
  parseBackupArgs: parseBackupArgs,
  parseRestoreArgs: parseRestoreArgs
};
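// Illustrative only, not part of the package source: the exported parsers layer
// defaults, environment variables and CLI flags, in that order of increasing
// precedence. So with COUCH_URL=http://example.com:5984 set and `--db mydb` on
// the command line, the returned opts take url from the env var, db from the
// flag, and everything else from config.cliDefaults().
//
// const parser = require('./parser.js');
// const opts = parser.parseRestoreArgs();
// console.error(`restoring ${opts.db} at parallelism ${opts.parallelism}`);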

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -16,104 +16,45 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const path = require('path');
const tmp = require('tmp');

/**
  Return API default settings.
*/
function apiDefaults() {
  return {
    parallelism: 5,
    bufferSize: 500,
    requestTimeout: 120000,
    log: tmp.tmpNameSync(),
    resume: false,
    mode: 'full'
  };
}

/**
  Return CLI default settings.
*/
function cliDefaults() {
  const defaults = apiDefaults();

  // add additional legacy settings
  defaults.db = 'test';
  defaults.url = 'http://localhost:5984';

  // add CLI only option
  defaults.quiet = false;

  return defaults;
}

/**
  Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
  // if we have a custom CouchDB url
  if (typeof process.env.COUCH_URL !== 'undefined') {
    opts.url = process.env.COUCH_URL;
  }

  // if we have a specified database
  if (typeof process.env.COUCH_DATABASE !== 'undefined') {
    opts.db = process.env.COUCH_DATABASE;
  }

  // if we have a specified buffer size
  if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
    opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
  }

  // if we have a specified parallelism
  if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
    opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
  }

  // if we have a specified request timeout
  if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
    opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
  }

  // if we have a specified log file
  if (typeof process.env.COUCH_LOG !== 'undefined') {
    opts.log = path.normalize(process.env.COUCH_LOG);
  }

  // if we are instructed to resume
  if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
    opts.resume = true;
  }

  // if we are given an output filename
  if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
    opts.output = path.normalize(process.env.COUCH_OUTPUT);
  }

  // if we only want a shallow copy
  if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
    opts.mode = 'shallow';
  }

  // if we are instructed to be quiet
  if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
    opts.quiet = true;
  }

  // if we have a specified API key
  if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
    opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
  }

  // if we have a specified IAM token endpoint
  if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
    opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
  }
}

module.exports = {
  apiDefaults: apiDefaults,
  cliDefaults: cliDefaults,
  applyEnvironmentVariables: applyEnvironmentVariables
};

/**
 * Utility methods for the command line interface.
 * @module cliutils
 * @see module:cliutils
 */

const url = require('url');
const error = require('./error.js');

module.exports = {

  /**
   * Combine a base URL and a database name, ensuring at least single slash
   * between root and database name. This allows users to have Couch behind
   * proxies that mount Couch's / endpoint at some other mount point.
   * @param {string} root - root URL
   * @param {string} databaseName - database name
   * @return concatenated URL.
   *
   * @private
   */
  databaseUrl: function databaseUrl(root, databaseName) {
    if (!root.endsWith('/')) {
      root = root + '/';
    }
    try {
      return new url.URL(encodeURIComponent(databaseName), root).toString();
    } catch (err) {
      throw error.wrapPossibleInvalidUrlError(err);
    }
  },

  /**
   * Generate CLI argument usage text.
   *
   * @param {string} description - argument description.
   * @param {string} defaultValue - default argument value.
   *
   * @private
   */
  getUsage: function getUsage(description, defaultValue) {
    return `${description} ${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
  }
};
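// Illustrative only, not part of the package source: databaseUrl guarantees a
// single slash between root and database name and percent-encodes the name,
// so proxied mount points and special characters survive intact.
//
// const cliutils = require('./cliutils.js');
// cliutils.databaseUrl('http://localhost:5984', 'animaldb');
// //=> 'http://localhost:5984/animaldb'
// cliutils.databaseUrl('http://example.com/couch/', 'my/db');
// //=> 'http://example.com/couch/my%2Fdb'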

@@ -1,2 +0,3 @@

// Copyright © 2017 IBM Corp. All rights reserved.
#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,32 +17,66 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');

module.exports = function() {
  const liner = new stream.Transform({ objectMode: true });

  liner._transform = function(chunk, encoding, done) {
    let data = chunk.toString();
    if (this._lastLineData) {
      data = this._lastLineData + data;
    }

    const lines = data.split('\n');
    this._lastLineData = lines.splice(lines.length - 1, 1)[0];
    for (const i in lines) {
      this.push(lines[i]);
    }
    done();
  };

  liner._flush = function(done) {
    if (this._lastLineData) {
      this.push(this._lastLineData);
    }
    this._lastLineData = null;
    done();
  };

  return liner;
};

const error = require('../includes/error.js');
const fs = require('fs');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const backupDebug = debug('couchbackup:backup');
const backupBatchDebug = debug('couchbackup:backup:batch');

backupDebug.enabled = true;

try {
  const program = parser.parseBackupArgs();
  const databaseUrl = cliutils.databaseUrl(program.url, program.db);

  const opts = {
    bufferSize: program.bufferSize,
    log: program.log,
    mode: program.mode,
    parallelism: program.parallelism,
    requestTimeout: program.requestTimeout,
    resume: program.resume,
    iamApiKey: program.iamApiKey,
    iamTokenUrl: program.iamTokenUrl
  };

  // log configuration to console
  console.error('='.repeat(80));
  console.error('Performing backup on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
  console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
  console.error('='.repeat(80));

  backupBatchDebug.enabled = !program.quiet;

  let ws = process.stdout;

  // open output file
  if (program.output) {
    let flags = 'w';
    if (program.log && program.resume) {
      flags = 'a';
    }
    const fd = fs.openSync(program.output, flags);
    ws = fs.createWriteStream(null, { fd });
  }

  backupDebug('Fetching all database changes...');

  return couchbackup.backup(
    databaseUrl,
    ws,
    opts,
    error.terminationCallback
  ).on('changes', function(batch) {
    backupBatchDebug('Total batches received:', batch + 1);
  }).on('written', function(obj) {
    backupBatchDebug('Written batch ID:', obj.batch, 'Total document revisions written:', obj.total, 'Time:', obj.time);
  }).on('error', function(e) {
    backupDebug('ERROR', e);
  }).on('finished', function(obj) {
    backupDebug('Finished - Total document revisions written:', obj.total);
  });
} catch (err) {
  error.terminationCallback(err);
}
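// Illustrative only, not part of the package source: because the output file is
// opened with flags 'a' when both --log and --resume are given, a resumed
// backup appends to the partial backup file instead of truncating it.
// Hypothetical invocations:
//
//   couchbackup --db mydb --log backup.log --output backup.txt
//   # process interrupted part-way; pick up where the log left off:
//   couchbackup --db mydb --log backup.log --output backup.txt --resume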

@@ -0,1 +1,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.

@@ -16,99 +17,45 @@ //

// fatal errors
const codes = {
  Error: 1,
  InvalidOption: 2,
  DatabaseNotFound: 10,
  Unauthorized: 11,
  Forbidden: 12,
  DatabaseNotEmpty: 13,
  NoLogFileName: 20,
  LogDoesNotExist: 21,
  IncompleteChangesInLogFile: 22,
  SpoolChangesError: 30,
  HTTPFatalError: 40,
  BulkGetError: 50
};

class BackupError extends Error {
  constructor(name, message) {
    super(message);
    this.name = name;
  }
}

class HTTPError extends BackupError {
  constructor(responseError, name) {
    // Special case some names for more useful error messages
    switch (responseError.status) {
      case 401:
        name = 'Unauthorized';
        break;
      case 403:
        name = 'Forbidden';
        break;
      default:
        name = name || 'HTTPFatalError';
    }
    super(name, responseError.message);
  }
}

// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
  if (err) {
    // Construct an HTTPError if there is request information on the error
    // Codes < 400 are considered OK
    if (err.status >= 400) {
      return new HTTPError(err);
    } else {
      // Send it back again if there was no status code, e.g. a cxn error
      return augmentMessage(err);
    }
  }
}

function convertResponseError(responseError, errorFactory) {
  if (!errorFactory) {
    errorFactory = checkResponse;
  }
  return errorFactory(responseError);
}

function augmentMessage(err) {
  // For errors that don't have a status code, we are likely looking at a cxn
  // error.
  // Try to augment the message with more detail (core puts the code in statusText)
  if (err && err.statusText) {
    err.message = `${err.message} ${err.statusText}`;
  }
  if (err && err.description) {
    err.message = `${err.message} ${err.description}`;
  }
  return err;
}

function wrapPossibleInvalidUrlError(err) {
  if (err.code === 'ERR_INVALID_URL') {
    // Wrap ERR_INVALID_URL in our own InvalidOption
    return new BackupError('InvalidOption', err.message);
  }
  return err;
}

module.exports = {
  BackupError,
  HTTPError,
  wrapPossibleInvalidUrlError,
  convertResponseError,
  terminationCallback: function terminationCallback(err, data) {
    if (err) {
      console.error(`ERROR: ${err.message}`);
      process.exitCode = codes[err.name] || 1;
      process.exit();
    }
  }
};

const error = require('../includes/error.js');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const restoreDebug = debug('couchbackup:restore');
const restoreBatchDebug = debug('couchbackup:restore:batch');

restoreDebug.enabled = true;

try {
  const program = parser.parseRestoreArgs();
  const databaseUrl = cliutils.databaseUrl(program.url, program.db);
  const opts = {
    bufferSize: program.bufferSize,
    parallelism: program.parallelism,
    requestTimeout: program.requestTimeout,
    iamApiKey: program.iamApiKey,
    iamTokenUrl: program.iamTokenUrl
  };

  // log configuration to console
  console.error('='.repeat(80));
  console.error('Performing restore on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
  console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
  console.error('='.repeat(80));

  restoreBatchDebug.enabled = !program.quiet;

  return couchbackup.restore(
    process.stdin, // restore from stdin
    databaseUrl,
    opts,
    error.terminationCallback
  ).on('restored', function(obj) {
    restoreBatchDebug('restored', obj.total);
  }).on('error', function(e) {
    restoreDebug('ERROR', e);
  }).on('finished', function(obj) {
    restoreDebug('finished', obj);
  });
} catch (err) {
  error.terminationCallback(err);
}
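// Illustrative only, not part of the package source: on a fatal restore error
// terminationCallback maps the error name to the exit code table above, so a
// 'DatabaseNotFound' failure exits the process with status 10, while an
// unrecognised name falls back to 1.
//
// const error = require('../includes/error.js');
// error.terminationCallback(
//   new error.BackupError('DatabaseNotFound', 'Database mydb does not exist.'));
// // prints "ERROR: Database mydb does not exist." then exits with code 10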

@@ -1,178 +0,102 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';
const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');
const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
process.version + ')';
// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
constructor(options) {
super(options);
this.data = [];
}
_write(chunk, encoding, callback) {
this.data.push(chunk);
callback();
}
stringBody() {
return Buffer.concat(this.data).toString();
}
}
// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
let method;
let requestUrl;
if (err.response) {
if (err.response.config.url) {
requestUrl = err.response.config.url;
method = err.response.config.method;
}
// Override the status text with an improved message
let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
`${method} ${requestUrl}`;
if (err.response.data) {
// Check if we have a JSON response and try to get the error/reason
if (err.response.headers['content-type'] === 'application/json') {
if (!err.response.data.error && err.response.data.pipe) {
// If we didn't find a JSON object with `error` then we might have a stream response.
// Detect the stream by the presence of `pipe` and use it to get the body and parse
// the error information.
const p = new Promise((resolve, reject) => {
const errorBody = new ResponseWriteable();
err.response.data.pipe(errorBody)
.on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
.on('error', () => { reject(err); });
});
// Replace the stream on the response with the parsed object
err.response.data = await p;
}
// Append the error/reason if available
if (err.response.data.error) {
// Override the status text with our more complete message
errorMsg += ` - Error: ${err.response.data.error}`;
if (err.response.data.reason) {
errorMsg += `, Reason: ${err.response.data.reason}`;
}
}
} else {
errorMsg += err.response.data;
}
// Set a new message for use by the node-sdk-core
// We use the errors array because it gets processed
// ahead of all other service errors.
err.response.data.errors = [{ message: errorMsg }];
}
} else if (err.request) {
if (!err.message.includes(err.config.url)) {
// Augment the message with the URL and method
// but don't do it again if we already have the URL.
err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
}
}
return Promise.reject(err);
};
module.exports = {
client: function(rawUrl, opts) {
const url = new URL(rawUrl);
// Split the URL to separate service from database
// Use origin as the "base" to remove auth elements
const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
let authenticator;
// Default to cookieauth unless an IAM key is provided
if (opts.iamApiKey) {
const iamAuthOpts = { apikey: opts.iamApiKey };
if (opts.iamTokenUrl) {
iamAuthOpts.url = opts.iamTokenUrl;
}
authenticator = new IamAuthenticator(iamAuthOpts);
} else if (url.username) {
authenticator = new CouchdbSessionAuthenticator({
username: decodeURIComponent(url.username),
password: decodeURIComponent(url.password)
});
} else {
authenticator = new NoAuthAuthenticator();
}
const serviceOpts = {
authenticator: authenticator,
timeout: opts.requestTimeout,
// Axios performance options
maxContentLength: -1
};
const service = new CloudantV1(serviceOpts);
// Configure retries
const maxRetries = 2; // for 3 total attempts
service.getHttpClient().defaults.raxConfig = {
// retries for status codes
retry: maxRetries,
// retries for non-response e.g. ETIMEDOUT
noResponseRetries: maxRetries,
backoffType: 'exponential',
httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
statusCodesToRetry: [
[429, 429],
[500, 599]
],
shouldRetry: err => {
const cfg = retryPlugin.getConfig(err);
// cap at max retries regardless of response/non-response type
if (cfg.currentRetryAttempt >= maxRetries) {
return false;
} else {
return retryPlugin.shouldRetryRequest(err);
}
},
instance: service.getHttpClient()
};
retryPlugin.attach(service.getHttpClient());
service.setServiceUrl(actUrl.toString());
if (authenticator instanceof CouchdbSessionAuthenticator) {
// Awkward workaround for known Couch issue with compression on _session requests
// It is not feasible to disable compression on all requests with the amount of
// data this lib needs to move, so override the property in the tokenManager instance.
authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
}
if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
}
// Add error interceptors to put URLs in error messages
service.getHttpClient().interceptors.response.use(null, errorHelper);
// Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
service.getHttpClient().interceptors.request.use(function(requestConfig) {
requestConfig.headers['User-Agent'] = userAgent;
return requestConfig;
}, null);
return { service: service, db: dbName, url: actUrl.toString() };
}
};
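// Illustrative only, not part of the package source: a sketch of what the
// client factory above returns. The URL is a placeholder; credentials in the
// URL select CouchDB session (cookie) auth because no IAM key is supplied.
//
// const request = require('./request.js');
// const client = request.client('https://user:pass@host.example/mydb',
//   { requestTimeout: 120000 });
// // client.service -> CloudantV1 instance targeting https://host.example/
// // client.db      -> 'mydb'
// // client.url     -> service URL with auth elements stripped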
<testsuites name="test-iam">
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:22" time="4.509">
<testcase classname="test-iam.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.98">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.79">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.56">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:27" time="10.227">
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.481">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.464">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.458">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:37" time="5.285">
<testcase classname="test-iam.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.289">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.196">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.005">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:42" time="12.673">
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.335">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.247">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.3">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:55" time="5.136">
<testcase classname="test-iam.Compression tests using API" name="should backup animaldb to a compressed file" time="0.881">
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.366">
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.094">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:35:00" time="7.226">
<testcase classname="test-iam.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.333">
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.413">
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.67">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:35:07" time="313.563">
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore animaldb" time="2.13">
</testcase>
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="310.673">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:40:21" time="493.17">
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.71">
</testcase>
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="489.693">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:34" time="2.754">
<testcase classname="test-iam.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.483">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:37" time="0.283">
<testcase classname="test-iam.Write error tests" name="calls callback with error set when stream is not writeable" time="0.014">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:37" time="2.283">
<testcase classname="test-iam.Event tests" name="should get a finished event when using stdout" time="0.838">
</testcase>
<testcase classname="test-iam.Event tests" name="should get a finished event when using file output" time="0.913">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:39" time="5.235">
<testcase classname="test-iam.Resume tests using API" name="should create a log file" time="0.975">
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.741">
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.715">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:45" time="6.595">
<testcase classname="test-iam.Resume tests using CLI" name="should create a log file" time="1.322">
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.293">
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.178">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:51" time="34.015">
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m" time="17.089">
</testcase>
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.386">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:25" time="46.397">
<testcase classname="test-iam.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="46.126">
</testcase>
</testsuite>
</testsuites>

@@ -1,424 +0,175 @@

<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:14" time="0.104">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.002">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.027">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.009">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:14" time="4.557">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.92">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.861">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.628">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:18" time="10.805">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="3.004">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.473">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.523">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:29" time="5.517">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.349">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.261">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.092">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:35" time="13.158">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.514">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.388">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.454">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:48" time="5.356">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.865">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.507">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.169">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:53" time="7.067">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.321">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.355">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.575">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:44:00" time="314.671">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.199">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="311.693">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:49:15" time="506.727">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.644">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="503.3">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:42" time="2.825">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.552">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:44" time="0.281">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.012">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:45" time="2.288">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.888">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.867">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:47" time="5.311">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.868">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.779">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.741">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:52" time="6.8">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.433">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.255">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.245">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:59" time="34.739">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.814">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="17.39">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:34" time="0.011">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:34" time="0.097">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.018">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.013">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.012">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:34" time="0.131">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.037">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.037">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:34" time="3.763">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.396">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.418">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.355">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.38">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.379">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.307">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.301">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.416">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.396">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.405">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:38" time="3.57">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.335">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.459">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.366">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.39">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.394">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.392">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.426">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.405">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.392">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:41" time="0.001">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:41" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:41" time="0.045">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.012">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.002">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:41" time="0.014">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:41" time="0.004">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:41" time="10.585">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.004">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.013">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.004">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.011">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.507">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.013">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:52" time="0.013">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:52" time="0.562">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.02">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.524">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.017">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:53" time="15.733">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.01">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.014">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="2.703">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.003">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:59:08" time="61.688">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="61.42">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-11-17T15:00:10" time="4.099">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.008">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.017">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.015">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.023">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.008">
</testcase>
</testsuite>
</testsuites>
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Small script which backs up a Cloudant or CouchDB database to an S3
// bucket via a stream rather than an on-disk file.
//
// The script generates the backup object name by combining the path
// part of the database URL with the current time.
'use strict';
const stream = require('stream');
const url = require('url');
const AWS = require('aws-sdk');
const couchbackup = require('@cloudant/couchbackup');
const debug = require('debug')('s3-backup');
const VError = require('verror').VError;
/*
Main function, run from base of file.
*/
function main() {
const argv = require('yargs')
.usage('Usage: $0 [options]')
.example('$0 -s https://user:pass@host/db -b <bucket>', 'Backup db to bucket')
.options({
source: { alias: 's', nargs: 1, demandOption: true, describe: 'Source database URL' },
bucket: { alias: 'b', nargs: 1, demandOption: true, describe: 'Destination bucket' },
prefix: { alias: 'p', nargs: 1, describe: 'Prefix for backup object key', default: 'couchbackup' },
s3url: { nargs: 1, describe: 'S3 endpoint URL' },
shallow: { boolean: true, default: false, describe: 'Use couchbackup shallow mode' },
awsprofile: { nargs: 1, describe: 'The profile section to use in the ~/.aws/credentials file', default: 'default' }
})
.help('h').alias('h', 'help')
.epilog('Copyright (C) IBM 2017')
.argv;
const sourceUrl = argv.source;
const backupName = new url.URL(sourceUrl).pathname.split('/').filter(function(x) { return x; }).join('-');
const backupBucket = argv.bucket;
const backupKeyPrefix = `${argv.prefix}-${backupName}`;
const shallow = argv.shallow;
const backupKey = `${backupKeyPrefix}-${new Date().toISOString()}`;
const s3Endpoint = argv.s3url;
const awsProfile = argv.awsprofile;
// Creds are from ~/.aws/credentials, environment etc. (see S3 docs).
const awsOpts = {
signatureVersion: 'v4',
credentials: new AWS.SharedIniFileCredentials({ profile: awsProfile })
};
if (typeof s3Endpoint !== 'undefined') {
awsOpts.endpoint = new AWS.Endpoint(s3Endpoint);
}
const s3 = new AWS.S3(awsOpts);
debug(`Creating a new backup of ${s(sourceUrl)} at ${backupBucket}/${backupKey}...`);
bucketAccessible(s3, backupBucket)
.then(() => {
return backupToS3(sourceUrl, s3, backupBucket, backupKey, shallow);
})
.then(() => {
debug('done.');
})
.catch((reason) => {
debug(`Error: ${reason}`);
process.exit(1);
});
}
/**
* Return a promise that resolves if the bucket is available and
* rejects if not.
*
* @param {any} s3 S3 client object
* @param {any} bucketName Bucket name
* @returns Promise
*/
function bucketAccessible(s3, bucketName) {
return new Promise(function(resolve, reject) {
const params = {
Bucket: bucketName
};
s3.headBucket(params, function(err, data) {
if (err) {
reject(new VError(err, 'S3 bucket not accessible'));
} else {
resolve();
}
});
});
}
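// Aside: a behaviour-identical sketch using AWS SDK v2's built-in
// .promise() helper instead of the callback wrapper above (kept as a
// comment so the script's function names are unchanged):
//
// function bucketAccessible(s3, bucketName) {
//   return s3.headBucket({ Bucket: bucketName }).promise()
//     .catch((err) => { throw new VError(err, 'S3 bucket not accessible'); });
// }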
/**
* Backup directly from Cloudant to an object store object via a stream.
*
* @param {any} sourceUrl URL of database
* @param {any} s3Client Object store client
* @param {any} s3Bucket Backup destination bucket
* @param {any} s3Key Backup destination key name (shouldn't exist)
* @param {any} shallow Whether to use the couchbackup `shallow` mode
* @returns Promise
*/
function backupToS3(sourceUrl, s3Client, s3Bucket, s3Key, shallow) {
return new Promise((resolve, reject) => {
debug(`Setting up S3 upload to ${s3Bucket}/${s3Key}`);
// A pass-through stream that couchbackup's output is written to
// and that the S3 upload client then reads from.
// It has a 64 MB high-water mark to allow for fairly
// uneven network connectivity.
const streamToUpload = new stream.PassThrough({ highWaterMark: 67108864 });
// Set up S3 upload.
const params = {
Bucket: s3Bucket,
Key: s3Key,
Body: streamToUpload
};
s3Client.upload(params, function(err, data) {
debug('Object store upload done');
if (err) {
debug(err);
reject(new VError(err, 'Object store upload failed'));
return;
}
debug('Object store upload succeeded');
debug(data);
resolve();
}).on('httpUploadProgress', (progress) => {
debug(`Object store upload progress: ${JSON.stringify(progress)}`);
});
debug(`Starting streaming data from ${s(sourceUrl)}`);
couchbackup.backup(
sourceUrl,
streamToUpload,
// Pass the mode through so that the shallow flag takes effect
{ mode: shallow ? 'shallow' : 'full' },
(err, obj) => {
if (err) {
debug(err);
reject(new VError(err, 'CouchBackup failed with an error'));
return;
}
debug(`Download from ${s(sourceUrl)} complete.`);
streamToUpload.end(); // must call end() to complete upload.
// resolve() is called by the upload
}
);
});
}
/**
* Remove creds from a URL, e.g., before logging
*
* @param {string} originalUrl URL to safen
*/
function s(originalUrl) {
const parts = new url.URL(originalUrl);
return url.format(parts, { auth: false });
}
main();
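// A minimal sketch, not invoked by this script: couchbackup.backup
// returns an EventEmitter, so backup progress could be logged alongside
// the S3 upload progress. The 'written' and 'finished' event names and
// the payload fields used here are assumptions based on the couchbackup
// README; the function name is hypothetical.
function sketchObserveBackup(dbUrl, targetStream) {
const emitter = couchbackup.backup(dbUrl, targetStream, (err) => {
if (err) {
debug(err);
}
});
emitter.on('written', (progress) => {
debug(`Written batch ${progress.batch} (${progress.total} docs so far)`);
});
emitter.on('finished', (summary) => {
debug(`Backup finished with ${summary.total} docs in total`);
});
return emitter;
}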

@@ -1,424 +0,190 @@

<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:13" time="0.099">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.002">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.025">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.005">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:14" time="4.454">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.919">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.798">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.613">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:18" time="10.825">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="3.137">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.427">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.471">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:29" time="6.797">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.644">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="3.072">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.292">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:36" time="14.888">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.838">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="5.301">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.957">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:51" time="5.318">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.873">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.523">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.123">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:43:56" time="8.018">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.668">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.691">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.87">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:44:04" time="309.716">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.253">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="306.685">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:49:14" time="512.826">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="3.051">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="509.023">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:46" time="3.44">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="3.174">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:50" time="0.289">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.013">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:50" time="3.093">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.835">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="1.726">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:53" time="5.313">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.913">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.802">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.754">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:57:59" time="7.78">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.536">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.919">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.503">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:06" time="35.534">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="17.602">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="17.393">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:42" time="0.006">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:42" time="0.101">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.016">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.024">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.018">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:42" time="0.148">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.014">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.022">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.006">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.035">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.029">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:42" time="6.623">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.709">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.621">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.592">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.558">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.61">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.531">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="1.108">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.592">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.681">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.614">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:49" time="5.468">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.566">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.657">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.544">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.678">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.563">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.614">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.622">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.642">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.576">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:54" time="0.003">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:54" time="0.003">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:54" time="0.037">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.014">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:54" time="0.013">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:54" time="0.005">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:58:54" time="10.606">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.017">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.033">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.006">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.012">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.507">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.009">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:59:05" time="0.012">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:59:05" time="0.561">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.018">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.524">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.016">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:59:05" time="14.65">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.011">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.012">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="1.834">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="8.79">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-17T14:59:20" time="43.11">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="42.85">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-11-17T15:00:03" time="4.12">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.01">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.035">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.017">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.023">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Small script which backs up a Cloudant or CouchDB database to an S3
// bucket, using an intermediary file on disk.
//
// The script generates the backup object name by combining the path
// part of the database URL with the current time.
'use strict';
const stream = require('stream');
const fs = require('fs');
const url = require('url');
const AWS = require('aws-sdk');
const couchbackup = require('@cloudant/couchbackup');
const debug = require('debug')('s3-backup');
const tmp = require('tmp');
const VError = require('verror').VError;
/*
Main function, run from base of file.
*/
function main() {
const argv = require('yargs')
.usage('Usage: $0 [options]')
.example('$0 -s https://user:pass@host/db -b <bucket>', 'Backup db to bucket')
.options({
source: { alias: 's', nargs: 1, demandOption: true, describe: 'Source database URL' },
bucket: { alias: 'b', nargs: 1, demandOption: true, describe: 'Destination bucket' },
prefix: { alias: 'p', nargs: 1, describe: 'Prefix for backup object key', default: 'couchbackup' },
s3url: { nargs: 1, describe: 'S3 endpoint URL' },
awsprofile: { nargs: 1, describe: 'The profile section to use in the ~/.aws/credentials file', default: 'default' }
})
.help('h').alias('h', 'help')
.epilog('Copyright (C) IBM 2017')
.argv;
const sourceUrl = argv.source;
const backupBucket = argv.bucket;
const backupName = new url.URL(sourceUrl).pathname.split('/').filter(function(x) { return x; }).join('-');
const backupKeyPrefix = `${argv.prefix}-${backupName}`;
const backupKey = `${backupKeyPrefix}-${new Date().toISOString()}`;
const backupTmpFile = tmp.fileSync();
const s3Endpoint = argv.s3url;
const awsProfile = argv.awsprofile;
// Creds are from ~/.aws/credentials, environment etc. (see S3 docs).
const awsOpts = {
signatureVersion: 'v4',
credentials: new AWS.SharedIniFileCredentials({ profile: awsProfile })
};
if (typeof s3Endpoint !== 'undefined') {
awsOpts.endpoint = new AWS.Endpoint(s3Endpoint);
}
const s3 = new AWS.S3(awsOpts);
debug(`Creating a new backup of ${s(sourceUrl)} at ${backupBucket}/${backupKey}...`);
bucketAccessible(s3, backupBucket)
.then(() => {
return createBackupFile(sourceUrl, backupTmpFile.name);
})
.then(() => {
return uploadNewBackup(s3, backupTmpFile.name, backupBucket, backupKey);
})
.then(() => {
debug('Backup successful!');
backupTmpFile.removeCallback();
debug('done.');
})
.catch((reason) => {
debug(`Error: ${reason}`);
// Signal failure to the caller, matching the streaming variant
process.exit(1);
});
}
/**
* Return a promise that resolves if the bucket is available and
* rejects if not.
*
* @param {any} s3 S3 client object
* @param {any} bucketName Bucket name
* @returns Promise
*/
function bucketAccessible(s3, bucketName) {
return new Promise(function(resolve, reject) {
const params = {
Bucket: bucketName
};
s3.headBucket(params, function(err, data) {
if (err) {
reject(new VError(err, 'S3 bucket not accessible'));
} else {
resolve();
}
});
});
}
/**
* Use couchbackup to create a backup of the specified database to a file path.
*
* @param {any} sourceUrl Database URL
* @param {any} backupTmpFilePath Path to write file
* @returns Promise
*/
function createBackupFile(sourceUrl, backupTmpFilePath) {
return new Promise((resolve, reject) => {
couchbackup.backup(
sourceUrl,
fs.createWriteStream(backupTmpFilePath),
(err) => {
if (err) {
return reject(new VError(err, 'CouchBackup process failed'));
}
debug('couchbackup to file done; uploading to S3');
resolve('creating backup file complete');
}
);
});
}
/**
* Upload a backup file to an S3 bucket.
*
* @param {any} s3 Object store client
* @param {any} backupTmpFilePath Path of the backup file to upload.
* @param {any} bucket Object store bucket name
* @param {any} key Object store key name
* @returns Promise
*/
function uploadNewBackup(s3, backupTmpFilePath, bucket, key) {
return new Promise((resolve, reject) => {
debug(`Uploading from ${backupTmpFilePath} to ${bucket}/${key}`);
function uploadFromStream(s3, bucket, key) {
const pass = new stream.PassThrough();
const params = {
Bucket: bucket,
Key: key,
Body: pass
};
s3.upload(params, function(err, data) {
debug('S3 upload done');
if (err) {
debug(err);
reject(new VError(err, 'Upload failed'));
return;
}
debug('Upload succeeded');
debug(data);
resolve();
}).on('httpUploadProgress', (progress) => {
debug(`S3 upload progress: ${JSON.stringify(progress)}`);
});
return pass;
}
const inputStream = fs.createReadStream(backupTmpFilePath);
// Reject on read errors rather than leaving the promise unsettled
inputStream.on('error', (err) => reject(new VError(err, 'Backup file read failed')));
const s3Stream = uploadFromStream(s3, bucket, key);
inputStream.pipe(s3Stream);
});
}
/**
* Remove creds from a URL, e.g., before logging
*
* @param {string} originalUrl URL to safen
*/
function s(originalUrl) {
const parts = new url.URL(originalUrl);
return url.format(parts, { auth: false });
}
main();
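// The reverse path, sketched but not invoked by this script: stream the
// object back out of S3 and into couchbackup.restore, the library's
// documented restore counterpart (restore(srcStream, targetUrl, opts,
// callback)). The function name is hypothetical, the target database
// must already exist and be empty, and AWS SDK v2's
// getObject(...).createReadStream() is used to stream the object body.
function sketchRestoreFromS3(s3, bucket, key, targetUrl) {
return new Promise((resolve, reject) => {
const objectStream = s3.getObject({ Bucket: bucket, Key: key }).createReadStream();
objectStream.on('error', (err) => reject(new VError(err, 'Object store download failed')));
couchbackup.restore(objectStream, targetUrl, { parallelism: 2 }, (err) => {
if (err) {
reject(new VError(err, 'CouchBackup restore failed'));
} else {
resolve();
}
});
});
}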

@@ -19,16 +19,103 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const changes = require('../includes/spoolchanges.js');
const url = 'http://localhost:7777';
const dbName = 'fakenockdb';
const longTestTimeout = 3000;
const db = request.client(`${url}/${dbName}`, { parallelism: 1 });
const seqSuffix = Buffer.alloc(124, 'abc123').toString('base64');
function provideChanges(batchSize, totalChanges, fullResponse = false) {
let pending = totalChanges;
const sparseResultsArray = Array(batchSize).fill({
seq: null,
id: 'doc',
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
});
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(totalChanges / batchSize + (totalChanges % batchSize > 0 ? 1 : 0))
.reply(200, (uri, requestBody) => {
pending -= batchSize;
const lastSeq = (totalChanges - pending);
const seq = lastSeq - batchSize;
return {
results: fullResponse
? Array.from(Array(batchSize), (_, i) => {
return {
seq: `${seq + i}-${seqSuffix}`,
id: `doc${seq + i}`,
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
};
})
: sparseResultsArray,
pending: pending,
last_seq: `${lastSeq}-abc`
};
});
}
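// To make the arithmetic above concrete: provideChanges(2, 4, true)
// mocks two _changes responses. The first reply computes pending = 2,
// lastSeq = 2, seq = 0, i.e.
// { results: [{ seq: '0-<seqSuffix>', id: 'doc0', ... },
//             { seq: '1-<seqSuffix>', id: 'doc1', ... }],
//   pending: 2, last_seq: '2-abc' }
// and the second computes pending = 0, lastSeq = 4, seq = 2, i.e.
// { results: [{ seq: '2-<seqSuffix>', id: 'doc2', ... },
//             { seq: '3-<seqSuffix>', id: 'doc3', ... }],
//   pending: 0, last_seq: '4-abc' }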
describe('#unit Check spool changes', function() {
it('should terminate on request error', async function() {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.replyWithError({ code: 'ECONNRESET', message: 'socket hang up' });
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.strictEqual(err.name, 'SpoolChangesError');
assert.strictEqual(err.message, `Failed changes request - socket hang up: post ${url}/${dbName}/_changes`);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
}).timeout(longTestTimeout);
it('should terminate on bad HTTP status code response', async function() {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
});
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: post ${url}/${dbName}/_changes - Error: foo, Reason: bar`);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
}).timeout(longTestTimeout);
it('should keep collecting changes', async function() {
// This test validates that spooling will correctly
// continue across multiple requests
// (4 batches of 100000 to be precise).
// This test might take up to 10 seconds
this.timeout(10 * 1000);
// Use full changes for this test
provideChanges(100000, 400000, true);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();

@@ -41,2 +128,66 @@ } catch (err) {

});
it('should keep collecting sparse changes', async function() {
// This test checks that making thousands of requests doesn't
// cause any problems.
// This test might take up to 25 seconds
this.timeout(25 * 1000);
// Use sparse changes for this test and a batch size of 1
provideChanges(1, 2500);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
});
describe('Longer spool changes checks', function() {
it('#slow should keep collecting changes (25M)', async function() {
// This test might take up to 5 minutes
this.timeout(5 * 60 * 1000);
// Note: changes spooling uses a constant batch size; we set a
// test value here and size the buffer to match
const batch = 100000;
// Use sparse changes for this test
provideChanges(batch, 25000000);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', batch, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('#slower should keep collecting changes (500M)', async function() {
// This test might take up to 90 minutes
this.timeout(90 * 60 * 1000);
// Note: changes spooling uses a constant batch size; we set a
// test value here and size the buffer to match
const batch = 1000000;
// Use full changes for this test to exercise load
provideChanges(batch, 500000000, true);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', batch, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
});

@@ -15,19 +15,200 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it beforeEach */
'use strict';
const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const error = require('../includes/error.js');
const url = 'http://localhost:7777/testdb';
const db = request.client(url, { parallelism: 1 });
const timeoutDb = request.client(url, { parallelism: 1, requestTimeout: 500 });
const longTestTimeout = 3000;
beforeEach('Clean nock', function() {
nock.cleanAll();
});
describe('#unit Check request headers', function() {
it('should have a couchbackup user-agent', async function() {
const couch = nock(url)
.matchHeader('user-agent', /couchbackup-cloudant\/\d+\.\d+\.\d+(?:-SNAPSHOT)? \(Node.js v\d+\.\d+\.\d+\)/)
.head('/good')
.reply(200);
return db.service.headDocument({ db: db.db, docId: 'good' }).then(() => {
assert.ok(couch.isDone());
});
});
});
describe('#unit Check request response error callback', function() {
it('should not callback with error for 200 response', async function() {
const couch = nock(url)
.get('/good')
.reply(200, { ok: true });
return db.service.getDocument({ db: db.db, docId: 'good' }).then(response => {
assert.ok(response.result);
assert.ok(couch.isDone());
});
});
it('should callback with error after 3 500 responses', async function() {
const couch = nock(url)
.get('/bad')
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
});
return assert.rejects(
db.service.getDocument({ db: db.db, docId: 'bad' }),
(err) => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
return true;
});
}).timeout(longTestTimeout);
it('should callback with error after 3 POST 503 responses', async function() {
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.times(3)
.reply(503, function(uri, requestBody) {
this.req.response.statusMessage = 'Service Unavailable';
return { error: 'service_unavailable', reason: 'Service unavailable' };
});
return assert.rejects(
db.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
(err) => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 Service Unavailable: post ${url}/_bulk_get - Error: service_unavailable, Reason: Service unavailable`);
assert.ok(couch.isDone());
return true;
});
}).timeout(longTestTimeout);
it('should callback with error after 3 429 responses', async function() {
const couch = nock(url)
.get('/bad')
.times(3)
.reply(429, function(uri, requestBody) {
this.req.response.statusMessage = 'Too Many Requests';
return { error: 'foo', reason: 'bar' };
});
return assert.rejects(
db.service.getDocument({ db: db.db, docId: 'bad' }),
(err) => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `429 Too Many Requests: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
return true;
});
}).timeout(longTestTimeout);
it('should callback with fatal error for 404 response', async function() {
const couch = nock(url)
.get('/bad')
.reply(404, function(uri, requestBody) {
this.req.response.statusMessage = 'Not Found';
return { error: 'foo', reason: 'bar' };
});
return assert.rejects(
db.service.getDocument({ db: db.db, docId: 'bad' }),
(err) => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `404 Not Found: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
return true;
});
});
it('should callback with same error for no status code error response', async function() {
const couch = nock(url)
.get('/bad')
.times(3)
.replyWithError('testing badness');
return assert.rejects(
db.service.getDocument({ db: db.db, docId: 'bad' }),
(err) => {
const err2 = error.convertResponseError(err);
assert.strictEqual(err, err2);
assert.ok(couch.isDone());
return true;
});
}).timeout(longTestTimeout);
it('should retry request if HTTP request gets timed out', async function() {
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.delay(1000)
.reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }] } })
.post('/_bulk_get')
.query(true)
.reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }, { id: '2', ok: { _id: '2' } }] } });
return timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then((response) => {
assert.ok(response);
assert.ok(response.result);
assert.ok(response.result.results);
assert.ok(response.result.results.docs);
assert.strictEqual(response.result.results.docs.length, 2);
assert.ok(couch.isDone());
});
});
it('should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests get timed out', async function() {
// Increase the timeout for this test to allow for the delays
this.timeout(3000);
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.delay(1000)
.times(3)
.reply(200, { ok: true });
return assert.rejects(
timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
(err) => {
err = error.convertResponseError(err);
// Note axios returns ECONNABORTED rather than ESOCKETTIMEDOUT
// See https://github.com/axios/axios/issues/2710 via https://github.com/axios/axios/issues/1543
assert.strictEqual(err.statusText, 'ECONNABORTED');
assert.strictEqual(err.message, `timeout of 500ms exceeded: post ${url}/_bulk_get ECONNABORTED`);
assert.ok(couch.isDone());
return true;
});
});
describe('#unit Check credentials', function() {
it('should properly decode username and password', async function() {
const username = 'user%123';
const password = 'colon:at@321';
const url = `http://${encodeURIComponent(username)}:${encodeURIComponent(password)}@localhost:7777/testdb`;
const sessionUrl = 'http://localhost:7777';
const couch = nock(sessionUrl)
.post('/_session', { username: username, password: password })
.reply(200, { ok: true }, { 'Set-Cookie': 'AuthSession=ABC123DEF4356;' })
.get('/')
.reply(200);
const db = request.client(url, { parallelism: 1 });
return db.service.getServerInformation().then(response => {
assert.ok(response);
assert.ok(couch.isDone());
});
});
});
});
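// Illustrative sketch (not part of the suite): how the helpers exercised above
// compose in application code. request.client builds a service client and
// error.convertResponseError maps an SDK/axios failure onto the HTTPError
// hierarchy asserted in these tests. The URL below is a placeholder.
const exampleClient = request.client('http://localhost:5984/exampledb', { parallelism: 1 });
exampleClient.service.getServerInformation()
.then(response => console.log('up', response.result))
.catch(err => {
// convertResponseError returns the original error when there is no HTTP
// status code, otherwise an HTTPError/HTTPFatalError as asserted above
throw error.convertResponseError(err);
});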

@@ -18,26 +18,16 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const fs = require('fs');
const u = require('./citestutils.js');

describe('Write error tests', function() {
it('calls callback with error set when stream is not writeable', async function() {
u.setTimeout(this, 10);
const dirname = fs.mkdtempSync('test_backup_');
// make temp dir read only
fs.chmodSync(dirname, 0o444);
const filename = dirname + '/test.backup';
const backupStream = fs.createWriteStream(filename, { flags: 'w' });
const params = { useApi: true };
// try to do backup and check err was set in callback
return u.testBackup(params, 'animaldb', backupStream).then(() => {
assert.fail('Should throw an "EACCES" error');
}).catch((resultErr) => {
// cleanup temp dir
fs.rmdirSync(dirname);
// error should have been set
assert.ok(resultErr);
assert.strictEqual(resultErr.code, 'EACCES');
});
});
});

describe('Encryption tests', function() {
// Note: CLI only, to use the openssl command
const p = { useApi: false, encryption: true };
it('should backup and restore animaldb via an encrypted file', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const encryptedBackup = `./${this.fileName}`;
return u.testBackupAndRestoreViaFile(p, 'animaldb', encryptedBackup, this.dbName).then(() => {
return u.assertEncryptedFile(encryptedBackup);
});
});
});
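// Sketch of the pipe the encryption test drives (assumed shape, CLI only);
// the passphrase matches the cliEncrypt/cliDecrypt helpers later in this diff:
//   couchbackup --db animaldb | openssl aes-128-cbc -pass pass:12345 > db.backup.enc
//   openssl aes-128-cbc -d -pass pass:12345 < db.backup.enc | couchrestore --db animaldb-copy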

@@ -15,3 +15,3 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it before after beforeEach */
/* global describe it beforeEach */
'use strict';

@@ -21,271 +21,116 @@

const fs = require('fs');
const u = require('./citestutils.js');
const mockServerPort = +process.env.COUCHBACKUP_MOCK_SERVER_PORT || 7777;
const url = `http://localhost:${mockServerPort}`;
const nock = require('nock');
const httpProxy = require('http-proxy');
const Readable = require('stream').Readable;
const { once } = require('node:events');

// Create an infinite stream to read.
// It just keeps sending a backup line, useful for testing cases of
// termination while a stream has content remaining (the animaldb backup
// is too small for that).
class InfiniteBackupStream extends Readable {
constructor(opt) {
super(opt);
this.contents = Buffer.from('[{"_id":"giraffe","_rev":"3-7665c3e66315ff40616cceef62886bd8","min_weight":830,"min_length":5,"max_weight":1600,"max_length":6,"wiki_page":"http://en.wikipedia.org/wiki/Giraffe","class":"mammal","diet":"herbivore","_revisions":{"start":3,"ids":["7665c3e66315ff40616cceef62886bd8","aaaf10d5a68cdf22d95a5482a0e95549","967a00dff5e02add41819138abb3284d"]}}]\n', 'utf8');
}

_read() {
let proceed;
do {
proceed = this.push(this.contents);
} while (proceed);
}
}

function assertNock() {
try {
assert.ok(nock.isDone());
} catch (err) {
console.error('pending mocks: %j', nock.pendingMocks());
throw err;
}
}

function testPromiseWithAssertNock(testPromise) {
return testPromise.finally(() => {
assertNock();
});
}

async function backupHttpError(opts, errorName, errorCode) {
const p = u.p(opts, { expectedBackupError: { name: errorName, code: errorCode } });

// Create a file and attempt a backup to it
const output = fs.createWriteStream('/dev/null');
return once(output, 'open')
.then(() => {
return testPromiseWithAssertNock(u.testBackup(p, 'fakenockdb', output));
});
}

async function restoreHttpError(opts, errorName, errorCode) {
const q = u.p(opts, { expectedRestoreError: { name: errorName, code: errorCode } });
return testPromiseWithAssertNock(u.testRestoreFromFile(q, './test/fixtures/animaldb_expected.json', 'fakenockdb'));
}

[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#unit Fatal errors', params), function() {
let processEnvCopy;
let proxy;

before('Set process data for test', function() {
const proxyPort = mockServerPort + 1000;
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));

// Set up a proxy to point to our nock server because the nock override
// isn't visible to the spawned CLI process
if (!params.useApi) {
proxy = httpProxy.createProxyServer({ target: url }).listen(proxyPort, 'localhost');
proxy.on('error', (err, req, res) => {
console.log(`Proxy received error ${err}`);
res.writeHead(400, {
'Content-Type': 'application/json'
});
res.end(JSON.stringify(err));
});
}

// setup environment variables
process.env.COUCH_URL = (params.useApi) ? url : `http://localhost:${proxyPort}`;
nock.emitter.on('no match', (req, opts) => {
console.error(`Unmatched nock request ${opts.method} ${opts.protocol}${opts.host}${opts.path}`);
});
});

after('Reset process data', function(done) {
process.env = processEnvCopy;
nock.emitter.removeAllListeners();
if (!params.useApi) {
proxy.close(done);
} else {
done();
}
});

beforeEach('Reset nocks', function() {
nock.cleanAll();
});
describe('for backup', function() {
it('should terminate when DB does not exist', function() {
// Simulate existence check
nock(url).head('/fakenockdb').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on BulkGetError', function() {
// Simulate existence check
const n = nock(url).head('/fakenockdb').reply(200);
// Simulate _bulk_get not available
n.post('/fakenockdb/_bulk_get').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'BulkGetError', 50);
});
it('should terminate on Unauthorized existence check', function() {
// Simulate a 401
nock(url).head('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _reader', function() {
// Simulate a 403
nock(url).head('/fakenockdb').reply(403, { error: 'forbidden', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Forbidden', 12);
});
it('should terminate on _bulk_get HTTPFatalError', function() {
// Provide a mock complete changes log to allow a resume to skip ahead
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/test.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs
// Note: 2 outstanding batches, so 2 responses, 1 mock is optional because we can't guarantee timing
n.post('/fakenockdb/_bulk_get').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_get').query(true).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on NoLogFileName', function() {
// Don't supply a log file name with resume
const p = u.p(params, { opts: { resume: true } });
return backupHttpError(p, 'NoLogFileName', 20);
});
it('should terminate on LogDoesNotExist', function() {
// Use a non-existent log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/doesnotexist.log' } });
return backupHttpError(p, 'LogDoesNotExist', 21);
});
it('should terminate on IncompleteChangesInLogFile', function() {
// Use an incomplete changes log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/incomplete_changes.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Should fail when it reads the incomplete changes
return backupHttpError(p, 'IncompleteChangesInLogFile', 22);
});
it('should terminate on _changes HTTPFatalError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs (note 2 outstanding batches)
n.post('/fakenockdb/_changes').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(params, 'HTTPFatalError', 40);
});
it('should terminate on SpoolChangesError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a changes without a last_seq
n.post('/fakenockdb/_changes').query(true).reply(200,
{
results: [{
seq: '2-g1AAAAEbeJzLYWBgYMlgTmFQSElKzi9KdUhJstTLTS3KLElMT9VLzskvTUnMK9HLSy3JAapkSmRIsv___39WBnMiUy5QgN3MzDIxOdEMWb85dv0gSxThigyN8diS5AAkk-pBFiUyoOkzxKMvjwVIMjQAKaDW_Zh6TQnqPQDRC7I3CwDPDV1k',
id: 'badger',
changes: [{ rev: '4-51aa94e4b0ef37271082033bba52b850' }]
}]
});
return backupHttpError(params, 'SpoolChangesError', 30);
});
});
describe('for restore', function() {
it('should terminate on Unauthorized db existence check', function() {
// Simulate a 401
nock(url).get('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return restoreHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _writer', function() {
// Simulate the DB exists (i.e. you can read it)
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 403 trying to write
n.post('/fakenockdb/_bulk_docs').reply(403, { error: 'forbidden', reason: '_writer access is required for this request' });
return restoreHttpError(params, 'Forbidden', 12);
});
it('should terminate on RestoreDatabaseNotFound', function() {
// Simulate the DB does not exist
nock(url).get('/fakenockdb').reply(404, { error: 'not_found', reason: 'Database does not exist.' });
return restoreHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on notEmptyDBErr when database is not empty', function() {
// Simulate the DB that does exist and not empty
nock(url).get('/fakenockdb').reply(200, { doc_count: 10, doc_del_count: 0 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on notEmptyDBErr when database is not new', function() {
// Simulate the DB that does exist and not new
nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 10 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Use a parallelism of one and mock one response
const p = u.p(params, { opts: { parallelism: 1 } });
// Simulate a 400 trying to write
n.post('/fakenockdb/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
return restoreHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on _bulk_docs HTTPFatalError from system database', function() {
// Simulate that target database exists and is _not_ empty.
// This should pass validator as we exclude system databases from the check.
const n = nock(url).get('/_replicator').reply(200, { doc_count: 1, doc_del_count: 0 });
// Simulate a 400 trying to write
n.post('/_replicator/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use a parallelism of one and mock one response
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), '_replicator'));
});
it('should terminate on _bulk_docs HTTPFatalError large stream', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write
// Provide a body function to handle the stream, but allow any body
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use only parallelism 1 so we don't have to mock up loads of responses
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return testPromiseWithAssertNock(u.testRestore(q, new InfiniteBackupStream(), 'fakenockdb'));
});
it('should terminate on multiple _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write docs, 5 times because of default parallelism
// Provide a body function to handle the stream, but allow any body
// Four of the mocks are optional because of parallelism 5 we can't guarantee that the exit will happen
// after all 5 requests, but we must get at least one of them
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).times(4).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
const q = u.p(params, { opts: { bufferSize: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return restoreHttpError(q, 'HTTPFatalError', 40);
});
});
});
});

const request = require('../includes/request.js');
const writer = require('../includes/writer.js');
const noopEmitter = new (require('events')).EventEmitter();
const liner = require('../includes/liner.js');
const { pipeline } = require('node:stream/promises');
const longTestTimeout = 3000;

describe('#unit Check database restore writer', function() {
const dbUrl = 'http://localhost:5984/animaldb';
const db = request.client(dbUrl, { parallelism: 1 });

beforeEach('Reset nocks', function() {
nock.cleanAll();
});

it('should complete successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, []); // success
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
});

it('should terminate on a fatal error', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(401, { error: 'Unauthorized' }); // fatal error
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Unauthorized');
assert.strictEqual(err.message, 'Access is denied due to invalid credentials.');
assert.ok(nock.isDone());
return true;
}
);
});

it('should retry on transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(200, { ok: true }); // third time lucky success
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
}).timeout(longTestTimeout);

it('should fail after 3 transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(503, { error: 'Service Unavailable' }); // Final transient error
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 : post ${dbUrl}/_bulk_docs - Error: Service Unavailable`);
assert.ok(nock.isDone());
return true;
}
);
}).timeout(longTestTimeout);

it('should restore shallow backups without rev info successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ ok: true, id: 'foo', rev: '1-abc' }]); // success
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_old_shallow.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 11);
assert.ok(nock.isDone());
})]);
});

it('should get a batch error for non-empty array response with new_edits false', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ id: 'foo', error: 'foo', reason: 'bar' }]);
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Error');
assert.strictEqual(err.message, 'Error writing batch with new_edits:false and 1 items');
assert.ok(nock.isDone());
return true;
}
);
});
});
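// Usage sketch (assumptions: a local CouchDB and an empty target database;
// './backup.json' is a placeholder path): the restore path these tests cover
// is just a stream pipeline of backup file -> liner() (split lines into
// batches) -> writer() (POST each batch to /_bulk_docs).
async function restoreSketch() {
const client = request.client('http://localhost:5984/targetdb', { parallelism: 1 });
const w = writer(client, 500, 1, noopEmitter); // bufferSize 500, parallelism 1
await pipeline(fs.createReadStream('./backup.json'), liner(), w);
}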

@@ -18,85 +18,13 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Resume tests', params), function() {
it('should create a log file', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile } });
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
assert.ok(fs.existsSync(logFile), 'The log file should exist.');
});
});

it('should restore corrupted animaldb to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_corrupted.json');
const dbName = this.dbName;
const p = u.p(params, { expectedRestoreErrorRecoverable: { name: 'BackupFileJsonError' } });
return once(input, 'open')
.then(() => {
return u.testRestore(p, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});

it('should restore resumed animaldb with blank line to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_resumed_blank.json');
const dbName = this.dbName;
return once(input, 'open')
.then(() => {
return u.testRestore(params, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
});

describe(u.scenario('#slowest End to end backup and restore', params), function() {
// 10 GB is about the largest the CI can handle before getting very upset
// about how long things are taking
it('should backup and restore largedb10g', async function() {
u.setTimeout(this, 350 * 60);
return u.testDirectBackupAndRestore(params, 'largedb10g', this.dbName);
});
});
});

describe('Resume tests', function() {
// Currently cannot abort API backups, when we do this test should be run for
// both API and CLI
const params = { useApi: false };

it('should correctly backup and restore backup10m', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 90);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
// Use abort parameter to terminate the backup
const p = u.p(params, { abort: true }, { opts: { log: logFile } });
const restoreDb = this.dbName;
// Set the database doc count as fewer than this should be written during
// resumed backup.
p.exclusiveMaxExpected = 5096;
return u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
});

// Note --output is only valid for CLI usage, this test should only run for CLI
it('should correctly backup and restore backup10m using --output', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 90);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
// Use abort parameter to terminate the backup
const p = u.p(params, { abort: true }, { opts: { output: actualBackup, log: logFile } });
const restoreDb = this.dbName;
// Set the database doc count as fewer than this should be written during
// resumed backup.
p.exclusiveMaxExpected = 5096;
return await u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
});
});
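// Resume flow sketch (paths are placeholders): a backup run with --log can be
// interrupted and then continued with --resume, which is what
// testBackupAbortResumeRestore automates above:
//   couchbackup --db backup10m --log backup.log --output backup.txt   # interrupted
//   couchbackup --db backup10m --log backup.log --output backup.txt --resume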

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2018, 2023 IBM Corp. All rights reserved.
//

@@ -18,25 +18,59 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const assert = require('assert');
const logfilegetbatches = require('../includes/logfilegetbatches.js');

describe('#unit Fetching batches from a log file', function() {
it('should fetch multiple batches correctly', async function() {
return new Promise((resolve, reject) => {
logfilegetbatches('./test/fixtures/test.log', [1, 4], function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(typeof data, 'object');
assert.strictEqual(Object.keys(data).length, 2);
assert.deepStrictEqual(data['1'].docs, [{ id: '6' }, { id: '7' }, { id: '8' }, { id: '9' }, { id: '10' }]);
assert.strictEqual(data['1'].batch, 1);
assert.deepStrictEqual(data['4'].docs, [{ id: '21' }, { id: '22' }]);
assert.strictEqual(data['4'].batch, 4);
resolve();
} catch (err) {
reject(err);
}
});
});
});
});

const fs = require('fs');
const { once } = require('node:events');
const readline = require('readline');
const u = require('./citestutils.js');
const uuid = require('uuid').v4;

const params = { useApi: true };

describe(u.scenario('Concurrent database backups', params), function() {
it('should run concurrent API database backups correctly #slower', async function() {
// Allow up to 900 s to backup and compare (it should be much faster)!
u.setTimeout(this, 900);

const checkForEmptyBatches = async function(fileName) {
let foundEmptyBatch = false;

const rd = readline.createInterface({
input: fs.createReadStream(fileName),
output: fs.createWriteStream('/dev/null'),
terminal: false
});

rd.on('line', function(line) {
if (JSON.parse(line).length === 0) {
// Note: Empty batch arrays indicate that the running backup is
// incorrectly sharing a log file with another ongoing backup job.
foundEmptyBatch = true;
}
});

rd.on('close', function() {
if (foundEmptyBatch) {
return Promise.reject(new Error(`Log file '${fileName}' contains empty batches`));
} else {
return Promise.resolve();
}
});
};

const backupPromise = async function() {
const actualBackup = `./${uuid()}`;
const output = fs.createWriteStream(actualBackup);
return once(output, 'open').then(() => {
return u.testBackup(params, 'largedb1g', output);
}).then(() => {
return checkForEmptyBatches(actualBackup);
});
};

// [1] Run 'largedb1g' database backup
const backup1 = backupPromise();
// [2] Run 'largedb1g' database backup
const backup2 = backupPromise();
return Promise.all([backup1, backup2]);
});
});
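// Illustrative sketch only: two API backups of the same source can run
// concurrently provided each has its own output (and, if used, its own log
// file); uuid() gives each run a unique name as in the test above.
async function concurrentBackupSketch() {
const out1 = fs.createWriteStream(`./${uuid()}`);
const out2 = fs.createWriteStream(`./${uuid()}`);
await Promise.all([once(out1, 'open'), once(out2, 'open')]);
await Promise.all([
u.testBackup(params, 'largedb1g', out1),
u.testBackup(params, 'largedb1g', out2)
]);
}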

@@ -18,21 +18,90 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const u = require('./citestutils.js');

describe('Event tests', function() {
it('should get a finished event when using stdout', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events, pass eventEmitter so we get the emitter back
const params = { useApi: true, useStdOut: true };
// All API backups now set an event listener for finished and it is part of the backup
// promise, so if the backup passes the finished event fired.
return u.testBackup(params, 'animaldb', process.stdout);
});

it('should get a finished event when using file output', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events, pass eventEmitter so we get the emitter back
const params = { useApi: true };
const actualBackup = `./${this.fileName}`;
return u.testBackupToFile(params, 'animaldb', actualBackup);
});
});

const fs = require('fs');
const { once } = require('node:events');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Basic backup and restore', params), function() {
it('should backup animaldb to a file correctly', async function() {
// Allow up to 40 s to backup and compare (it should be much faster)!
u.setTimeout(this, 40);
const actualBackup = `./${this.fileName}`;
// Create a file and backup to it
const output = fs.createWriteStream(actualBackup);
return once(output, 'open')
.then(() => {
return u.testBackup(params, 'animaldb', output);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected.json');
});
});
it('should restore animaldb to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_expected.json');
const dbName = this.dbName;
return once(input, 'open').then(() => {
return u.testRestore(params, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should execute a shallow mode backup successfully', async function() {
// Allow 30 s
u.setTimeout(this, 30);
const actualBackup = `./${this.fileName}`;
const output = fs.createWriteStream(actualBackup);
// Add the shallow mode option
const p = u.p(params, { opts: { mode: 'shallow' } });
return once(output, 'open')
.then(() => {
return u.testBackup(p, 'animaldb', output);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected_shallow.json');
});
});
describe(u.scenario('Buffer size tests', params), function() {
it('should backup/restore animaldb with the same buffer size', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } });
return u.testBackupAndRestoreViaFile(p, 'animaldb', actualBackup, this.dbName);
});
it('should backup/restore animaldb with backup buffer > restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 2 } }); // backup
const q = u.p(params, { opts: { bufferSize: 1 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should backup/restore animaldb with backup buffer < restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } }); // backup
const q = u.p(params, { opts: { bufferSize: 2 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
});
});
});
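// Sketch of what the buffer-size matrix above exercises: bufferSize is the
// number of documents per batch, so the backup and restore batch sizes are
// independent of each other. Option shape follows the u.p usage in these tests.
const backupOptsSketch = { opts: { log: './example.log', bufferSize: 2 } }; // backup batches of 2
const restoreOptsSketch = { opts: { bufferSize: 1 } }; // restore batches of 1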

@@ -15,316 +15,117 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe afterEach before after it */
/* global describe it before after */
'use strict';

const assert = require('assert');
const parser = require('../includes/parser.js');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;

describe('#unit Default parameters', function() {
let processEnvCopy;
let processArgvCopy;

before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
processArgvCopy = JSON.parse(JSON.stringify(process.argv));

// setup environment variables
process.env.COUCH_URL = 'http://user:pass@myurl.com';
process.env.COUCH_DATABASE = 'mydb';
process.env.COUCH_BUFFER_SIZE = '1000';
process.env.COUCH_PARALLELISM = '20';
process.env.COUCH_REQUEST_TIMEOUT = '20000';
process.env.COUCH_LOG = 'my.log';
process.env.COUCH_RESUME = 'true';
process.env.COUCH_OUTPUT = 'myfile.txt';
process.env.COUCH_MODE = 'shallow';
process.env.CLOUDANT_IAM_API_KEY = 'ABC123-ZYX987_cba789-xyz321';
process.env.COUCH_QUIET = 'true';
});

after('Reset process data', function() {
process.env = processEnvCopy;
process.argv = processArgvCopy;
});

afterEach(function() {
delete require.cache[require.resolve('commander')];
});
describe('Backup command-line', function() {
it('respects the COUCH_URL env variable if the --url backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
it('respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('respects the COUCH_LOG env variable if the --log backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, process.env.COUCH_LOG);
});
it('respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
it('respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, process.env.COUCH_OUTPUT);
});
it('respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, process.env.COUCH_MODE);
});
it('respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the backup --url command-line parameter', function() {
const url = 'http://user:pass@myurl2.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the backup --db command-line parameter', function() {
const db = 'mydb2';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the backup --buffer-size command-line parameter', function() {
const bufferSize = 500;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the backup --parallelism command-line parameter', function() {
const parallelism = 10;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the backup --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the backup --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the backup --log command-line parameter', function() {
const filename = 'my2.log';
process.argv = ['node', 'test', '--log', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, filename);
});
it('respects the backup --resume command-line parameter', function() {
process.argv = ['node', 'test', '--resume'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
it('respects the backup --output command-line parameter', function() {
const filename = 'myfile2.txt';
process.argv = ['node', 'test', '--output', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, filename);
});
it('respects the backup --mode full command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'full'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'full');
});
it('respects the backup --mode shallow command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'shallow'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'shallow');
});
it('respects the backup --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
describe('Restore command-line', function() {
it('respects the COUCH_URL env variable if the --url restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
it('respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('respects the COUCH_QUIET env variable if the --quiet restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the restore --url command-line parameter', function() {
const url = 'https://a:b@myurl3.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the restore --db command-line parameter', function() {
const db = 'mydb3';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the restore --buffer-size command-line parameter', function() {
const bufferSize = 250;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the restore --parallelism command-line parameter', function() {
const parallelism = 5;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the restore --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the restore --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the restore --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
});

describe('#unit Configuration', function() {
let processEnvCopy;

before('Save env', function() {
// Copy env so we can reset it after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
});

after('Reset env', function() {
process.env = processEnvCopy;
});

it('respects the COUCH_URL env variable', function() {
process.env.COUCH_URL = 'http://user:pass@myurl.com';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.url, 'string');
assert.strictEqual(config.url, process.env.COUCH_URL);
});

it('respects the COUCH_DATABASE env variable', function() {
process.env.COUCH_DATABASE = 'mydb';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.db, 'string');
assert.strictEqual(config.db, process.env.COUCH_DATABASE);
});

it('respects the COUCH_BUFFER_SIZE env variable', function() {
process.env.COUCH_BUFFER_SIZE = '1000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.bufferSize, 'number');
assert.strictEqual(config.bufferSize, 1000);
});

it('respects the COUCH_PARALLELISM env variable', function() {
process.env.COUCH_PARALLELISM = '20';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.parallelism, 'number');
assert.strictEqual(config.parallelism, 20);
});

it('respects the COUCH_REQUEST_TIMEOUT env variable', function() {
process.env.COUCH_REQUEST_TIMEOUT = '10000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.requestTimeout, 'number');
assert.strictEqual(config.requestTimeout, 10000);
});

it('respects the CLOUDANT_IAM_API_KEY env variable', function() {
const key = 'ABC123-ZYX987_cba789-xyz321';
process.env.CLOUDANT_IAM_API_KEY = key;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamApiKey, 'string');
assert.strictEqual(config.iamApiKey, key);
});

it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function() {
const u = 'https://testhost.example:1234/identity/token';
process.env.CLOUDANT_IAM_TOKEN_URL = u;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamTokenUrl, 'string');
assert.strictEqual(config.iamTokenUrl, u);
});

it('respects the COUCH_LOG env variable', function() {
process.env.COUCH_LOG = 'my.log';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.log, 'string');
assert.strictEqual(config.log, process.env.COUCH_LOG);
});

it('respects the COUCH_RESUME env variable', function() {
process.env.COUCH_RESUME = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.resume, 'boolean');
assert.strictEqual(config.resume, true);
});

it('respects the COUCH_OUTPUT env variable', function() {
process.env.COUCH_OUTPUT = 'myfile.txt';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.output, 'string');
assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
});

it('respects the COUCH_MODE env variable', function() {
process.env.COUCH_MODE = 'shallow';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.mode, 'string');
assert.strictEqual(config.mode, 'shallow');
});

it('respects the COUCH_QUIET env variable', function() {
process.env.COUCH_QUIET = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.quiet, 'boolean');
assert.strictEqual(config.quiet, true);
});
});
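// Minimal sketch of the precedence these tests pin down: environment
// variables seed the config object, and explicit CLI flags (parsed
// separately by parser.parseBackupArgs/parseRestoreArgs) override them.
process.env.COUCH_BUFFER_SIZE = '1000';
const cfgSketch = {};
applyEnvVars(cfgSketch);
// cfgSketch.bufferSize === 1000 (numeric env values are parsed to numbers)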

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2023 IBM Corp. All rights reserved.
//

@@ -15,93 +15,138 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it */
/* global */
'use strict';

const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Basic backup and restore', params), function() {
it('should backup animaldb to a file correctly', async function() {
// Allow up to 40 s to backup and compare (it should be much faster)!
u.setTimeout(this, 40);
const actualBackup = `./${this.fileName}`;
// Create a file and backup to it
const output = fs.createWriteStream(actualBackup);
return once(output, 'open')
.then(() => {
return u.testBackup(params, 'animaldb', output);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected.json');
});
});

it('should restore animaldb to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_expected.json');
const dbName = this.dbName;
return once(input, 'open').then(() => {
return u.testRestore(params, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});

it('should execute a shallow mode backup successfully', async function() {
// Allow 30 s
u.setTimeout(this, 30);
const actualBackup = `./${this.fileName}`;
const output = fs.createWriteStream(actualBackup);
// Add the shallow mode option
const p = u.p(params, { opts: { mode: 'shallow' } });
return once(output, 'open')
.then(() => {
return u.testBackup(p, 'animaldb', output);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected_shallow.json');
});
});

describe(u.scenario('Buffer size tests', params), function() {
it('should backup/restore animaldb with the same buffer size', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } });
return u.testBackupAndRestoreViaFile(p, 'animaldb', actualBackup, this.dbName);
});

it('should backup/restore animaldb with backup buffer > restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 2 } }); // backup
const q = u.p(params, { opts: { bufferSize: 1 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});

it('should backup/restore animaldb with backup buffer < restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } }); // backup
const q = u.p(params, { opts: { bufferSize: 2 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
});
});
});

const { fork, spawn } = require('node:child_process');
const { Duplex } = require('node:stream');
const debug = require('debug');
const logProcess = debug('couchbackup:test:process');

class TestProcess {
constructor(cmd, args, mode) {
this.cmd = cmd;
// Child process stdio [stdin, stdout, stderr, ...extra channels]
const childProcessOptions = { stdio: [] };
switch (mode) {
case 'readable':
// Readable only, no writing to stdin so ignore it
childProcessOptions.stdio = ['ignore', 'pipe', 'inherit'];
break;
case 'writable':
// Writable only, no reading from stdout so ignore it
childProcessOptions.stdio = ['pipe', 'ignore', 'inherit'];
break;
default:
// Default Duplex mode pipe both stdin and stdout
childProcessOptions.stdio = ['pipe', 'pipe', 'inherit'];
break;
}
if (cmd.endsWith('.js')) {
// Add Node fork ipc channel
childProcessOptions.stdio.push('ipc');
logProcess(`Forking Node process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = fork(cmd, args, childProcessOptions);
} else {
logProcess(`Spawning process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = spawn(cmd, args, childProcessOptions);
}

this.childProcessPromise = once(this.childProcess, 'close').then(() => {
const code = this.childProcess.exitCode;
const signal = this.childProcess.signalCode;
logProcess(`Test process ${cmd} closed with code ${code} and signal ${signal}`);
if (code === 0) {
logProcess(`Resolving process promise for ${cmd}`);
return Promise.resolve(code);
} else {
const e = new Error(`Test child process ${cmd} exited with code ${code} and ${signal}. This may be normal for error case testing.`);
e.code = code;
e.signal = signal;
logProcess(`Will reject process promise for ${cmd} with ${e}`);
return Promise.reject(e);
}
});

switch (mode) {
case 'readable':
this.duplexFrom = this.childProcess.stdout;
break;
case 'writable':
this.duplexFrom = this.childProcess.stdin;
break;
default:
// Default is duplex
this.duplexFrom = { writable: this.childProcess.stdin, readable: this.childProcess.stdout };
}

this.stream = Duplex.from(this.duplexFrom);
}
}
module.exports = {
TestProcess,
cliBackup: function(databaseName, params = {}) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.mode) {
args.push('--mode');
args.push(params.opts.mode);
}
if (params.opts.output) {
args.push('--output');
args.push(params.opts.output);
}
if (params.opts.log) {
args.push('--log');
args.push(params.opts.log);
}
if (params.opts.resume) {
args.push('--resume');
args.push(params.opts.resume);
}
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchbackup.bin.js', args, 'readable');
},
cliRestore: function(databaseName, params) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.parallelism) {
args.push('--parallelism');
args.push(params.opts.parallelism);
}
if (params.opts.requestTimeout) {
args.push('--request-timeout');
args.push(params.opts.requestTimeout);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchrestore.bin.js', args, 'writable');
},
cliGzip: function() {
return new TestProcess('gzip', []);
},
cliGunzip: function() {
return new TestProcess('gunzip', []);
},
cliEncrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-pass', 'pass:12345']);
},
cliDecrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-d', '-pass', 'pass:12345']);
}
};
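// Usage sketch (hypothetical consumer; `fs` is assumed to be required by the
// caller): pipe a CLI backup through gzip to a file, then wait on both child
// process promises, mirroring what the compression tests do with these helpers.
async function gzipBackupSketch(testProcessHelpers) {
const backup = testProcessHelpers.cliBackup('animaldb');
const gzip = testProcessHelpers.cliGzip();
backup.stream.pipe(gzip.stream).pipe(fs.createWriteStream('./animaldb.backup.gz'));
await Promise.all([backup.childProcessPromise, gzip.childProcessPromise]);
}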

@@ -15,117 +15,24 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it before after */
/* global describe it */
'use strict';

const assert = require('assert');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;

describe('#unit Configuration', function() {
let processEnvCopy;

before('Save env', function() {
// Copy env so we can reset it after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
});

after('Reset env', function() {
process.env = processEnvCopy;
});
it('respects the COUCH_URL env variable', function() {
process.env.COUCH_URL = 'http://user:pass@myurl.com';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.url, 'string');
assert.strictEqual(config.url, process.env.COUCH_URL);
});
it('respects the COUCH_DATABASE env variable', function() {
process.env.COUCH_DATABASE = 'mydb';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.db, 'string');
assert.strictEqual(config.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable', function() {
process.env.COUCH_BUFFER_SIZE = '1000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.bufferSize, 'number');
assert.strictEqual(config.bufferSize, 1000);
});
it('respects the COUCH_PARALLELISM env variable', function() {
process.env.COUCH_PARALLELISM = '20';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.parallelism, 'number');
assert.strictEqual(config.parallelism, 20);
});
it('respects the COUCH_REQUEST_TIMEOUT env variable', function() {
process.env.COUCH_REQUEST_TIMEOUT = '10000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.requestTimeout, 'number');
assert.strictEqual(config.requestTimeout, 10000);
});
it('respects the CLOUDANT_IAM_API_KEY env variable', function() {
const key = 'ABC123-ZYX987_cba789-xyz321';
process.env.CLOUDANT_IAM_API_KEY = key;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamApiKey, 'string');
assert.strictEqual(config.iamApiKey, key);
});
it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function() {
const u = 'https://testhost.example:1234/identity/token';
process.env.CLOUDANT_IAM_TOKEN_URL = u;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamTokenUrl, 'string');
assert.strictEqual(config.iamTokenUrl, u);
});
it('respects the COUCH_LOG env variable', function() {
process.env.COUCH_LOG = 'my.log';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.log, 'string');
assert.strictEqual(config.log, process.env.COUCH_LOG);
});
it('respects the COUCH_RESUME env variable', function() {
process.env.COUCH_RESUME = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.resume, 'boolean');
assert.strictEqual(config.resume, true);
});
it('respects the COUCH_OUTPUT env variable', function() {
process.env.COUCH_OUTPUT = 'myfile.txt';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.output, 'string');
assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
});
it('respects the COUCH_MODE env variable', function() {
process.env.COUCH_MODE = 'shallow';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.mode, 'string');
assert.strictEqual(config.mode, 'shallow');
});
it('respects the COUCH_QUIET env variable', function() {
process.env.COUCH_QUIET = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.quiet, 'boolean');
assert.strictEqual(config.quiet, true);
});
});

const u = require('./citestutils.js');

describe('Event tests', function() {
it('should get a finished event when using stdout', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events, pass eventEmitter so we get the emitter back
const params = { useApi: true, useStdOut: true };
// All API backups now set an event listener for finished and it is part of the backup
// promise, so if the backup passes the finished event fired.
return u.testBackup(params, 'animaldb', process.stdout);
});

it('should get a finished event when using file output', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events, pass eventEmitter so we get the emitter back
const params = { useApi: true };
const actualBackup = `./${this.fileName}`;
return u.testBackupToFile(params, 'animaldb', actualBackup);
});
});

@@ -18,21 +18,26 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

delete require.cache[require.resolve('./citestutils.js')];
const assert = require('assert');
const fs = require('fs');
const u = require('./citestutils.js');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
  describe(u.scenario('End to end backup and restore', params), function() {
    it('should backup and restore animaldb', async function() {
      // Allow up to 60 s for backup and restore of animaldb
      u.setTimeout(this, 60);
      return u.testDirectBackupAndRestore(params, 'animaldb', this.dbName);
    });

    it('should backup and restore largedb1g #slow', async function() {
      // Allow up to 30 m for backup and restore of largedb1g
      // This is a long time but when many builds run in parallel it can take a
      // while to get this done.
      u.setTimeout(this, 30 * 60);
      return u.testDirectBackupAndRestore(params, 'largedb1g', this.dbName);
    });
  });
});

describe('Write error tests', function() {
  it('calls callback with error set when stream is not writeable', async function() {
    u.setTimeout(this, 10);
    const dirname = fs.mkdtempSync('test_backup_');
    // make temp dir read only
    fs.chmodSync(dirname, 0o444);
    const filename = dirname + '/test.backup';
    const backupStream = fs.createWriteStream(filename, { flags: 'w' });
    const params = { useApi: true };
    // try to do backup and check err was set in callback
    return u.testBackup(params, 'animaldb', backupStream).then(() => {
      assert.fail('Should throw an "EACCES" error');
    }).catch((resultErr) => {
      // cleanup temp dir
      fs.rmdirSync(dirname);
      // error should have been set
      assert.ok(resultErr);
      assert.strictEqual(resultErr.code, 'EACCES');
    });
  });
});
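A small portability note on the write error test: fs.mkdtempSync with a bare prefix creates the temporary directory relative to the process working directory. A common alternative (illustrative only, not part of this diff) anchors it under os.tmpdir():

const fs = require('fs');
const os = require('os');
const path = require('path');

// mkdtempSync appends six random characters to the given prefix; using
// os.tmpdir() keeps the directory out of the working tree.
const dirname = fs.mkdtempSync(path.join(os.tmpdir(), 'test_backup_'));
console.log(dirname);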

@@ -15,121 +15,24 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it beforeEach */
'use strict';

const assert = require('assert');
const fs = require('fs');
const nock = require('nock');
const request = require('../includes/request.js');
const writer = require('../includes/writer.js');
const noopEmitter = new (require('events')).EventEmitter();
const liner = require('../includes/liner.js');
const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');
const longTestTimeout = 3000;

describe('#unit Check database restore writer', function() {
  const dbUrl = 'http://localhost:5984/animaldb';
  const db = request.client(dbUrl, { parallelism: 1 });

  beforeEach('Reset nocks', function() {
    nock.cleanAll();
  });

  it('should complete successfully', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, []); // success
    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 15);
        assert.ok(nock.isDone());
      })]);
  });

  it('should terminate on a fatal error', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(401, { error: 'Unauthorized' }); // fatal error
    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'Unauthorized');
        assert.strictEqual(err.message, 'Access is denied due to invalid credentials.');
        assert.ok(nock.isDone());
        return true;
      }
    );
  });

  it('should retry on transient errors', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(429, { error: 'Too Many Requests' }) // transient error
      .post('/_bulk_docs')
      .reply(500, { error: 'Internal Server Error' }) // transient error
      .post('/_bulk_docs')
      .reply(200, { ok: true }); // third time lucky success
    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 15);
        assert.ok(nock.isDone());
      })]);
  }).timeout(longTestTimeout);

  it('should fail after 3 transient errors', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(429, { error: 'Too Many Requests' }) // transient error
      .post('/_bulk_docs')
      .reply(500, { error: 'Internal Server Error' }) // transient error
      .post('/_bulk_docs')
      .reply(503, { error: 'Service Unavailable' }); // Final transient error
    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'HTTPFatalError');
        assert.strictEqual(err.message, `503 : post ${dbUrl}/_bulk_docs - Error: Service Unavailable`);
        assert.ok(nock.isDone());
        return true;
      }
    );
  }).timeout(longTestTimeout);

  it('should restore shallow backups without rev info successfully', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, [{ ok: true, id: 'foo', rev: '1-abc' }]); // success
    const w = writer(db, 500, 1, noopEmitter);
    return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_old_shallow.json'), liner(), w),
      once(w, 'finished').then((data) => {
        assert.strictEqual(data[0].total, 11);
        assert.ok(nock.isDone());
      })]);
  });

  it('should get a batch error for non-empty array response with new_edits false', async function() {
    nock(dbUrl)
      .post('/_bulk_docs')
      .reply(200, [{ id: 'foo', error: 'foo', reason: 'bar' }]);
    const w = writer(db, 500, 1, noopEmitter);
    return assert.rejects(
      pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
      (err) => {
        assert.strictEqual(err.name, 'Error');
        assert.strictEqual(err.message, 'Error writing batch with new_edits:false and 1 items');
        assert.ok(nock.isDone());
        return true;
      }
    );
  });
});
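All of these writer tests share one shape: the stream pipeline promise and a once() listener for the writer's custom 'finished' event are awaited together, so a pipeline failure and the summary assertion can both surface. A standalone sketch of that pattern (names are illustrative, not from the diff):

const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');

// Run a pipeline and, in parallel, wait for the sink's custom
// 'finished' event, which is assumed to carry a summary object.
async function runAndSummarize(source, transform, sink) {
  const [, [summary]] = await Promise.all([
    pipeline(source, transform, sink),
    once(sink, 'finished') // resolves with the array of event arguments
  ]);
  return summary;
}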

@@ -1,187 +0,424 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const nock = require('nock');
const util = require('util');
const backupPromise = util.promisify(backup);
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl so validation passes and the backup fails fast with DatabaseNotFound.
const validateArgs = async function(url, opts, errorValidationForAssertRejects) {
const nullStream = fs.createWriteStream('/dev/null');
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
}
return assert.rejects(backupPromise(url, nullStream, opts), errorValidationForAssertRejects);
};
const validateShallowModeArgs = async function(url, opts, msg) {
// We pass assertNoValidationError because for these shallow opts
// we are expecting only a stderr warning
return validateArgs(url, opts, assertNoValidationError()).then(() => {
// Assert the warning message was in stderr
assert(capturedStderr.indexOf(msg) > -1, 'Log warning message was not present');
});
};
const stderrWriteFun = process.stderr.write;
let capturedStderr;
function captureStderr() {
  capturedStderr = '';
  process.stderr.write = function(string, encoding, fd) {
    capturedStderr += string;
  };
}
function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}
// Return a validation object for use with assert.rejects
function assertErrorMessage(msg) {
return { name: 'InvalidOption', message: msg };
}
// For cases where validation should pass we reach a real backup that hits a 404
// mock, so DatabaseNotFound is the expected error in the assertNoValidationError case
function assertNoValidationError() { return { name: 'DatabaseNotFound' }; }
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', async function() {
return validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string'));
});
it('returns no error for valid URL type', async function() {
return validateArgs(goodUrl, {}, assertNoValidationError());
});
it('returns error for invalid (no host) URL', async function() {
return validateArgs('http://', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (no protocol) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (wrong protocol) URL', async function() {
return validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.'));
});
it('returns error for invalid (no path) URL', async function() {
return validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).'));
});
it('returns error for invalid (no protocol, no host) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: 123 }, assertNoValidationError());
});
it('returns error for invalid log type', async function() {
return validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string'));
});
it('returns no error for valid log type', async function() {
return validateArgs(goodUrl, { log: 'log.txt' }, assertNoValidationError());
});
it('returns error for invalid mode type', async function() {
return validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns error for invalid mode string', async function() {
return validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns no error for valid mode type', async function() {
return validateArgs(goodUrl, { mode: 'full' }, assertNoValidationError());
});
it('returns error for invalid output type', async function() {
return validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string'));
});
it('returns no error for valid output type', async function() {
return validateArgs(goodUrl, { output: 'output.txt' }, assertNoValidationError());
});
it('returns error for invalid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: 123 }, assertNoValidationError());
});
it('returns error for invalid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: 123 }, assertNoValidationError());
});
it('returns error for invalid resume type', async function() {
return validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean'));
});
it('returns no error for valid resume type', async function() {
return validateArgs(goodUrl, { resume: false }, assertNoValidationError());
});
it('returns error for invalid key type', async function() {
return validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string'));
});
it('returns error for key and URL credentials supplied', async function() {
return validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.'));
});
it('warns for log arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test' },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for resume arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
it('warns for parallelism arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', parallelism: 10 },
'the option "parallelism" has no effect when using shallow mode.').finally(
() => {
releaseStderr();
});
});
});
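These tests lean on assert.rejects with either a validation function or an object. When given an object, Node compares each listed property against the rejection reason, which is why assertErrorMessage only needs name and message. A minimal self-contained illustration:

'use strict';
const assert = require('assert');

// assert.rejects with an object compares each property (here name and
// message) against the corresponding property of the rejection reason.
async function demo() {
  const err = new Error('Invalid URL, must be type string');
  err.name = 'InvalidOption';
  await assert.rejects(
    Promise.reject(err),
    { name: 'InvalidOption', message: 'Invalid URL, must be type string' }
  );
}
demo().then(() => console.log('matched'));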
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:23" time="0.091">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.003">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.024">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.003">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallelism arg in shallow mode" time="0.004">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:23" time="4.507">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.934">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.79">
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.634">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:27" time="10.366">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.601">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.429">
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.52">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:38" time="5.544">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.368">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.267">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.081">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:43" time="12.971">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.455">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.322">
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.385">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:34:56" time="5.364">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.871">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.537">
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.158">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:35:01" time="7.306">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.333">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.497">
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.663">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:35:09" time="316.287">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.496">
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="313.003">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:40:25" time="487.867">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.643">
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="484.438">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:33" time="2.77">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.507">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:36" time="0.266">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.009">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:36" time="2.324">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.892">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.899">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:38" time="5.205">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.874">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.749">
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.796">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:43" time="6.647">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.36">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.262">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.231">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:48:50" time="33.545">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.583">
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.432">
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:24" time="0.01">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:24" time="0.092">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.017">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.013">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.014">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:24" time="0.137">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.007">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.003">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.007">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.046">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.032">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.009">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:24" time="3.85">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.392">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.399">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.355">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.364">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.469">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.331">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.315">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.389">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.385">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.438">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:28" time="3.889">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.364">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.431">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.468">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.359">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.377">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.44">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.437">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.589">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.412">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:32" time="0.003">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:32" time="0.003">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:32" time="0.039">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.014">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:32" time="0.014">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:32" time="0.005">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.005">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:32" time="10.6">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.004">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.018">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.005">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.51">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.013">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:42" time="0.013">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.012">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:42" time="0.565">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.024">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.523">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.016">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:43" time="15.578">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.013">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.016">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="2.103">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.413">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:49:58" time="47.551">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="47.285">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-11-20T09:50:46" time="4.118">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.009">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.035">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.015">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.022">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.008">
</testcase>
</testsuite>
</testsuites>
{
"name": "@cloudant/couchbackup",
"version": "2.9.15-SNAPSHOT.167",
"version": "2.9.15-SNAPSHOT.168",
"description": "CouchBackup - command-line backup utility for Cloudant/CouchDB",

@@ -40,3 +40,3 @@ "homepage": "https://github.com/IBM/couchbackup",

"devDependencies": {
"eslint": "8.53.0",
"eslint": "8.54.0",
"eslint-config-semistandard": "17.0.0",

@@ -43,0 +43,0 @@ "eslint-config-standard": "17.1.0",

