@cloudant/couchbackup - npm package version comparison

Comparing version 2.9.13-SNAPSHOT.141 to 2.9.13-SNAPSHOT.142

.scannerwork/scanner-report/changesets-2.pb


.scannerwork/report-task.txt

@@ -6,3 +6,3 @@ projectKey=couchbackup

dashboardUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/dashboard?id=couchbackup&branch=main
-ceTaskId=AYqNVrJ1M4c8Twoz-Z74
-ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYqNVrJ1M4c8Twoz-Z74
+ceTaskId=AYrLkmMY003a9bIG60lg
+ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYrLkmMY003a9bIG60lg

@@ -1,2 +1,2 @@

-// Copyright © 2017, 2022 IBM Corp. All rights reserved.
+// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -14,99 +14,213 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const liner = require('./liner.js');
const change = require('./change.js');
const error = require('./error.js');
const debug = require('debug')('couchbackup:spoolchanges');
const nock = require('nock');
/**
* Write log file for all changes from a database, ready for downloading
* in batches.
*
 * @param {object} db - database client object
 * @param {string} log - path to log file to use
 * @param {number} bufferSize - the number of changes per batch/log line
 * @param {events.EventEmitter} ee - event emitter for 'changes' events
 * @param {function(err)} callback - a callback to run on completion
*/
module.exports = function(db, log, bufferSize, ee, callback) {
// list of document ids to process
const buffer = [];
let batch = 0;
let lastSeq = null;
const logStream = fs.createWriteStream(log);
let pending = 0;
// The number of changes to fetch per request
const limit = 100000;
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl and intercept the callback so that a real
// backup is never attempted.
const validateArgs = function(url, opts, callback) {
const nullStream = fs.createWriteStream('/dev/null');
let cb = callback;
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
// replace the callback to handle the nock response
// to avoid attempting a real backup
cb = function(err) {
nullStream.end();
if (err.name === 'DatabaseNotFound') {
// This is what we expect if we reached the backup
// This is success for valid args cases.
err = null;
}
callback(err);
};
}
backup(url, nullStream, opts, cb);
return true;
};
// send documents ids to the queue in batches of bufferSize + the last batch
const processBuffer = function(lastOne) {
if (buffer.length >= bufferSize || (lastOne && buffer.length > 0)) {
debug('writing', buffer.length, 'changes to the backup file');
const b = { docs: buffer.splice(0, bufferSize), batch: batch };
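// log line format: ':t batch<n> [<doc ids>]' - records batch <n> as still to download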
logStream.write(':t batch' + batch + ' ' + JSON.stringify(b.docs) + '\n');
ee.emit('changes', batch);
batch++;
const stderrWriteFun = process.stderr.write;
let capturedStderr;
function captureStderr() {
process.stderr.write = function(string, encoding, fd) {
capturedStderr += string;
};
}
function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}
function assertErrorMessage(msg, done) {
return function(err, data) {
try {
assert(err.message, 'There should be an error message');
assert(err.message.indexOf(msg) >= 0);
assert(data === null || data === undefined, 'There should only be an error.');
done();
} catch (e) {
done(e);
}
};
}
// called once per received change
const onChange = function(c) {
if (c) {
if (c.error) {
ee.emit('error', new error.BackupError('InvalidChange', `Received invalid change: ${c}`));
} else if (c.changes) {
const obj = { id: c.id };
buffer.push(obj);
processBuffer(false);
} else if (c.last_seq) {
lastSeq = c.last_seq;
pending = c.pending;
}
function assertNoError(done) {
return function(err, data) {
try {
assert(err === null, 'There should be no error message.');
done();
} catch (e) {
done(e);
}
};
}
function getChanges(since = 0) {
debug('making changes request since ' + since);
return db.service.postChangesAsStream({ db: db.db, since: since, limit: limit, seqInterval: limit })
.then(response => {
response.result.pipe(liner())
.on('error', function(err) {
logStream.end();
callback(err);
})
.pipe(change(onChange))
.on('error', function(err) {
logStream.end();
callback(err);
})
.on('finish', function() {
processBuffer(true);
if (!lastSeq) {
logStream.end();
debug('changes request terminated before last_seq was sent');
callback(new error.BackupError('SpoolChangesError', 'Changes request terminated before last_seq was sent'));
} else {
debug(`changes request completed with last_seq: ${lastSeq} and ${pending} changes pending.`);
if (pending > 0) {
// Return the next promise
return getChanges(lastSeq);
} else {
debug('finished streaming database changes');
logStream.end(':changes_complete ' + lastSeq + '\n', 'utf8', callback);
}
}
});
})
.catch(err => {
logStream.end();
if (err.status && err.status >= 400) {
callback(error.convertResponseError(err));
} else if (err.name !== 'SpoolChangesError') {
callback(new error.BackupError('SpoolChangesError', `Failed changes request - ${err.message}`));
}
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', function(done) {
validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string', done));
});
it('returns no error for valid URL type', function(done) {
validateArgs(goodUrl, {}, assertNoError(done));
});
it('returns error for invalid (no host) URL', function(done) {
validateArgs('http://', {}, assertErrorMessage('Invalid URL', done));
});
it('returns error for invalid (no protocol) URL', function(done) {
validateArgs('invalid', {}, assertErrorMessage('Invalid URL', done));
});
it('returns error for invalid (wrong protocol) URL', function(done) {
validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.', done));
});
it('returns error for invalid (no path) URL', function(done) {
validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).', done));
});
it('returns error for invalid (no protocol, no host) URL', function(done) {
validateArgs('invalid', {}, assertErrorMessage('Invalid URL', done));
});
it('returns error for invalid buffer size type', function(done) {
validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for zero buffer size', function(done) {
validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for float buffer size', function(done) {
validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns no error for valid buffer size type', function(done) {
validateArgs(goodUrl, { bufferSize: 123 }, assertNoError(done));
});
it('returns error for invalid log type', function(done) {
validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string', done));
});
it('returns no error for valid log type', function(done) {
validateArgs(goodUrl, { log: 'log.txt' }, assertNoError(done));
});
it('returns error for invalid mode type', function(done) {
validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"', done));
});
it('returns error for invalid mode string', function(done) {
validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"', done));
});
it('returns no error for valid mode type', function(done) {
validateArgs(goodUrl, { mode: 'full' }, assertNoError(done));
});
it('returns error for invalid output type', function(done) {
validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string', done));
});
it('returns no error for valid output type', function(done) {
validateArgs(goodUrl, { output: 'output.txt' }, assertNoError(done));
});
it('returns error for invalid parallelism type', function(done) {
validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for zero parallelism', function(done) {
validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for float parallelism', function(done) {
validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns no error for valid parallelism type', function(done) {
validateArgs(goodUrl, { parallelism: 123 }, assertNoError(done));
});
it('returns error for invalid request timeout type', function(done) {
validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for zero request timeout', function(done) {
validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for float request timeout', function(done) {
validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns no error for valid request timeout type', function(done) {
validateArgs(goodUrl, { requestTimeout: 123 }, assertNoError(done));
});
it('returns error for invalid resume type', function(done) {
validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean', done));
});
it('returns no error for valid resume type', function(done) {
validateArgs(goodUrl, { resume: false }, assertNoError(done));
});
it('returns error for invalid key type', function(done) {
validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string', done));
});
it('returns error for key and URL credentials supplied', function(done) {
validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.', done));
});
it('warns for log arg in shallow mode', function(done) {
captureStderr();
try {
validateArgs(goodUrl, { mode: 'shallow', log: 'test' }, function(err, data) {
assert.ok(err);
assert.ok(!data);
assert(capturedStderr.indexOf('The options "log" and "resume" are invalid when using shallow mode.') > -1, 'Log warning message was not present');
});
}
getChanges();
};
done();
} catch (e) {
done(e);
} finally {
releaseStderr();
}
});
it('warns for resume arg in shallow mode', function(done) {
captureStderr();
try {
validateArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true }, function(err, data) {
assert.ok(err);
assert.ok(!data);
assert(capturedStderr.indexOf('The options "log" and "resume" are invalid when using shallow mode.') > -1, 'Log warning message was not present');
});
done();
} catch (e) {
done(e);
} finally {
releaseStderr();
}
});
it('warns for parallelism arg in shallow mode', function(done) {
captureStderr();
try {
validateArgs(goodUrl, { mode: 'shallow', parallelism: 10 }, function(err, data) {
assert.ok(err);
assert.ok(!data);
assert(capturedStderr.indexOf('The option "parallelism" has no effect when using shallow mode.') > -1, 'Log warning message was not present');
});
done();
} catch (e) {
done(e);
} finally {
releaseStderr();
}
});
});
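
For reference, these option names map directly onto the package's exported API. A minimal sketch of a programmatic backup using the options exercised above (the source URL and output file are placeholders, not values from this diff):

'use strict';
const fs = require('fs');
const couchbackup = require('@cloudant/couchbackup');

// back up a database to a local file; URL and path are hypothetical
couchbackup.backup(
  'https://examples.cloudant.com/animaldb',
  fs.createWriteStream('animaldb_backup.txt'),
  { bufferSize: 500, parallelism: 5, mode: 'full' },
  function(err, data) {
    if (err) {
      console.error('Backup failed', err);
    } else {
      console.log('Backup complete', data);
    }
  }
);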

@@ -1,2 +1,2 @@

-// Copyright © 2017, 2018 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,19 +14,232 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it beforeEach */
'use strict';
module.exports = function(db, options, readstream, ee, callback) {
const liner = require('../includes/liner.js')();
const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);
const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const error = require('../includes/error.js');
// pipe the input to the output, via transformation functions
readstream
.pipe(liner) // transform the input stream into per-line
.on('error', function(err) {
// Forward the error to the writer event emitter where we already have
// listeners on for handling errors
writer.emit('error', err);
})
.pipe(writer); // transform the data
const url = 'http://localhost:7777/testdb';
const db = request.client(url, { parallelism: 1 });
const timeoutDb = request.client(url, { parallelism: 1, requestTimeout: 500 });
const longTestTimeout = 3000;
callback(null, writer);
};
beforeEach('Clean nock', function() {
nock.cleanAll();
});
describe('#unit Check request headers', function() {
it('should have a couchbackup user-agent', function(done) {
const couch = nock(url)
.matchHeader('user-agent', /couchbackup-cloudant\/\d+\.\d+\.\d+(?:-SNAPSHOT)? \(Node.js v\d+\.\d+\.\d+\)/)
.head('/good')
.reply(200);
db.service.headDocument({ db: db.db, docId: 'good' }).then(response => {
assert.ok(couch.isDone());
done();
}).catch(err => {
done(err);
});
});
});
describe('#unit Check request response error callback', function() {
it('should not callback with error for 200 response', function(done) {
const couch = nock(url)
.get('/good')
.reply(200, { ok: true });
db.service.getDocument({ db: db.db, docId: 'good' }).then(response => {
assert.ok(response.result);
assert.ok(couch.isDone());
done();
}).catch(err => {
err = error.convertResponseError(err);
done(err);
});
});
it('should callback with error after 3 500 responses', function(done) {
const couch = nock(url)
.get('/bad')
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
});
db.service.getDocument({ db: db.db, docId: 'bad' }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
}).timeout(longTestTimeout);
it('should callback with error after 3 POST 503 responses', function(done) {
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.times(3)
.reply(503, function(uri, requestBody) {
this.req.response.statusMessage = 'Service Unavailable';
return { error: 'service_unavailable', reason: 'Service unavailable' };
});
db.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 Service Unavailable: post ${url}/_bulk_get - Error: service_unavailable, Reason: Service unavailable`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
}).timeout(longTestTimeout);
it('should callback with error after 3 429 responses', function(done) {
const couch = nock(url)
.get('/bad')
.times(3)
.reply(429, function(uri, requestBody) {
this.req.response.statusMessage = 'Too Many Requests';
return { error: 'foo', reason: 'bar' };
});
db.service.getDocument({ db: db.db, docId: 'bad' }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `429 Too Many Requests: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
}).timeout(longTestTimeout);
it('should callback with fatal error for 404 response', function(done) {
const couch = nock(url)
.get('/bad')
.reply(404, function(uri, requestBody) {
this.req.response.statusMessage = 'Not Found';
return { error: 'foo', reason: 'bar' };
});
db.service.getDocument({ db: db.db, docId: 'bad' }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `404 Not Found: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
});
it('should callback with same error for no status code error response', function(done) {
const couch = nock(url)
.get('/bad')
.times(3)
.replyWithError('testing badness');
db.service.getDocument({ db: db.db, docId: 'bad' }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
const err2 = error.convertResponseError(err);
assert.strictEqual(err, err2);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
}).timeout(longTestTimeout);
it('should retry request if an HTTP request times out', function(done) {
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.delay(1000)
.reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }] } })
.post('/_bulk_get')
.query(true)
.reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }, { id: '2', ok: { _id: '2' } }] } });
timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then(response => {
assert.ok(response);
assert.ok(response.result);
assert.ok(response.result.results);
assert.ok(response.result.results.docs);
assert.strictEqual(response.result.results.docs.length, 2);
assert.ok(couch.isDone());
done();
}).catch(err => {
done(err);
});
});
it('should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests get timed out', function(done) {
// Increase the timeout for this test to allow for the delays
this.timeout(3000);
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.delay(1000)
.times(3)
.reply(200, { ok: true });
timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
// Note axios returns ECONNABORTED rather than ESOCKETTIMEDOUT
// See https://github.com/axios/axios/issues/2710 via https://github.com/axios/axios/issues/1543
assert.strictEqual(err.statusText, 'ECONNABORTED');
assert.strictEqual(err.message, `timeout of 500ms exceeded: post ${url}/_bulk_get ECONNABORTED`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
});
describe('#unit Check credentials', function() {
it('should properly decode username and password', function(done) {
const username = 'user%123';
const password = 'colon:at@321';
const url = `http://${encodeURIComponent(username)}:${encodeURIComponent(password)}@localhost:7777/testdb`;
const sessionUrl = 'http://localhost:7777';
const couch = nock(sessionUrl)
.post('/_session', { username: username, password: password })
.reply(200, { ok: true }, { 'Set-Cookie': 'AuthSession=ABC123DEF4356;' })
.get('/')
.reply(200);
const db = request.client(url, { parallelism: 1 });
db.service.getServerInformation().then(response => {
assert.ok(response);
assert.ok(couch.isDone());
done();
}).catch(err => {
done(err);
});
});
});
});

@@ -1,2 +1,2 @@

-// Copyright © 2017 IBM Corp. All rights reserved.
+// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,80 +14,56 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global beforeEach afterEach */
'use strict';
const { CloudantV1 } = require('@ibm-cloud/cloudant');
const url = new URL((process.env.COUCH_BACKEND_URL) ? process.env.COUCH_BACKEND_URL : 'https://no-couch-backend-url-set.test');
const { BasicAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const authenticator = (url.username) ? new BasicAuthenticator({ username: url.username, password: decodeURIComponent(url.password) }) : new NoAuthAuthenticator();
const serviceOpts = {
authenticator: authenticator
};
const cloudant = new CloudantV1(serviceOpts);
// Remove auth from URL before using for service
cloudant.setServiceUrl(new URL(url.pathname, url.origin).toString());
const uuid = require('uuid').v4;
const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');
const onLine = function(onCommand, getDocs) {
const change = new stream.Transform({ objectMode: true });
// Mocha hooks that will be at the root context so run for all tests
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
beforeEach('Create test database', function(done) {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to create the DB
this.timeout(10 * 1000);
const unique = uuid();
this.fileName = `${unique}`;
this.dbName = 'couchbackup_test_' + unique;
cloudant.putDatabase({ db: this.dbName }).then(() => { done(); }).catch((err) => { done(err); });
} else {
done();
}
});
let matches;
afterEach('Delete test database', function(done) {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to delete the DB
this.timeout(10 * 1000);
deleteIfExists(this.fileName);
deleteIfExists(`${this.fileName}.log`);
cloudant.deleteDatabase({ db: this.dbName }).then(() => { done(); }).catch((err) => { done(err); });
} else {
done();
}
});
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
function deleteIfExists(fileName) {
fs.unlink(fileName, function(err) {
if (err) {
if (err.code !== 'ENOENT') {
console.error(`${err.code} ${err.message}`);
}
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
// extract doc ids
if (getDocs && obj.command === 't') {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
}
onCommand(obj);
}
done();
};
return change;
};
/**
* Generate a list of remaining batches from a download file.
*
* @param {string} log - log file name
* @param {function} callback - callback with err, {changesComplete: N, batches: N}.
* changesComplete signifies whether the log file appeared to
* have completed reading the changes feed (contains :changes_complete).
* batches are remaining batch IDs for download.
*/
module.exports = function(log, callback) {
// our sense of state
const state = {
};
let changesComplete = false;
// called with each line from the log file
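// commands: ':t' marks a batch still to download, ':d' marks it done (assumed
// written by the download phase), ':changes_complete' ends the changes feed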
const onCommand = function(obj) {
if (obj.command === 't') {
state[obj.batch] = true;
} else if (obj.command === 'd') {
delete state[obj.batch];
} else if (obj.command === 'changes_complete') {
changesComplete = true;
}
};
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, false))
.on('finish', function() {
const obj = { changesComplete: changesComplete, batches: state };
callback(null, obj);
});
};
});
}

@@ -1,2 +1,2 @@

-// Copyright © 2017, 2022 IBM Corp. All rights reserved.
+// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -14,68 +14,56 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const async = require('async');
const error = require('./error.js');
const events = require('events');
const fs = require('fs');
const u = require('./citestutils.js');
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime();
let batch = 0;
let hasErrored = false;
let startKey = null;
let total = 0;
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Compression tests', params), function() {
const p = u.p(params, { compression: true });
async.doUntil(
function(callback) {
// Note, include_docs: true is set automatically when using the
// fetch function.
const opts = { db: db.db, limit: options.bufferSize, includeDocs: true };
// To avoid double fetching a document solely for the purposes of getting
// the next ID to use as a startKey for the next page we instead use the
// last ID of the current page and append the lowest unicode sort
// character.
if (startKey) opts.startKey = `${startKey}\0`;
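// e.g. if the last id of the previous page was 'doc99', the next request
// uses startKey 'doc99\0', which sorts immediately after 'doc99'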
db.service.postAllDocs(opts).then(response => {
const body = response.result;
if (!body.rows) {
ee.emit('error', new error.BackupError(
'AllDocsError', 'ERROR: Invalid all docs response'));
callback();
} else {
if (body.rows.length < opts.limit) {
startKey = null; // last batch
it('should backup animaldb to a compressed file', function(done) {
// Allow up to 60 s for backup of animaldb
u.setTimeout(this, 60);
const compressedBackup = `./${this.fileName}`;
const output = fs.createWriteStream(compressedBackup);
output.on('open', function() {
u.testBackup(p, 'animaldb', output, function(err) {
if (err) {
done(err);
} else {
startKey = body.rows[opts.limit - 1].id;
u.assertGzipFile(compressedBackup, done);
}
});
});
});
const docs = [];
body.rows.forEach(function(doc) {
docs.push(doc.doc);
});
if (docs.length > 0) {
ee.emit('received', {
batch: batch++,
data: docs,
length: docs.length,
time: (new Date().getTime() - start) / 1000,
total: total += docs.length
});
}
callback();
it('should backup and restore animaldb via a compressed file', function(done) {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const compressedBackup = `./${this.fileName}`;
u.testBackupAndRestoreViaFile(p, 'animaldb', compressedBackup, this.dbName, function(err) {
if (err) {
done(err);
} else {
u.assertGzipFile(compressedBackup, done);
}
}).catch(err => {
err = error.convertResponseError(err);
ee.emit('error', err);
hasErrored = true;
callback();
});
},
function(callback) { callback(null, hasErrored || startKey == null); },
function() { ee.emit('finished', { total: total }); }
);
});
return ee;
};
it('should backup and restore animaldb via a compressed stream', function(done) {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
u.testDirectBackupAndRestore(p, 'animaldb', this.dbName, done);
});
it('should backup and restore largedb2g via a compressed file #slower', function(done) {
// Takes ~ 25 min using CLI, but sometimes over an hour with API
u.setTimeout(this, 180 * 60);
const compressedBackup = `./${this.fileName}`;
params.compression = true;
u.testBackupAndRestoreViaFile(p, 'largedb2g', compressedBackup, this.dbName, done);
});
});
});

@@ -1,167 +1,156 @@

<testsuites name="test-iam">
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:23" time="4.757">
<testcase classname="test-iam.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="1.019">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.905">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.642">
<system-out><![CDATA[{ total: 11 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:28" time="11.475">
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="3.7">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.531">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.435">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:39" time="5.433">
<testcase classname="test-iam.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.338">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.255">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.019">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:45" time="14.658">
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="4.82">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.5">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.526">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:59" time="6.621">
<testcase classname="test-iam.Compression tests using API" name="should backup animaldb to a compressed file" time="0.89">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="2.778">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.132">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:43:06" time="8.649">
<testcase classname="test-iam.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.357">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="3.708">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.753">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:43:15" time="317.344">
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore animaldb" time="2.15">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="314.421">
<system-out><![CDATA[{ total: 522948 }
{ total: 522948 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:48:32" time="497.779">
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore animaldb" time="3.935">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="493.064">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:56:50" time="4.049">
<testcase classname="test-iam.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="3.785">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:56:54" time="0.278">
<testcase classname="test-iam.Write error tests" name="calls callback with error set when stream is not writeable" time="0.01">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:56:54" time="2.178">
<testcase classname="test-iam.Event tests" name="should get a finished event when using stdout" time="0.812">
</testcase>
<testcase classname="test-iam.Event tests" name="should get a finished event when using file output" time="0.83">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:56:56" time="5.247">
<testcase classname="test-iam.Resume tests using API" name="should create a log file" time="0.828">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.851">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.765">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:02" time="6.919">
<testcase classname="test-iam.Resume tests using CLI" name="should create a log file" time="1.454">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.276">
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.375">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:09" time="33.413">
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m" time="16.123">
<system-out><![CDATA[Backup process close null SIGTERM
Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.747">
<system-out><![CDATA[Backup process close null SIGTERM
Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:42" time="46.133">
<testcase classname="test-iam.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="45.86">
</testcase>
</testsuite>
</testsuites>
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const changes = require('../includes/spoolchanges.js');
const url = 'http://localhost:7777';
const dbName = 'fakenockdb';
const longTestTimeout = 3000;
const db = request.client(`${url}/${dbName}`, { parallelism: 1 });
const seqSuffix = Buffer.alloc(124, 'abc123').toString('base64');
function provideChanges(batchSize, totalChanges, fullResponse = false) {
let pending = totalChanges;
const sparseResultsArray = Array(batchSize).fill({
seq: null,
id: 'doc',
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
});
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(totalChanges / batchSize + (totalChanges % batchSize > 0 ? 1 : 0))
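// i.e. one mocked reply per full batch, plus one more for any remainder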
.reply(200, (uri, requestBody) => {
pending -= batchSize;
const lastSeq = (totalChanges - pending);
const seq = lastSeq - batchSize;
return {
results: fullResponse
? Array.from(Array(batchSize), (_, i) => {
return {
seq: `${seq + i}-${seqSuffix}`,
id: `doc${seq + i}`,
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
};
})
: sparseResultsArray,
pending: pending,
last_seq: `${lastSeq}-abc`
};
});
}
describe('#unit Check spool changes', function() {
it('should terminate on request error', function(done) {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.replyWithError({ code: 'ECONNRESET', message: 'socket hang up' });
changes(db, '/dev/null', 500, null, function(err) {
assert.strictEqual(err.name, 'SpoolChangesError');
assert.strictEqual(err.message, `Failed changes request - socket hang up: post ${url}/${dbName}/_changes`);
assert.ok(nock.isDone());
done();
});
}).timeout(longTestTimeout);
it('should terminate on bad HTTP status code response', function(done) {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
});
changes(db, '/dev/null', 500, null, function(err) {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: post ${url}/${dbName}/_changes - Error: foo, Reason: bar`);
assert.ok(nock.isDone());
done();
});
}).timeout(longTestTimeout);
it('should keep collecting changes', function(done) {
// This test validates that spooling will correctly
// continue across multiple requests
// (4 batches of 100000 to be precise).
// This test might take up to 10 seconds
this.timeout(10 * 1000);
// Use full changes for this test
provideChanges(100000, 400000, true);
changes(db, '/dev/null', 500, null, function(err) {
assert.ok(!err);
assert.ok(nock.isDone());
done();
});
});
it('should keep collecting sparse changes', function(done) {
// This test checks that making thousands of requests doesn't
// make anything bad happen.
// This test might take up to 25 seconds
this.timeout(25 * 1000);
// Use sparse changes for this test and a batch size of 1
provideChanges(1, 2500);
changes(db, '/dev/null', 500, null, function(err) {
assert.ok(!err);
assert.ok(nock.isDone());
done();
});
});
});
describe('Longer spool changes checks', function() {
it('#slow should keep collecting changes (25M)', function(done) {
// This test might take up to 5 minutes
this.timeout(5 * 60 * 1000);
// Note changes spooling uses a constant batch size, we are setting
// a test value here and setting the buffer to match
const batch = 100000;
// Use sparse changes for this test
provideChanges(batch, 25000000);
changes(db, '/dev/null', batch, null, function(err) {
assert.ok(!err);
assert.ok(nock.isDone());
done();
});
});
it('#slower should keep collecting changes (500M)', function(done) {
// This test might take up to 90 minutes
this.timeout(90 * 60 * 1000);
// Note changes spooling uses a constant batch size, we are setting
// a test value here and setting the buffer to match
const batch = 1000000;
// Use full changes for this test to exercise load
provideChanges(batch, 500000000, true);
changes(db, '/dev/null', batch, null, function(err) {
assert.ok(!err);
assert.ok(nock.isDone());
done();
});
});
});

@@ -1,1 +1,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.

@@ -14,144 +15,43 @@ //

// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const changes = require('../includes/spoolchanges.js');
const error = require('../includes/error.js');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const restoreDebug = debug('couchbackup:restore');
const restoreBatchDebug = debug('couchbackup:restore:batch');
const url = 'http://localhost:7777';
const dbName = 'fakenockdb';
const longTestTimeout = 3000;
restoreDebug.enabled = true;
const db = request.client(`${url}/${dbName}`, { parallelism: 1 });
const program = parser.parseRestoreArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);
const opts = {
bufferSize: program.bufferSize,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};
const seqSuffix = Buffer.alloc(124, 'abc123').toString('base64');
function provideChanges(batchSize, totalChanges, fullResponse = false) {
let pending = totalChanges;
const sparseResultsArray = Array(batchSize).fill({
seq: null,
id: 'doc',
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
});
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(totalChanges / batchSize + (totalChanges % batchSize > 0 ? 1 : 0))
.reply(200, (uri, requestBody) => {
pending -= batchSize;
const lastSeq = (totalChanges - pending);
const seq = lastSeq - batchSize;
return {
results: fullResponse
? Array.from(Array(batchSize), (_, i) => {
return {
seq: `${seq + i}-${seqSuffix}`,
id: `doc${seq + i}`,
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
};
})
: sparseResultsArray,
pending: pending,
last_seq: `${lastSeq}-abc`
};
});
}
// log configuration to console
console.error('='.repeat(80));
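// mask any user:password URL credentials and the IAM API key before logging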
console.error('Performing restore on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));
describe('#unit Check spool changes', function() {
it('should terminate on request error', function(done) {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.replyWithError({ code: 'ECONNRESET', message: 'socket hang up' });
restoreBatchDebug.enabled = !program.quiet;
changes(db, '/dev/null', 500, null, function(err) {
assert.strictEqual(err.name, 'SpoolChangesError');
assert.strictEqual(err.message, `Failed changes request - socket hang up: post ${url}/${dbName}/_changes`);
assert.ok(nock.isDone());
done();
});
}).timeout(longTestTimeout);
it('should terminate on bad HTTP status code response', function(done) {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
});
changes(db, '/dev/null', 500, null, function(err) {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: post ${url}/${dbName}/_changes - Error: foo, Reason: bar`);
assert.ok(nock.isDone());
done();
});
}).timeout(longTestTimeout);
it('should keep collecting changes', function(done) {
// This test validates that spooling will correctly
// continue across multiple requests
// (4 batches of 100000 to be precise).
// This test might take up to 10 seconds
this.timeout(10 * 1000);
// Use full changes for this test
provideChanges(100000, 400000, true);
changes(db, '/dev/null', 500, null, function(err) {
assert.ok(!err);
assert.ok(nock.isDone());
done();
});
});
it('should keep collecting sparse changes', function(done) {
// This test checks that making thousands of requests doesn't
// make anything bad happen.
// This test might take up to 25 seconds
this.timeout(25 * 1000);
// Use sparse changes for this test and a batch size of 1
provideChanges(1, 2500);
changes(db, '/dev/null', 500, null, function(err) {
assert.ok(!err);
assert.ok(nock.isDone());
done();
});
});
return couchbackup.restore(
process.stdin, // restore from stdin
databaseUrl,
opts,
error.terminationCallback
).on('restored', function(obj) {
restoreBatchDebug('restored', obj.total);
}).on('error', function(e) {
restoreDebug('ERROR', e);
}).on('finished', function(obj) {
restoreDebug('finished', obj);
});
describe('Longer spool changes checks', function() {
it('#slow should keep collecting changes (25M)', function(done) {
// This test might take up to 5 minutes
this.timeout(5 * 60 * 1000);
// Note changes spooling uses a constant batch size, we are setting
// a test value here and setting the buffer to match
const batch = 100000;
// Use sparse changes for this test
provideChanges(batch, 25000000);
changes(db, '/dev/null', batch, null, function(err) {
assert.ok(!err);
assert.ok(nock.isDone());
done();
});
});
it('#slower should keep collecting changes (500M)', function(done) {
// This test might take up to 90 minutes
this.timeout(90 * 60 * 1000);
// Note changes spooling uses a constant batch size, we are setting
// a test value here and setting the buffer to match
const batch = 1000000;
// Use full changes for this test to exercise load
provideChanges(batch, 500000000, true);
changes(db, '/dev/null', batch, null, function(err) {
assert.ok(!err);
assert.ok(nock.isDone());
done();
});
});
});

@@ -1,1 +1,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.

@@ -14,56 +15,64 @@ //

// limitations under the License.
/* global beforeEach afterEach */
'use strict';
const { CloudantV1 } = require('@ibm-cloud/cloudant');
const url = new URL((process.env.COUCH_BACKEND_URL) ? process.env.COUCH_BACKEND_URL : 'https://no-couch-backend-url-set.test');
const { BasicAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const authenticator = (url.username) ? new BasicAuthenticator({ username: url.username, password: decodeURIComponent(url.password) }) : new NoAuthAuthenticator();
const serviceOpts = {
authenticator: authenticator
};
const cloudant = new CloudantV1(serviceOpts);
// Remove auth from URL before using for service
cloudant.setServiceUrl(new URL(url.pathname, url.origin).toString());
const uuid = require('uuid').v4;
const error = require('../includes/error.js');
const fs = require('fs');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const backupDebug = debug('couchbackup:backup');
const backupBatchDebug = debug('couchbackup:backup:batch');
// Mocha hooks that will be at the root context so run for all tests
backupDebug.enabled = true;
beforeEach('Create test database', function(done) {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to create the DB
this.timeout(10 * 1000);
const unique = uuid();
this.fileName = `${unique}`;
this.dbName = 'couchbackup_test_' + unique;
cloudant.putDatabase({ db: this.dbName }).then(() => { done(); }).catch((err) => { done(err); });
} else {
done();
const program = parser.parseBackupArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);
const opts = {
bufferSize: program.bufferSize,
log: program.log,
mode: program.mode,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
resume: program.resume,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};
// log configuration to console
console.error('='.repeat(80));
console.error('Performing backup on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));
backupBatchDebug.enabled = !program.quiet;
let ws = process.stdout;
// open output file
if (program.output) {
let flags = 'w';
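// append to the existing output file when resuming a logged backup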
if (program.log && program.resume) {
flags = 'a';
}
});
const fd = fs.openSync(program.output, flags);
ws = fs.createWriteStream(null, { fd: fd });
}
afterEach('Delete test database', function(done) {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to delete the DB
this.timeout(10 * 1000);
deleteIfExists(this.fileName);
deleteIfExists(`${this.fileName}.log`);
cloudant.deleteDatabase({ db: this.dbName }).then(() => { done(); }).catch((err) => { done(err); });
} else {
done();
}
backupDebug('Fetching all database changes...');
return couchbackup.backup(
databaseUrl,
ws,
opts,
error.terminationCallback
).on('changes', function(batch) {
backupBatchDebug('Total batches received:', batch + 1);
}).on('written', function(obj) {
backupBatchDebug('Written batch ID:', obj.batch, 'Total document revisions written:', obj.total, 'Time:', obj.time);
}).on('error', function(e) {
backupDebug('ERROR', e);
}).on('finished', function(obj) {
backupDebug('Finished - Total document revisions written:', obj.total);
});
function deleteIfExists(fileName) {
fs.unlink(fileName, function(err) {
if (err) {
if (err.code !== 'ENOENT') {
console.error(`${err.code} ${err.message}`);
}
}
});
}

@@ -1,29 +1,167 @@

// Copyright © 2017 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global describe it */
'use strict';
const u = require('./citestutils.js');
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#slowest End to end backup and restore', params), function() {
// 10 GB is about the largest the CI can handle before getting very upset
// about how long things are taking
it('should backup and restore largedb10g', function(done) {
u.setTimeout(this, 350 * 60);
u.testDirectBackupAndRestore(params, 'largedb10g', this.dbName, done);
});
});
});
<testsuites name="test-iam">
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:43:48" time="4.793">
<testcase classname="test-iam.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="1.045">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.832">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.765">
<system-out><![CDATA[{ total: 11 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:43:52" time="11.326">
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="3.58">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.417">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.53">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:04" time="7.874">
<testcase classname="test-iam.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.44">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.186">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="3.437">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:12" time="15.066">
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="5.385">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.359">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.516">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:27" time="6.451">
<testcase classname="test-iam.Compression tests using API" name="should backup animaldb to a compressed file" time="0.858">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="2.585">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.202">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:33" time="8.344">
<testcase classname="test-iam.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.291">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="3.537">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.722">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:42" time="317.22">
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore animaldb" time="2.244">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="314.187">
<system-out><![CDATA[{ total: 522948 }
{ total: 522948 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:49:59" time="494.496">
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.666">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="491.048">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:13" time="3.969">
<testcase classname="test-iam.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="3.702">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:17" time="0.271">
<testcase classname="test-iam.Write error tests" name="calls callback with error set when stream is not writeable" time="0.011">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:17" time="2.319">
<testcase classname="test-iam.Event tests" name="should get a finished event when using stdout" time="0.9">
</testcase>
<testcase classname="test-iam.Event tests" name="should get a finished event when using file output" time="0.882">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:20" time="5.057">
<testcase classname="test-iam.Resume tests using API" name="should create a log file" time="0.823">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.724">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.719">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:25" time="6.57">
<testcase classname="test-iam.Resume tests using CLI" name="should create a log file" time="1.292">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.225">
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.255">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:31" time="33.523">
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m" time="16.394">
<system-out><![CDATA[Backup process close null SIGTERM
Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.603">
<system-out><![CDATA[Backup process close null SIGTERM
Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:05" time="47.269">
<testcase classname="test-iam.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="47.003">
</testcase>
</testsuite>
</testsuites>

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,119 +14,406 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
/**
* CouchBackup module.
* @module couchbackup
* @see module:couchbackup
*/
const backupFull = require('./includes/backup.js');
const defaults = require('./includes/config.js').apiDefaults;
const error = require('./includes/error.js');
const request = require('./includes/request.js');
const restoreInternal = require('./includes/restore.js');
const backupShallow = require('./includes/shallowbackup.js');
const debug = require('debug')('couchbackup:app');
const events = require('events');
const fs = require('fs');
const u = require('./citestutils.js');
const URL = require('url').URL;
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Basic backup and restore', params), function() {
it('should backup animaldb to a file correctly', function(done) {
// Allow up to 40 s to backup and compare (it should be much faster)!
u.setTimeout(this, 40);
const actualBackup = `./${this.fileName}`;
// Create a file and backup to it
const output = fs.createWriteStream(actualBackup);
output.on('open', function() {
u.testBackup(params, 'animaldb', output, function(err) {
if (err) {
done(err);
} else {
u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected.json', done);
}
});
});
/**
* Test for a positive, safe integer.
*
* @param {object} x - Object under test.
*/
function isSafePositiveInteger(x) {
// https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
const MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || 9007199254740991;
// Is it a number?
return Object.prototype.toString.call(x) === '[object Number]' &&
// Is it an integer?
x % 1 === 0 &&
// Is it positive?
x > 0 &&
// Is it less than the maximum safe integer?
x <= MAX_SAFE_INTEGER;
}
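// Illustrative examples (editorial, not from the source) of how this check behaves:
//   isSafePositiveInteger(500)                         // true
//   isSafePositiveInteger(0)                           // false - not positive
//   isSafePositiveInteger(2.5)                         // false - not an integer
//   isSafePositiveInteger('5')                         // false - not a number
//   isSafePositiveInteger(Number.MAX_SAFE_INTEGER + 2) // false - beyond the safe range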
/**
* Validate arguments.
*
* @param {object} url - URL of database.
* @param {object} opts - Options.
* @param {function} cb - Callback to be called on error.
*/
function validateArgs(url, opts, cb) {
if (typeof url !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid URL, must be type string'), null);
return;
}
if (opts && typeof opts.bufferSize !== 'undefined' && !isSafePositiveInteger(opts.bufferSize)) {
cb(new error.BackupError('InvalidOption', 'Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.iamApiKey !== 'undefined' && typeof opts.iamApiKey !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid iamApiKey option, must be type string'), null);
return;
}
if (opts && typeof opts.log !== 'undefined' && typeof opts.log !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid log option, must be type string'), null);
return;
}
if (opts && typeof opts.mode !== 'undefined' && ['full', 'shallow'].indexOf(opts.mode) === -1) {
cb(new error.BackupError('InvalidOption', 'Invalid mode option, must be either "full" or "shallow"'), null);
return;
}
if (opts && typeof opts.output !== 'undefined' && typeof opts.output !== 'string') {
cb(new error.BackupError('InvalidOption', 'Invalid output option, must be type string'), null);
return;
}
if (opts && typeof opts.parallelism !== 'undefined' && !isSafePositiveInteger(opts.parallelism)) {
cb(new error.BackupError('InvalidOption', 'Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.requestTimeout !== 'undefined' && !isSafePositiveInteger(opts.requestTimeout)) {
cb(new error.BackupError('InvalidOption', 'Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'), null);
return;
}
if (opts && typeof opts.resume !== 'undefined' && typeof opts.resume !== 'boolean') {
cb(new error.BackupError('InvalidOption', 'Invalid resume option, must be type boolean'), null);
return;
}
// Validate URL and ensure no auth if using key
try {
const urlObject = new URL(url);
// We require a protocol, host and path (for db), fail if any is missing.
if (urlObject.protocol !== 'https:' && urlObject.protocol !== 'http:') {
cb(new error.BackupError('InvalidOption', 'Invalid URL protocol.'));
return;
}
if (!urlObject.host) {
cb(new error.BackupError('InvalidOption', 'Invalid URL host.'));
return;
}
if (!urlObject.pathname || urlObject.pathname === '/') {
cb(new error.BackupError('InvalidOption', 'Invalid URL, missing path element (no database).'));
return;
}
if (opts && opts.iamApiKey && (urlObject.username || urlObject.password)) {
cb(new error.BackupError('InvalidOption', 'URL user information must not be supplied when using IAM API key.'));
return;
}
} catch (err) {
cb(err);
return;
}
// Warn about options that are invalid in shallow mode.
// We don't error, for backwards compatibility with scripts that may have been
// written to pass complete sets of options through.
if (opts && opts.mode === 'shallow') {
if (opts.log || opts.resume) {
console.warn('WARNING: the options "log" and "resume" are invalid when using shallow mode.');
}
if (opts.parallelism) {
console.warn('WARNING: the option "parallelism" has no effect when using shallow mode.');
}
}
if (opts && opts.resume) {
if (!opts.log) {
// This is the second place we check for the presence of the log option in
// conjunction with resume; it has to be here for the API case.
cb(new error.BackupError('NoLogFileName', 'To resume a backup, a log file must be specified'), null);
return;
} else if (!fs.existsSync(opts.log)) {
cb(new error.BackupError('LogDoesNotExist', 'To resume a backup, the log file must exist'), null);
return;
}
}
return true;
}
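// A minimal sketch (editorial, not from the source) of how a validation failure
// surfaces through the public API: validateArgs invokes the callback with a
// BackupError and backup()/restore() bail out before making any HTTP requests.
// The URL and option value below are illustrative only.
//
//   const couchbackup = require('@cloudant/couchbackup');
//   couchbackup.backup('http://localhost:5984/mydb', process.stdout, { bufferSize: 0 },
//     function(err) {
//       // err.name === 'InvalidOption'
//       // err.message starts 'Invalid buffer size option...'
//     });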
function addEventListener(indicator, emitter, event, f) {
emitter.on(event, function(...args) {
if (!indicator.errored) {
if (event === 'error') indicator.errored = true;
f(...args);
}
});
}
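// Note (editorial, not from the source): because the indicator object is shared
// across listeners, the first 'error' event flips indicator.errored and every
// subsequent event routed through addEventListener is ignored, which prevents
// the completion callback from being invoked more than once.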
/*
Check the backup database exists and that the credentials used have
visibility. Callback with a fatal error if there is a problem with the DB.
@param {object} db - database object
@param {function(err)} callback - error is undefined if DB exists
*/
function proceedIfBackupDbValid(db, callback) {
db.service.headDatabase({ db: db.db }).then(() => callback()).catch(err => {
err = error.convertResponseError(err, err => parseIfDbValidResponseError(db, err));
callback(err);
});
}
/*
Check that the restore database exists, is new and is empty. Also verify that the credentials used have
visibility. Callback with a fatal error if there is a problem with the DB.
@param {object} db - database object
@param {function(err)} callback - error is undefined if DB exists, new and empty
*/
function proceedIfRestoreDbValid(db, callback) {
db.service.getDatabaseInformation({ db: db.db }).then(response => {
const { doc_count: docCount, doc_del_count: deletedDocCount } = response.result;
// The system databases can have validation ddoc(s) injected into them on creation.
// This skews the doc count, so we completely exclude the system databases from this check.
// The assumption here is that users restoring system databases know what they are doing.
if (!db.db.startsWith('_') && (docCount !== 0 || deletedDocCount !== 0)) {
const notEmptyDBErr = new Error(`Target database ${db.url}${db.db} is not empty.`);
notEmptyDBErr.name = 'DatabaseNotEmpty';
callback(notEmptyDBErr);
} else {
callback();
}
}).catch(err => {
err = error.convertResponseError(err, err => parseIfDbValidResponseError(db, err));
callback(err);
});
}
/*
Convert the database validation response error to a special DatabaseNotFound error
in case the database is missing. Otherwise delegate to the default error factory.
@param {object} db - database object
@param {object} err - HTTP response error
*/
function parseIfDbValidResponseError(db, err) {
if (err && err.status === 404) {
// Override the error type and message for the DB not found case
const msg = `Database ${db.url}` +
`${db.db} does not exist. ` +
'Check the URL and database name have been specified correctly.';
const noDBErr = new Error(msg);
noDBErr.name = 'DatabaseNotFound';
return noDBErr;
}
// Delegate to the default error factory if it wasn't a 404
return error.convertResponseError(err);
}
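// Illustrative outcome (editorial, not from the source): a 404 from the existence
// check is surfaced as err.name === 'DatabaseNotFound' with a message that includes
// the URL and database name; any other response error is passed unchanged to the
// default error.convertResponseError factory.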
module.exports = {
/**
* Backup a Cloudant database to a stream.
*
* @param {string} srcUrl - URL of database to backup.
* @param {stream.Writable} targetStream - Stream to write content to.
* @param {object} opts - Backup options.
* @param {number} [opts.parallelism=5] - Number of parallel HTTP requests to use.
* @param {number} [opts.bufferSize=500] - Number of documents per batch request.
* @param {number} [opts.requestTimeout=120000] - Milliseconds to wait before retrying an HTTP request.
* @param {string} [opts.iamApiKey] - IAM API key to use to access Cloudant database.
* @param {string} [opts.log] - Log file name. Default uses a temporary file.
* @param {boolean} [opts.resume] - Whether to resume from existing log.
* @param {string} [opts.mode=full] - Use `full` or `shallow` mode.
* @param {backupRestoreCallback} callback - Called on completion.
*/
backup: function(srcUrl, targetStream, opts, callback) {
const listenerErrorIndicator = { errored: false };
if (typeof callback === 'undefined' && typeof opts === 'function') {
callback = opts;
opts = {};
}
if (!validateArgs(srcUrl, opts, callback)) {
// bad args, bail
return;
}
// if there is an error writing to the stream, call the completion
// callback with the error set
addEventListener(listenerErrorIndicator, targetStream, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
if (callback) callback(err);
});
it('should restore animaldb to a database correctly', function(done) {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_expected.json');
const dbName = this.dbName;
input.on('open', function() {
u.testRestore(params, input, dbName, function(err) {
if (err) {
done(err);
} else {
u.dbCompare('animaldb', dbName, done);
opts = Object.assign({}, defaults(), opts);
const ee = new events.EventEmitter();
// Set up the DB client
const backupDB = request.client(srcUrl, opts);
// Validate the DB exists, before proceeding to backup
proceedIfBackupDbValid(backupDB, function(err) {
if (err) {
if (err.name === 'DatabaseNotFound') {
err.message = `${err.message} Ensure the backup source database exists.`;
}
// Didn't exist, or another fatal error, exit
callback(err);
return;
}
let backup = null;
if (opts.mode === 'shallow') {
backup = backupShallow;
} else { // full mode
backup = backupFull;
}
// If resuming, write a newline in case one is missing after an interruption
// of the previous backup. If the previous backup ended cleanly this will
// produce an empty line that the restore handles gracefully.
if (opts.resume) {
targetStream.write('\n');
}
// Get the event emitter from the backup process so we can handle events
// before passing them on to the app's event emitter if needed.
const internalEE = backup(backupDB, opts);
addEventListener(listenerErrorIndicator, internalEE, 'changes', function(batch) {
ee.emit('changes', batch);
});
addEventListener(listenerErrorIndicator, internalEE, 'received', function(obj, q, logCompletedBatch) {
// this may be too verbose to have as well as the "backed up" message
// debug(' received batch', obj.batch, ' docs: ', obj.total, 'Time', obj.time);
// Callback to emit the written event when the content is flushed
function writeFlushed() {
ee.emit('written', { total: obj.total, time: obj.time, batch: obj.batch });
if (logCompletedBatch) {
logCompletedBatch(obj.batch);
}
debug(' backed up batch', obj.batch, ' docs: ', obj.total, 'Time', obj.time);
}
// Write the received content to the targetStream
const continueWriting = targetStream.write(JSON.stringify(obj.data) + '\n',
'utf8',
writeFlushed);
if (!continueWriting) {
// The buffer was full, pause the queue to stop the writes until we
// get a drain event
if (q && !q.paused) {
q.pause();
targetStream.once('drain', function() {
q.resume();
});
}
}
});
// For errors we expect; they may or may not be fatal
addEventListener(listenerErrorIndicator, internalEE, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
callback(err);
});
addEventListener(listenerErrorIndicator, internalEE, 'finished', function(obj) {
function emitFinished() {
debug('Backup complete - written ' + JSON.stringify(obj));
const summary = { total: obj.total };
ee.emit('finished', summary);
if (callback) callback(null, summary);
}
if (targetStream === process.stdout) {
// stdout cannot emit a finish event so use a final write + callback
targetStream.write('', 'utf8', emitFinished);
} else {
// If we're writing to a file, end the writes and register the
// emitFinished function for a callback when the file stream's finish
// event is emitted.
targetStream.end('', 'utf8', emitFinished);
}
});
});
return ee;
},
it('should execute a shallow mode backup successfully', function(done) {
// Allow 30 s
u.setTimeout(this, 30);
const actualBackup = `./${this.fileName}`;
const output = fs.createWriteStream(actualBackup);
// Add the shallow mode option
const p = u.p(params, { opts: { mode: 'shallow' } });
output.on('open', function() {
u.testBackup(p, 'animaldb', output, function(err) {
/**
* Restore a backup from a stream.
*
* @param {stream.Readable} srcStream - Stream containing backed up data.
* @param {string} targetUrl - Target database.
* @param {object} opts - Restore options.
* @param {number} [opts.parallelism=5] - Number of parallel HTTP requests to use.
* @param {number} [opts.bufferSize=500] - Number of documents per batch request.
* @param {number} [opts.requestTimeout=120000] - Milliseconds to wait before retrying an HTTP request.
* @param {string} [opts.iamApiKey] - IAM API key to use to access Cloudant database.
* @param {backupRestoreCallback} callback - Called on completion.
*/
restore: function(srcStream, targetUrl, opts, callback) {
const listenerErrorIndicator = { errored: false };
if (typeof callback === 'undefined' && typeof opts === 'function') {
callback = opts;
opts = {};
}
validateArgs(targetUrl, opts, callback);
opts = Object.assign({}, defaults(), opts);
const ee = new events.EventEmitter();
// Set up the DB client
const restoreDB = request.client(targetUrl, opts);
// Validate the DB exists, before proceeding to restore
proceedIfRestoreDbValid(restoreDB, function(err) {
if (err) {
if (err.name === 'DatabaseNotFound') {
err.message = `${err.message} Create the target database before restoring.`;
} else if (err.name === 'DatabaseNotEmpty') {
err.message = `${err.message} A target database must be a new and empty database.`;
}
// Didn't exist, or another fatal error, exit
callback(err);
return;
}
restoreInternal(
restoreDB,
opts,
srcStream,
ee,
function(err, writer) {
if (err) {
done(err);
} else {
u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected_shallow.json', done);
callback(err, null);
return;
}
});
});
});
describe(u.scenario('Buffer size tests', params), function() {
it('should backup/restore animaldb with the same buffer size', function(done) {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } });
u.testBackupAndRestoreViaFile(p, 'animaldb', actualBackup, this.dbName, done);
});
it('should backup/restore animaldb with backup buffer > restore buffer', function(done) {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 2 } }); // backup
const q = u.p(params, { opts: { bufferSize: 1 } }); // restore
u.testBackupToFile(p, 'animaldb', actualBackup, function(err) {
if (err) {
done(err);
} else {
// restore
u.testRestoreFromFile(q, actualBackup, dbName, function(err) {
if (!err) {
u.dbCompare('animaldb', dbName, done);
} else {
done(err);
}
if (writer != null) {
addEventListener(listenerErrorIndicator, writer, 'restored', function(obj) {
debug(' restored ', obj.total);
ee.emit('restored', { documents: obj.documents, total: obj.total });
});
}
});
});
it('should backup/restore animaldb with backup buffer < restore buffer', function(done) {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } }); // backup
const q = u.p(params, { opts: { bufferSize: 2 } }); // restore
u.testBackupToFile(p, 'animaldb', actualBackup, function(err) {
if (err) {
done(err);
} else {
// restore
u.testRestoreFromFile(q, actualBackup, dbName, function(err) {
if (!err) {
u.dbCompare('animaldb', dbName, done);
} else {
done(err);
addEventListener(listenerErrorIndicator, writer, 'error', function(err) {
debug('Error ' + JSON.stringify(err));
// Only call destroy if it is available on the stream
if (srcStream.destroy && srcStream.destroy instanceof Function) {
srcStream.destroy();
}
callback(err);
});
addEventListener(listenerErrorIndicator, writer, 'finished', function(obj) {
debug('restore complete');
ee.emit('finished', { total: obj.total });
callback(null, obj);
});
}
});
});
}
);
});
});
});
return ee;
}
};
/**
* Backup/restore callback
* @callback backupRestoreCallback
* @param {Error} err - Error object if operation failed.
* @param {object} data - summary data for backup/restore
*/
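A minimal usage sketch tying the two exports together (the URLs, file name and
option values below are illustrative assumptions, not from the source):

const couchbackup = require('@cloudant/couchbackup');
const fs = require('fs');

// Backup a source database to a local file
couchbackup.backup(
  'https://host.example/sourcedb',
  fs.createWriteStream('sourcedb.backup'),
  { parallelism: 2 },
  function(err, data) {
    if (err) {
      console.error(`Backup failed: ${err.name} ${err.message}`);
    } else {
      console.log(`Backed up ${data.total} documents`);
    }
  });

// Restore that file into a new, empty target database
couchbackup.restore(
  fs.createReadStream('sourcedb.backup'),
  'https://host.example/targetdb',
  {},
  function(err, data) {
    if (err) {
      console.error(`Restore failed: ${err.name} ${err.message}`);
    } else {
      console.log(`Restored ${data.total} documents`);
    }
  });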

@@ -1,35 +0,509 @@

// Copyright © 2017 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const logfilegetbatches = require('../includes/logfilegetbatches.js');
describe('#unit Fetching batches from a log file', function() {
it('should fetch multiple batches correctly', function(done) {
logfilegetbatches('./test/fixtures/test.log', [1, 4], function(err, data) {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(typeof data, 'object');
assert.strictEqual(Object.keys(data).length, 2);
assert.deepStrictEqual(data['1'].docs, [{ id: '6' }, { id: '7' }, { id: '8' }, { id: '9' }, { id: '10' }]);
assert.strictEqual(data['1'].batch, 1);
assert.deepStrictEqual(data['4'].docs, [{ id: '21' }, { id: '22' }]);
assert.strictEqual(data['4'].batch, 4);
done();
});
});
});
<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:43:48" time="0.097">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.024">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.007">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallism arg in shallow mode" time="0">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:43:48" time="4.526">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.921">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.839">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.643">
<system-out><![CDATA[{ total: 11 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:43:52" time="11.446">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="3.718">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.431">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.508">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:04" time="7.856">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.343">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.21">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="3.505">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:12" time="14.334">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="4.569">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.487">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.474">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:26" time="6.516">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.898">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="2.64">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.174">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:33" time="8.409">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.339">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="3.576">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.691">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:44:41" time="320.814">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.175">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="317.865">
<system-out><![CDATA[{ total: 522948 }
{ total: 522948 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:50:02" time="503.934">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="3.301">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="499.255">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:26" time="3.86">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="3.595">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:30" time="0.27">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.011">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:30" time="2.266">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.856">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.888">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:32" time="5.836">
<testcase classname="test.Resume tests using API" name="should create a log file" time="1.445">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.799">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.774">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:38" time="6.661">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.436">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.219">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.215">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:58:45" time="33.403">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.154">
<system-out><![CDATA[Backup process close null SIGTERM
Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.727">
<system-out><![CDATA[Backup process close null SIGTERM
Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:18" time="0.009">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:18" time="0.084">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.009">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.015">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.015">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:18" time="0.136">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.015">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.006">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.005">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.011">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.035">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.028">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.015">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:18" time="3.78">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.379">
<system-out><![CDATA[Backup process close 10 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.383">
<system-out><![CDATA[Backup process close 50 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.38">
<system-out><![CDATA[Backup process close 11 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.358">
<system-out><![CDATA[Backup process close 12 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.465">
<system-out><![CDATA[Backup process close 40 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.335">
<system-out><![CDATA[Backup process close 20 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.305">
<system-out><![CDATA[Backup process close 21 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.385">
<system-out><![CDATA[Backup process close 22 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.405">
<system-out><![CDATA[Backup process close 40 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.377">
<system-out><![CDATA[Backup process close 30 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:22" time="3.569">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.396">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.4">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.34">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.343">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.341">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.368">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.454">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.517">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.401">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:26" time="0.001">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:26" time="0.001">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:26" time="0.049">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.02">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.002">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.002">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:26" time="0.015">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:26" time="0.004">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:26" time="11.353">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.014">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.017">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.011">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.512">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.011">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:37" time="0.013">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:37" time="0.563">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.02">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.524">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.017">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:38" time="15.515">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.012">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.015">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="2.109">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.363">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-25T08:59:53" time="51.978">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="51.701">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-09-25T09:00:45" time="4.115">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.024">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.007">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.034">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.016">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.022">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>

@@ -1,2 +0,2 @@

// Copyright © 2018, 2021 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -15,79 +15,101 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it */
/* global after before describe */
'use strict';
const fs = require('fs');
const readline = require('readline');
const u = require('./citestutils.js');
const uuid = require('uuid').v4;
const url = require('url');
const toxy = require('toxy');
// Import the common hooks
require('../test/hooks.js');
const params = { useApi: true };
const tpoisons = toxy.poisons;
const trules = toxy.rules;
describe(u.scenario('Concurrent database backups', params), function() {
it('should run concurrent API database backups correctly #slower', function(done) {
// Allow up to 900 s to backup and compare (it should be much faster)!
u.setTimeout(this, 900);
function setupProxy(poison) {
const backendUrl = new url.URL(process.env.COUCH_BACKEND_URL);
const proxy = toxy({
auth: `${backendUrl.username}:${backendUrl.password}`,
changeOrigin: true
});
let doneCount = 0;
let doneErr;
const finished = function(err) {
doneCount++;
if (doneCount === 2) {
done(doneErr || err);
}
doneErr = err;
};
// Forward traffic to DB
proxy.forward(process.env.COUCH_BACKEND_URL);
const checkForEmptyBatches = function(fileName, cb) {
let foundEmptyBatch = false;
switch (poison) {
case 'normal':
// No poisons to add
break;
case 'bandwidth-limit':
// https://github.com/h2non/toxy#bandwidth
// Note the implementation of bandwidth is simplistic and the threshold
// delay is applied to every write of the buffer, so use the smallest
// delay possible and adjust the rate via the bytes option instead.
proxy
.poison(tpoisons.bandwidth({ bytes: 512, threshold: 1 })); // 0.5 MB/s
break;
case 'latency':
// https://github.com/h2non/toxy#latency
proxy
.poison(tpoisons.latency({ max: 1500, min: 250 }))
.withRule(trules.probability(60));
break;
case 'slow-read':
// https://github.com/h2non/toxy#slow-read
// Note this only impacts read of data from requests so only for non-GET
// In practice this means that it impacts restore much more than backup
// since although backup POSTs to _bulk_get the content is much smaller
// than what is POSTed to _bulk_docs for a restore.
// Similarly to bandwidth-limit use a 1 ms threshold
proxy
.poison(tpoisons.slowRead({ chunk: 256, threshold: 1 }))
// Slow read for 10% of the time, e.g. 10 ms in every 100 ms
.withRule(trules.timeThreshold({ duration: 10, period: 100 }));
break;
case 'rate-limit':
// https://github.com/h2non/toxy#rate-limit
// Simulate the Cloudant free plan with 20 lookups/s and 10 writes/s
proxy.post('/*/_bulk_get')
.poison(tpoisons.rateLimit({ limit: 20, threshold: 1000 }));
proxy.post('/*/_bulk_docs')
.poison(tpoisons.rateLimit({ limit: 10, threshold: 1000 }));
break;
default:
throw Error('Unknown toxy poison ' + poison);
}
const rd = readline.createInterface({
input: fs.createReadStream(fileName),
output: fs.createWriteStream('/dev/null'),
terminal: false
});
// Catch remaining traffic
proxy.all('/*');
return proxy;
}
rd.on('line', function(line) {
if (JSON.parse(line).length === 0) {
// Note: Empty batch arrays indicate that the running backup is
// incorrectly sharing a log file with another ongoing backup job.
foundEmptyBatch = true;
}
});
const poisons = [
'normal',
'bandwidth-limit',
'latency',
'slow-read',
'rate-limit'
];
rd.on('close', function() {
if (foundEmptyBatch) {
cb(new Error(`Log file '${fileName}' contains empty batches`));
} else {
cb();
}
});
};
poisons.forEach(function(poison) {
describe('unreliable network tests (using toxy poison ' + poison + ')', function() {
let proxy;
// [1] Run 'largedb2g' database backup
const actualBackup1 = `./${uuid()}`;
const output1 = fs.createWriteStream(actualBackup1);
output1.on('open', function() {
u.testBackup(params, 'largedb2g', output1, function(err) {
if (err) {
finished(err);
} else {
checkForEmptyBatches(actualBackup1, finished);
}
});
before('start toxy server', function() {
proxy = setupProxy(poison);
console.log('Using toxy poison ' + poison);
// For these tests COUCH_URL points to the toxy proxy on localhost whereas
// COUCH_BACKEND_URL is the real CouchDB instance.
const toxyUrl = new url.URL(process.env.COUCH_URL);
// Listen on the specified hostname only, so if using localhost we don't
// need external connections.
proxy.listen(toxyUrl.port, toxyUrl.hostname);
});
// [2] Run 'largedb1g' database backup
const actualBackup2 = `./${uuid()}`;
const output2 = fs.createWriteStream(actualBackup2);
output2.on('open', function() {
u.testBackup(params, 'largedb1g', output2, function(err) {
if (err) {
finished(err);
} else {
checkForEmptyBatches(actualBackup2, finished);
}
});
after('stop toxy server', function() {
proxy.close();
});
delete require.cache[require.resolve('../test/ci_e2e.js')];
require('../test/ci_e2e.js');
});
});

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -14,24 +14,42 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const u = require('./citestutils.js');
/**
* Utility methods for the command line interface.
* @module cliutils
* @see module:cliutils
*/
describe('Encryption tests', function() {
// Note: CLI only, because these tests use the openssl command
const p = { useApi: false, encryption: true };
const url = require('url');
it('should backup and restore animaldb via an encrypted file', function(done) {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const encryptedBackup = `./${this.fileName}`;
u.testBackupAndRestoreViaFile(p, 'animaldb', encryptedBackup, this.dbName, function(err) {
if (err) {
done(err);
} else {
u.assertEncryptedFile(encryptedBackup, done);
}
});
});
});
module.exports = {
/**
* Combine a base URL and a database name, ensuring at least a single slash
* between root and database name. This allows users to have Couch behind
* proxies that mount Couch's / endpoint at some other mount point.
* @param {string} root - root URL
* @param {string} databaseName - database name
* @return concatenated URL.
*
* @private
*/
databaseUrl: function databaseUrl(root, databaseName) {
if (!root.endsWith('/')) {
root = root + '/';
}
return new url.URL(encodeURIComponent(databaseName), root).toString();
},
/**
* Generate CLI argument usage text.
*
* @param {string} description - argument description.
* @param {string} defaultValue - default argument value.
*
* @private
*/
getUsage: function getUsage(description, defaultValue) {
return `${description} ${defaultValue !== undefined ? ` (default: ${defaultValue})` : ''}`;
}
};
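A brief sketch of these helpers in use (the URL and values below are illustrative, not taken from the package):
const cliutils = require('../includes/cliutils.js');
cliutils.databaseUrl('http://example.com/couch', 'my/db');
// -> 'http://example.com/couch/my%2Fdb' (mount point kept, name URL-encoded)
cliutils.getUsage('number of changes per batch', 500);
// -> 'number of changes per batch  (default: 500)'
// (the double space comes from the template literal above)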

@@ -1,2 +0,2 @@

// Copyright © 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,88 +14,269 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
'use strict';
const chunk = require('lodash/chunk');
const difference = require('lodash/difference');
const forOwn = require('lodash/forOwn');
const isEmpty = require('lodash/isEmpty');
const union = require('lodash/union');
const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');
const compare = async(database1, database2, client) => {
// check docs same in both dbs
const allDocs1 = await getAllDocs(client, database1);
const allDocs2 = await getAllDocs(client, database2);
/**
* Read documents from a database to be backed up.
*
* @param {object} db - `@cloudant/cloudant` DB object for source database.
* @param {number} blocksize - number of documents to download in single request
* @param {number} parallelism - number of concurrent downloads
* @param {string} log - path to log file to use
* @param {boolean} resume - whether to resume from an existing log file
* @returns EventEmitter with the following events:
* - `received` - called with a block of documents to write to backup
* - `error` - on error
* - `finished` - when backup process is finished (either complete or errored)
*/
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime(); // backup start time
const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)
const onlyInDb1 = (difference(allDocs1, allDocs2));
const onlyInDb2 = (difference(allDocs2, allDocs1));
function proceedWithBackup() {
if (options.resume) {
// pick up from existing log file from previous run
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
} else {
// create new log file and process
spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
if (err) {
ee.emit('error', err);
} else {
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
}
});
}
}
let databasesSame = isEmpty(onlyInDb1) && isEmpty(onlyInDb2);
validateBulkGetSupport(db, function(err) {
if (err) {
return ee.emit('error', err);
} else {
proceedWithBackup();
}
});
if (!databasesSame) {
console.log(onlyInDb1.length + ' documents only in db 1.');
console.log('Document IDs only in db 1: ' + onlyInDb1);
console.log(onlyInDb2.length + ' documents only in db 2.');
console.log('Document IDs only in db 2: ' + onlyInDb2);
}
return ee;
};
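A minimal consumer sketch for the emitter returned above. The module path and option values are assumptions for illustration; db is a client object as produced elsewhere in this package by request.client:
const backupDb = require('./includes/backup.js'); // assumed path
const ee = backupDb(db, { log: 'backup.log', bufferSize: 500, parallelism: 5, resume: false });
ee.on('received', function(batch, q, done) {
  // batch.data holds the docs fetched for batch.batch; q is the download queue
  process.stdout.write(JSON.stringify(batch.data) + '\n');
  done(); // records the ':d batchN' log line so a resume skips this batch
});
ee.on('finished', function(summary) { console.log('backed up', summary.total, 'docs'); });
ee.on('error', function(err) { console.error('fatal', err); });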
// check revs same in docs common to both dbs
const partitionSize = 500;
const batches = chunk(union(allDocs1, allDocs2), partitionSize);
/**
* Validate /_bulk_get support for a specified database.
*
* @param {string} db - nodejs-cloudant db
* @param {function} callback - called on completion with signature (err)
*/
function validateBulkGetSupport(db, callback) {
db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
err = error.convertResponseError(err, function(err) {
switch (err.status) {
case undefined:
// There was no status code on the error
return err;
case 404:
return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
default:
return new error.HTTPError(err);
}
});
callback(err);
});
}
const missingRevsInDb2 = await getMissingRevs(client, database1, database2, batches);
const missingRevsInDb1 = await getMissingRevs(client, database2, database1, batches);
/**
* Download remaining batches in a log file, splitting batches into sets
* to avoid enqueueing too many in one go.
*
* @param {string} log - log file name to maintain download state
* @param {string} db - nodejs-cloudant db
* @param {events.EventEmitter} ee - event emitter to emit received events on
* @param {time} startTime - start time for backup process
* @param {number} batchesPerDownloadSession - max batches to enqueue for
* download at a time. As batches contain many doc IDs, this helps avoid
* exhausting memory.
* @param {number} parallelism - number of concurrent downloads
* @returns function to call to download remaining batches, with signature
* (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
*/
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
let total = 0; // running total of documents downloaded so far
let noRemainingBatches = false;
databasesSame = databasesSame && isEmpty(missingRevsInDb1) && isEmpty(missingRevsInDb2);
// Generate a set of batches (up to batchesPerDownloadSession) to download from the
// log file and download them. Set noRemainingBatches to `true` for the last batch.
function downloadSingleBatchSet(done) {
// Fetch the doc IDs for the batches in the current set to
// download them.
function batchSetComplete(err, data) {
if (!err) {
total = data.total;
}
done(err);
}
function processRetrievedBatches(err, batches) {
if (!err) {
// process them in parallelised queue
processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
} else {
batchSetComplete(err);
}
}
if (!databasesSame) {
console.log('Missing revs in db 1: ' + JSON.stringify(missingRevsInDb1));
console.log('Missing revs in db 2: ' + JSON.stringify(missingRevsInDb2));
readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
if (err) {
ee.emit('error', err);
// Stop processing changes file for fatal errors
noRemainingBatches = true;
done();
} else {
if (batchSetIds.length === 0) {
noRemainingBatches = true;
return done();
}
logfilegetbatches(log, batchSetIds, processRetrievedBatches);
}
});
}
return databasesSame;
};
// Return true if all batches in log file have been downloaded
function isFinished(callback) { callback(null, noRemainingBatches); }
const getMissingRevs = async(client, databaseName1, databaseName2, batcheses) => {
const fakeRevisionId = '9999-a';
function onComplete() {
ee.emit('finished', { total: total });
}
const missing = {};
async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}
// look in db1 - use a fake revision ID to fetch all leaf revisions
/**
* Return a set of uncompleted download batch IDs from the log file.
*
* @param {string} log - log file path
* @param {number} batchesPerDownloadSession - maximum IDs to return
* @param {function} callback - signature (err, batchSetIds array)
*/
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
logfilesummary(log, function processSummary(err, summary) {
if (!err) {
if (!summary.changesComplete) {
callback(new error.BackupError('IncompleteChangesInLogFile',
'WARNING: Changes did not finish spooling'));
return;
}
if (Object.keys(summary.batches).length === 0) {
return callback(null, []);
}
for (const batches of batcheses) {
const documentRevisions = {};
batches.forEach(id => (documentRevisions[id] = [fakeRevisionId]));
// batch IDs are the property names of summary.batches
const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
callback(null, batchSetIds);
} else {
callback(err);
}
});
}
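For reference, here is a hypothetical log file of the kind the summary code above reads (the sequence token is invented). Batches are written as ':t' lines by the change spooler, marked done with ':d' lines after download, and the end of the feed is recorded by ':changes_complete':
:t batch0 [{"id":"doc1"},{"id":"doc2"}]
:t batch1 [{"id":"doc3"}]
:changes_complete 3-g1AAAAxyz
:d batch0
A summary of this file would report changesComplete as true and batch 1 as the only remaining download.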
const result1 = await client.postRevsDiff({ db: databaseName1, documentRevisions });
const revsDiffRequestDb2 = {};
forOwn(result1.result, (v, k) => (revsDiffRequestDb2[k] = v.possible_ancestors));
// look in db2
const result2 = await client.postRevsDiff({ db: databaseName2, documentRevisions: revsDiffRequestDb2 });
forOwn(result2.result, (v, k) => {
if ('missing' in v) {
missing[k] = v.missing;
/**
* Download a set of batches retrieved from a log file. When a download is
* complete, add a line to the logfile indicating such.
*
* @param {any} db - nodejs-cloudant database
* @param {any} parallelism - number of concurrent requests to make
* @param {any} log - log file to drive downloads from
* @param {any} batches - batches to download
* @param {any} ee - event emitter for progress. This function emits
* received and error events.
* @param {any} start - time backup started, to report deltas
* @param {any} grandtotal - count of documents downloaded prior to this set
* of batches
* @param {any} callback - completion callback, (err, {total: number}).
*/
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
let hasErrored = false;
let total = grandtotal;
// queue to process the fetch requests in an orderly fashion using _bulk_get
const q = async.queue(function(payload, done) {
const output = [];
const thisBatch = payload.batch;
delete payload.batch;
delete payload.command;
function logCompletedBatch(batch) {
if (log) {
fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
} else {
done();
}
}
// do the /db/_bulk_get request
db.service.postBulkGet({
db: db.db,
revs: true,
docs: payload.docs
}).then(response => {
// create an output array with the docs returned
response.result.results.forEach(function(d) {
if (d.docs) {
d.docs.forEach(function(doc) {
if (doc.ok) {
output.push(doc.ok);
}
});
}
});
total += output.length;
const t = (new Date().getTime() - start) / 1000;
ee.emit('received', {
batch: thisBatch,
data: output,
length: output.length,
time: t,
total: total
}, q, logCompletedBatch);
}).catch(err => {
if (!hasErrored) {
hasErrored = true;
err = error.convertResponseError(err);
// Kill the queue for fatal errors
q.kill();
ee.emit('error', err);
}
done();
});
}, parallelism);
for (const i in batches) {
q.push(batches[i]);
}
return missing;
};
const getAllDocs = async(client, database) => {
let allDocIds = [];
const limit = 2000;
let startKey = '\u0000';
do {
const pageOfDocIds = (await client.postAllDocs({ db: database, startKey, limit })).result.rows.map(r => r.id);
allDocIds = allDocIds.concat(pageOfDocIds);
if (pageOfDocIds.length < limit) {
startKey = null;
} else {
startKey = pageOfDocIds[limit - 1] + '\u0000';
}
} while (startKey != null);
return allDocIds;
};
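A short note on why the pagination above works (assuming the raw, code-point key ordering that _all_docs uses):
// '\u0000' is the lowest code point, so for any id there is no key between
// 'doc3' and 'doc3\u0000', while 'doc3\u0000' still sorts before 'doc30'.
// Starting the next page at lastId + '\u0000' therefore excludes lastId
// itself without skipping or duplicating any following id.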
q.drain(function() {
callback(null, { total: total });
});
}
module.exports = {
compare
};
/**
* Returns first N properties on an object.
*
* @param {object} obj - object with properties
* @param {number} count - number of properties to return
*/
function getPropertyNames(obj, count) {
// decide which batch numbers to deal with
const batchestofetch = [];
let j = 0;
for (const i in obj) {
batchestofetch.push(parseInt(i));
j++;
if (j >= count) break;
}
return batchestofetch;
}

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017, 2022 IBM Corp. All rights reserved.
//

@@ -14,232 +14,99 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it beforeEach */
'use strict';
const assert = require('assert');
const nock = require('nock');
const request = require('../includes/request.js');
const error = require('../includes/error.js');
const fs = require('fs');
const liner = require('./liner.js');
const change = require('./change.js');
const error = require('./error.js');
const debug = require('debug')('couchbackup:spoolchanges');
const url = 'http://localhost:7777/testdb';
const db = request.client(url, { parallelism: 1 });
const timeoutDb = request.client(url, { parallelism: 1, requestTimeout: 500 });
const longTestTimeout = 3000;
/**
* Write log file for all changes from a database, ready for downloading
* in batches.
*
* @param {string} dbUrl - URL of database
* @param {string} log - path to log file to use
* @param {number} bufferSize - the number of changes per batch/log line
* @param {function(err)} callback - a callback to run on completion
*/
module.exports = function(db, log, bufferSize, ee, callback) {
// list of document ids to process
const buffer = [];
let batch = 0;
let lastSeq = null;
const logStream = fs.createWriteStream(log);
let pending = 0;
// The number of changes to fetch per request
const limit = 100000;
beforeEach('Clean nock', function() {
nock.cleanAll();
});
// send documents ids to the queue in batches of bufferSize + the last batch
const processBuffer = function(lastOne) {
if (buffer.length >= bufferSize || (lastOne && buffer.length > 0)) {
debug('writing', buffer.length, 'changes to the backup file');
const b = { docs: buffer.splice(0, bufferSize), batch: batch };
logStream.write(':t batch' + batch + ' ' + JSON.stringify(b.docs) + '\n');
ee.emit('changes', batch);
batch++;
}
};
describe('#unit Check request headers', function() {
it('should have a couchbackup user-agent', function(done) {
const couch = nock(url)
.matchHeader('user-agent', /couchbackup-cloudant\/\d+\.\d+\.\d+(?:-SNAPSHOT)? \(Node.js v\d+\.\d+\.\d+\)/)
.head('/good')
.reply(200);
// called once per received change
const onChange = function(c) {
if (c) {
if (c.error) {
ee.emit('error', new error.BackupError('InvalidChange', `Received invalid change: ${c}`));
} else if (c.changes) {
const obj = { id: c.id };
buffer.push(obj);
processBuffer(false);
} else if (c.last_seq) {
lastSeq = c.last_seq;
pending = c.pending;
}
}
};
db.service.headDocument({ db: db.db, docId: 'good' }).then(response => {
assert.ok(couch.isDone());
done();
}).catch(err => {
done(err);
});
});
});
describe('#unit Check request response error callback', function() {
it('should not callback with error for 200 response', function(done) {
const couch = nock(url)
.get('/good')
.reply(200, { ok: true });
db.service.getDocument({ db: db.db, docId: 'good' }).then(response => {
assert.ok(response.result);
assert.ok(couch.isDone());
done();
}).catch(err => {
err = error.convertResponseError(err);
done(err);
});
});
it('should callback with error after 3 500 responses', function(done) {
const couch = nock(url)
.get('/bad')
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
function getChanges(since = 0) {
debug('making changes request since ' + since);
return db.service.postChangesAsStream({ db: db.db, since: since, limit: limit, seqInterval: limit })
.then(response => {
response.result.pipe(liner())
.on('error', function(err) {
logStream.end();
callback(err);
})
.pipe(change(onChange))
.on('error', function(err) {
logStream.end();
callback(err);
})
.on('finish', function() {
processBuffer(true);
if (!lastSeq) {
logStream.end();
debug('changes request terminated before last_seq was sent');
callback(new error.BackupError('SpoolChangesError', 'Changes request terminated before last_seq was sent'));
} else {
debug(`changes request completed with last_seq: ${lastSeq} and ${pending} changes pending.`);
if (pending > 0) {
// Return the next promise
return getChanges(lastSeq);
} else {
debug('finished streaming database changes');
logStream.end(':changes_complete ' + lastSeq + '\n', 'utf8', callback);
}
}
});
})
.catch(err => {
logStream.end();
if (err.status && err.status >= 400) {
callback(error.convertResponseError(err));
} else if (err.name !== 'SpoolChangesError') {
callback(new error.BackupError('SpoolChangesError', `Failed changes request - ${err.message}`));
}
});
}
db.service.getDocument({ db: db.db, docId: 'bad' }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
}).timeout(longTestTimeout);
it('should callback with error after 3 POST 503 responses', function(done) {
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.times(3)
.reply(503, function(uri, requestBody) {
this.req.response.statusMessage = 'Service Unavailable';
return { error: 'service_unavailable', reason: 'Service unavailable' };
});
db.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 Service Unavailable: post ${url}/_bulk_get - Error: service_unavailable, Reason: Service unavailable`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
}).timeout(longTestTimeout);
it('should callback with error after 3 429 responses', function(done) {
const couch = nock(url)
.get('/bad')
.times(3)
.reply(429, function(uri, requestBody) {
this.req.response.statusMessage = 'Too Many Requests';
return { error: 'foo', reason: 'bar' };
});
db.service.getDocument({ db: db.db, docId: 'bad' }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `429 Too Many Requests: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
}).timeout(longTestTimeout);
it('should callback with fatal error for 404 response', function(done) {
const couch = nock(url)
.get('/bad')
.reply(404, function(uri, requestBody) {
this.req.response.statusMessage = 'Not Found';
return { error: 'foo', reason: 'bar' };
});
db.service.getDocument({ db: db.db, docId: 'bad' }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `404 Not Found: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
});
it('should callback with same error for no status code error response', function(done) {
const couch = nock(url)
.get('/bad')
.times(3)
.replyWithError('testing badness');
db.service.getDocument({ db: db.db, docId: 'bad' }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
const err2 = error.convertResponseError(err);
assert.strictEqual(err, err2);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
}).timeout(longTestTimeout);
it('should retry request if HTTP request times out', function(done) {
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.delay(1000)
.reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }] } })
.post('/_bulk_get')
.query(true)
.reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }, { id: '2', ok: { _id: '2' } }] } });
timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then(response => {
assert.ok(response);
assert.ok(response.result);
assert.ok(response.result.results);
assert.ok(response.result.results.docs);
assert.strictEqual(response.result.results.docs.length, 2);
assert.ok(couch.isDone());
done();
}).catch(err => {
done(err);
});
});
it('should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests time out', function(done) {
// Increase the timeout for this test to allow for the delays
this.timeout(3000);
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.delay(1000)
.times(3)
.reply(200, { ok: true });
timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then(response => {
done(new Error('Successful response when error expected.'));
}).catch(err => {
err = error.convertResponseError(err);
// Note axios returns ECONNABORTED rather than ESOCKETTIMEDOUT
// See https://github.com/axios/axios/issues/2710 via https://github.com/axios/axios/issues/1543
assert.strictEqual(err.statusText, 'ECONNABORTED');
assert.strictEqual(err.message, `timeout of 500ms exceeded: post ${url}/_bulk_get ECONNABORTED`);
assert.ok(couch.isDone());
done();
}).catch(err => {
// Handle assertion errors
done(err);
});
});
describe('#unit Check credentials', function() {
it('should properly decode username and password', function(done) {
const username = 'user%123';
const password = 'colon:at@321';
const url = `http://${encodeURIComponent(username)}:${encodeURIComponent(password)}@localhost:7777/testdb`;
const sessionUrl = 'http://localhost:7777';
const couch = nock(sessionUrl)
.post('/_session', { username: username, password: password })
.reply(200, { ok: true }, { 'Set-Cookie': 'AuthSession=ABC123DEF4356;' })
.get('/')
.reply(200);
const db = request.client(url, { parallelism: 1 });
db.service.getServerInformation().then(response => {
assert.ok(response);
assert.ok(couch.isDone());
done();
}).catch(err => {
done(err);
});
});
});
});
getChanges();
};
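A minimal sketch of invoking the spooler above; the module path is an assumption and db is assumed to come from request.client as elsewhere in this package:
const events = require('events');
const spoolchanges = require('../includes/spoolchanges.js'); // assumed path
const ee = new events.EventEmitter();
ee.on('changes', function(batch) { console.log('spooled batch', batch); });
spoolchanges(db, 'backup.log', 500, ee, function(err) {
  if (err) { console.error('spooling failed', err); } else { console.log('all changes spooled'); }
});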

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -14,106 +14,24 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const path = require('path');
const tmp = require('tmp');
delete require.cache[require.resolve('./citestutils.js')];
const u = require('./citestutils.js');
/**
Return API default settings.
*/
function apiDefaults() {
return {
parallelism: 5,
bufferSize: 500,
requestTimeout: 120000,
log: tmp.tmpNameSync(),
resume: false,
mode: 'full'
};
}
/**
Return CLI default settings.
*/
function cliDefaults() {
const defaults = apiDefaults();
// add additional legacy settings
defaults.db = 'test';
defaults.url = 'http://localhost:5984';
// add CLI only option
defaults.quiet = false;
return defaults;
}
/**
Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
// if we have a custom CouchDB url
if (typeof process.env.COUCH_URL !== 'undefined') {
opts.url = process.env.COUCH_URL;
}
// if we have a specified database
if (typeof process.env.COUCH_DATABASE !== 'undefined') {
opts.db = process.env.COUCH_DATABASE;
}
// if we have a specified buffer size
if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
}
// if we have a specified parallelism
if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
}
// if we have a specified request timeout
if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
}
// if we have a specified log file
if (typeof process.env.COUCH_LOG !== 'undefined') {
opts.log = path.normalize(process.env.COUCH_LOG);
}
// if we are instructed to resume
if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
opts.resume = true;
}
// if we are given an output filename
if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
opts.output = path.normalize(process.env.COUCH_OUTPUT);
}
// if we only want a shallow copy
if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
opts.mode = 'shallow';
}
// if we are instructed to be quiet
if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
opts.quiet = true;
}
// if we have a specified API key
if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
}
// if we have a specified IAM token endpoint
if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
module.exports = {
apiDefaults: apiDefaults,
cliDefaults: cliDefaults,
applyEnvironmentVariables: applyEnvironmentVariables
};
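A hedged example of the expected flow (the module path is an assumption; the environment values are illustrative):
process.env.COUCH_URL = 'http://couch.example.com:5984';
process.env.COUCH_BUFFER_SIZE = '200';
const config = require('../includes/config.js'); // assumed path
const opts = config.cliDefaults();
config.applyEnvironmentVariables(opts);
// now opts.url === 'http://couch.example.com:5984' and opts.bufferSize === 200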
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('End to end backup and restore', params), function() {
it('should backup and restore animaldb', function(done) {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
u.testDirectBackupAndRestore(params, 'animaldb', this.dbName, done);
});
it('should backup and restore largedb1g #slow', function(done) {
// Allow up to 30 m for backup and restore of largedb1g
// This is a long time but when many builds run in parallel it can take a
// while to get this done.
u.setTimeout(this, 30 * 60);
u.testDirectBackupAndRestore(params, 'largedb1g', this.dbName, done);
});
});
});

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -14,613 +14,106 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global */
'use strict';
const assert = require('assert');
const spawn = require('child_process').spawn;
const app = require('../app.js');
const dbUrl = require('../includes/cliutils.js').databaseUrl;
const stream = require('stream');
const fs = require('fs');
const zlib = require('zlib');
const Tail = require('tail').Tail;
const compare = require('./compare.js');
const request = require('../includes/request.js');
const path = require('path');
const tmp = require('tmp');
function scenario(test, params) {
return `${test} ${(params.useApi) ? 'using API' : 'using CLI'}`;
/**
Return API default settings.
*/
function apiDefaults() {
return {
parallelism: 5,
bufferSize: 500,
requestTimeout: 120000,
log: tmp.tmpNameSync(),
resume: false,
mode: 'full'
};
}
function params() {
const p = {};
for (let i = 0; i < arguments.length; i++) {
Object.assign(p, arguments[i]);
}
return p;
}
/**
Return CLI default settings.
*/
function cliDefaults() {
const defaults = apiDefaults();
// Returns the event emitter for API calls, or the child process for CLI calls
function testBackup(params, databaseName, outputStream, callback) {
let gzip;
let openssl;
let backup;
let backupStream = outputStream;
// add additional legacy settings
defaults.db = 'test';
defaults.url = 'http://localhost:5984';
// Configure API key if needed
augmentParamsWithApiKey(params);
// add CLI only option
defaults.quiet = false;
// Pipe via compression if requested
if (params.compression) {
if (params.useApi) {
// If using the API, use the Node zlib stream
const zlib = require('zlib');
backupStream = zlib.createGzip();
backupStream.pipe(outputStream);
} else {
// Spawn process for gzip
gzip = spawn('gzip', [], { stdio: ['pipe', 'pipe', 'inherit'] });
// Pipe the streams as needed
gzip.stdout.pipe(outputStream);
backupStream = gzip.stdin;
// register an error handler
gzip.on('error', function(err) {
callback(err);
});
}
}
return defaults;
}
// Pipe via encryption if requested
if (params.encryption) {
if (params.useApi) {
// Currently only CLI support for testing encryption
callback(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
// Spawn process for openssl
openssl = spawn('openssl', ['aes-128-cbc', '-pass', 'pass:12345'], { stdio: ['pipe', 'pipe', 'inherit'] });
// Pipe the streams as needed
openssl.stdout.pipe(outputStream);
backupStream = openssl.stdin;
// register an error handler
openssl.on('error', function(err) {
callback(err);
});
}
/**
Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
// if we have a custom CouchDB url
if (typeof process.env.COUCH_URL !== 'undefined') {
opts.url = process.env.COUCH_URL;
}
let tail;
if (params.abort) {
// Create the log file for abort tests so we can tail it; in other tests
// the log file is created normally by the backup process.
const f = fs.openSync(params.opts.log, 'w');
fs.closeSync(f);
// Use tail to watch the log file for a batch to be completed then abort
tail = new Tail(params.opts.log, { useWatchFile: true, fsWatchOptions: { interval: 500 }, follow: false });
tail.on('line', function(data) {
const matches = data.match(/:d batch\d+/);
if (matches !== null) {
// Turn off the tail.
tail.unwatch();
// Abort the backup
backupAbort(params.useApi, backup);
}
});
tail.on('error', function(err) {
callback(err);
});
// if we have a specified databases
if (typeof process.env.COUCH_DATABASE !== 'undefined') {
opts.db = process.env.COUCH_DATABASE;
}
if (params.useApi) {
backup = app.backup(dbUrl(process.env.COUCH_URL, databaseName), backupStream, params.opts, function(err, data) {
if (err) {
if (params.expectedBackupError) {
try {
assert.strictEqual(err.name, params.expectedBackupError.name, 'The backup should receive the expected error.');
// Got the expected error, so wipe it for the callback
err = null;
} catch (caught) {
// Update the error with the assertion failure
err = caught;
}
}
} else {
console.log(data);
}
callback(err);
});
if (backup) {
backup.on('error', function(err) {
console.error(`Caught non-fatal error: ${err}`);
});
}
} else {
// Default to pipe, but will use 'inherit' if using --output (see params.opts.output)
let destination = 'pipe';
// Set up default args
const args = ['./bin/couchbackup.bin.js', '--db', databaseName];
if (params.opts) {
if (params.opts.mode) {
args.push('--mode');
args.push(params.opts.mode);
}
if (params.opts.output) {
args.push('--output');
args.push(params.opts.output);
destination = 'inherit';
}
if (params.opts.log) {
args.push('--log');
args.push(params.opts.log);
}
if (params.opts.resume) {
args.push('--resume');
args.push(params.opts.resume);
}
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
let count = 0;
/**
* In some tests we need to wait for both the backup process
* and the outputStream to "close". If we call back on either
* event alone, the other might not be ready, leading to flaky tests.
*
* This function delegates to the callback but only after the
* correct number of invocations. That is 2 when we have an
* output stream or 1 otherwise, and only once in the case of
* an error.
*/
function gatingCallback(err) {
count += 1;
if (err) {
if (count === 1) {
callback(err);
}
} else {
// Output stream case we want a callback from process
// and the stream.
if (outputStream && count === 2) {
callback();
} else if (!outputStream && count === 1) {
callback();
}
}
}
// Note: use spawn, not fork, because the stdio options are not supported with fork in Node 4.x
backup = spawn('node', args, { stdio: ['ignore', destination, 'pipe'] })
.on('error', function(err) {
gatingCallback(err);
})
.on('close', function(code, signal) {
console.log(`Backup process close ${code} ${signal}`);
try {
if (params.abort) {
// The tail should be stopped when we match a line and abort, but if
// something didn't work we need to make sure the tail is stopped
tail.unwatch();
// Assert that the process was aborted as expected
assert.strictEqual(signal, 'SIGTERM', `The backup should have terminated with SIGTERM, but was ${signal}.`);
} else if (params.expectedBackupError) {
assert.strictEqual(code, params.expectedBackupError.code, `The backup exited with unexpected code ${code}.`);
} else {
assert.strictEqual(code, 0, `The backup should exit normally, got exit code ${code} and signal ${signal}.`);
}
gatingCallback();
} catch (err) {
gatingCallback(err);
}
});
// Pipe the stdout to the supplied outputStream
if (destination === 'pipe') {
backup.stdout.pipe(backupStream);
}
// Forward the spawned process stderr (we don't use inherit because we want
// to access this stream directly as well)
backup.stderr.on('data', function(data) {
console.error(`${data}`);
});
// Check for errors on the spawned processes
if (gzip) {
gzip.on('close', function(code) {
try {
assert.strictEqual(code, 0, `The compression should exit normally, got exit code ${code}.`);
} catch (err) {
gatingCallback(err);
}
});
}
if (openssl) {
openssl.on('close', function(code) {
try {
assert.strictEqual(code, 0, `The encryption should exit normally, got exit code ${code}.`);
} catch (err) {
gatingCallback(err);
}
});
}
if (outputStream) {
// Callback when the destination stream closes.
outputStream.on('close', gatingCallback);
} else if (!params.opts.output) {
gatingCallback(new Error('Unexpected test without outputStream or output option.'));
}
// if we have a specified buffer size
if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
}
return backup;
}
function backupAbort(usingApi, backup) {
setImmediate(function() {
if (usingApi) {
// Currently no way to abort an API backup
console.error('UNSUPPORTED: cannot abort API backups at this time.');
} else {
backup.kill();
}
});
}
function testRestore(params, inputStream, databaseName, callback) {
let restoreStream = inputStream;
// Configure API key if needed
augmentParamsWithApiKey(params);
// Pipe via decompression if requested
if (params.compression) {
if (params.useApi) {
// If using the API, use the Node zlib stream
restoreStream = zlib.createGunzip();
inputStream.pipe(restoreStream);
} else {
// Spawn process for gunzip
const gunzip = spawn('gunzip', [], { stdio: ['pipe', 'pipe', 'inherit'] });
// Pipe the streams as needed
inputStream.pipe(gunzip.stdin);
restoreStream = gunzip.stdout;
}
// if we have a specified parallelism
if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
}
// Pipe via decryption if requested
if (params.encryption) {
if (params.useApi) {
callback(new Error('Not implemented: cannot test encrypted API backups at this time.'));
} else {
// Spawn process for openssl
const dopenssl = spawn('openssl', ['aes-128-cbc', '-d', '-pass', 'pass:12345'], { stdio: ['pipe', 'pipe', 'inherit'] });
// Pipe the streams as needed
inputStream.pipe(dopenssl.stdin);
restoreStream = dopenssl.stdout;
}
// if we have a specified request timeout
if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
}
if (params.useApi) {
app.restore(restoreStream, dbUrl(process.env.COUCH_URL, databaseName), params.opts, function(err, data) {
if (err) {
if (params.expectedRestoreError) {
try {
assert.strictEqual(err.name, params.expectedRestoreError.name, 'The restore should receive the expected error.');
err = null;
} catch (caught) {
err = caught;
}
}
} else {
console.log(data);
}
callback(err);
}).on('error', function(err) {
console.error(`Caught non-fatal error: ${err}`);
});
} else {
// Set up default args
const args = ['./bin/couchrestore.bin.js', '--db', databaseName];
if (params.opts) {
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.parallelism) {
args.push('--parallelism');
args.push(params.opts.parallelism);
}
if (params.opts.requestTimeout) {
args.push('--request-timeout');
args.push(params.opts.requestTimeout);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
// Note: use spawn, not fork, because the stdio options are not supported with fork in Node 4.x
const restore = spawn('node', args, { stdio: ['pipe', 'inherit', 'inherit'] });
// Pipe to write the readable inputStream into stdin
restoreStream.pipe(restore.stdin);
restore.stdin.on('error', function(err) {
// Suppress errors that might arise from piping of input streams
// from the test process to the child process (this appears to be handled
// gracefully in the shell)
console.error(`Test stream error code ${err.code}`);
});
restore.on('close', function(code) {
try {
if (params.expectedRestoreError) {
assert.strictEqual(code, params.expectedRestoreError.code, `The restore exited with unexpected code ${code}.`);
} else {
assert.strictEqual(code, 0, `The restore should exit normally, got exit code ${code}`);
}
callback();
} catch (err) {
callback(err);
}
});
restore.on('error', function(err) {
callback(err);
});
// if we have a specified log file
if (typeof process.env.COUCH_LOG !== 'undefined') {
opts.log = path.normalize(process.env.COUCH_LOG);
}
}
// Serial backup and restore via a file on disk
function testBackupAndRestoreViaFile(params, srcDb, backupFile, targetDb, callback) {
testBackupToFile(params, srcDb, backupFile, function(err) {
if (err) {
callback(err);
} else {
testRestoreFromFile(params, backupFile, targetDb, function(err) {
if (!err) {
dbCompare(srcDb, targetDb, callback);
} else {
callback(err);
}
});
}
});
}
function testBackupToFile(params, srcDb, backupFile, callback, processCallback) {
// Open the file for appending if this is a resume
const output = fs.createWriteStream(backupFile, { flags: (params.opts && params.opts.resume) ? 'a' : 'w' });
output.on('open', function() {
const backupProcess = testBackup(params, srcDb, output, function(err) {
if (err) {
callback(err);
} else {
callback();
}
});
if (processCallback) {
processCallback(backupProcess);
}
});
}
function testRestoreFromFile(params, backupFile, targetDb, callback) {
const input = fs.createReadStream(backupFile);
input.on('open', function() {
testRestore(params, input, targetDb, function(err) {
if (err) {
callback(err);
} else {
callback();
}
});
});
}
function testDirectBackupAndRestore(params, srcDb, targetDb, callback) {
// Allow a 64 MB highWaterMark for the passthrough during testing
const passthrough = new stream.PassThrough({ highWaterMark: 67108864 });
testBackupAndRestore(params, srcDb, passthrough, passthrough, targetDb, callback);
}
function testBackupAndRestore(params, srcDb, backupStream, restoreStream, targetDb, callback) {
testBackup(params, srcDb, backupStream, function(err) {
if (err) {
callback(err);
}
});
testRestore(params, restoreStream, targetDb, function(err) {
if (err) {
callback(err);
} else {
dbCompare(srcDb, targetDb, callback);
}
});
}
function assertResumedBackup(params, resumedBackup, restoreCallback) {
// Validate that the resume backup didn't need to write all the docs
if (params.useApi) {
resumedBackup.once('finished', function(summary) {
assertWrittenFewerThan(summary.total, params.exclusiveMaxExpected, restoreCallback);
});
} else {
// For the CLI case we need to see the output because we don't have
// the finished event.
const listener = function(data) {
const matches = data.toString().match(/.*Finished - Total document revisions written: (\d+).*/);
if (matches !== null) {
assertWrittenFewerThan(matches[1], params.exclusiveMaxExpected, restoreCallback);
resumedBackup.stderr.removeListener('data', listener);
}
};
resumedBackup.stderr.on('data', listener);
// if we are instructed to resume
if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
opts.resume = true;
}
}
function testBackupAbortResumeRestore(params, srcDb, backupFile, targetDb, callback) {
const restore = function(err) {
if (err) {
callback(err);
} else {
testRestoreFromFile(params, backupFile, targetDb, function(err) {
if (err) {
callback(err);
} else {
dbCompare(srcDb, targetDb, callback);
}
});
}
};
const resume = function(err) {
if (err) {
callback(err);
}
// Remove the abort parameter and add the resume parameter
delete params.abort;
params.opts.resume = true;
// Resume backup and restore to validate it was successful.
if (params.opts && params.opts.output) {
const resumedBackup = testBackup(params, srcDb, null, function(err) {
if (err) {
callback(err);
}
});
assertResumedBackup(params, resumedBackup, restore);
} else {
testBackupToFile(params, srcDb, backupFile, function(err) {
if (err) {
callback(err);
}
},
function(backupProcess) {
assertResumedBackup(params, backupProcess, restore);
});
}
};
if (params.opts && params.opts.output) {
testBackup(params, srcDb, null, resume);
} else {
testBackupToFile(params, srcDb, backupFile, resume);
// if we are given an output filename
if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
opts.output = path.normalize(process.env.COUCH_OUTPUT);
}
}
function dbCompare(db1Name, db2Name, callback) {
const client = request.client(process.env.COUCH_BACKEND_URL, {});
compare.compare(db1Name, db2Name, client.service)
.then(result => {
try {
assert.strictEqual(result, true, 'The database comparison should succeed, but failed');
callback();
} catch (err) {
callback(err);
}
})
.catch(err => callback(err));
}
function sortByIdThenRev(o1, o2) {
if (o1._id < o2._id) return -1;
if (o1._id > o2._id) return 1;
if (o1._rev < o2._rev) return -1;
if (o1._rev > o2._rev) return 1;
return 0;
}
function readSortAndDeepEqual(actualContentPath, expectedContentPath, callback) {
const backupContent = JSON.parse(fs.readFileSync(actualContentPath, 'utf8'));
const expectedContent = JSON.parse(fs.readFileSync(expectedContentPath, 'utf8'));
// Array order of the docs is important for equality, but not for backup
backupContent.sort(sortByIdThenRev);
expectedContent.sort(sortByIdThenRev);
// Assert that the backup matches the expected
try {
assert.deepStrictEqual(backupContent, expectedContent);
callback();
} catch (err) {
callback(err);
// if we only want a shallow copy
if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
opts.mode = 'shallow';
}
}
function setTimeout(context, timeout) {
// Increase timeout using TEST_TIMEOUT_MULTIPLIER
const multiplier = (typeof process.env.TEST_TIMEOUT_MULTIPLIER !== 'undefined') ? parseInt(process.env.TEST_TIMEOUT_MULTIPLIER) : 1;
timeout *= multiplier;
// Set the mocha timeout
context.timeout(timeout * 1000);
}
function assertGzipFile(path, callback) {
try {
// 1f 8b is the gzip magic number
const expectedBytes = Buffer.from([0x1f, 0x8b]);
const buffer = Buffer.alloc(2);
const fd = fs.openSync(path, 'r');
// Read the first two bytes
fs.readSync(fd, buffer, 0, 2, 0);
fs.closeSync(fd);
// Assert the magic number corresponds to gz extension
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be gz compressed.');
callback();
} catch (err) {
callback(err);
// if we are instructed to be quiet
if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
opts.quiet = true;
}
}
function assertEncryptedFile(path, callback) {
try {
// OpenSSL-encrypted files start with 'Salted'
const expectedBytes = Buffer.from('Salted');
const buffer = Buffer.alloc(6);
const fd = fs.openSync(path, 'r');
// Read the first six bytes
fs.readSync(fd, buffer, 0, 6, 0);
fs.closeSync(fd);
// Assert first 6 characters of the file are "Salted"
assert.deepStrictEqual(buffer, expectedBytes, 'The backup file should be encrypted.');
callback();
} catch (err) {
callback(err);
// if we have a specified API key
if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
}
}
function assertWrittenFewerThan(total, number, callback) {
try {
assert(total < number && total > 0, `Saw ${total} but expected between 1 and ${number - 1} documents for the resumed backup.`);
callback();
} catch (err) {
callback(err);
// if we have a specified IAM token endpoint
if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
function augmentParamsWithApiKey(params) {
if (process.env.COUCHBACKUP_TEST_IAM_API_KEY) {
if (!params.opts) {
params.opts = {};
}
params.opts.iamApiKey = process.env.COUCHBACKUP_TEST_IAM_API_KEY;
params.opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
module.exports = {
scenario,
p: params,
setTimeout,
dbCompare,
readSortAndDeepEqual,
assertGzipFile,
assertEncryptedFile,
testBackup,
testRestore,
testDirectBackupAndRestore,
testBackupToFile,
testRestoreFromFile,
testBackupAndRestoreViaFile,
testBackupAbortResumeRestore
apiDefaults: apiDefaults,
cliDefaults: cliDefaults,
applyEnvironmentVariables: applyEnvironmentVariables
};
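An illustrative mocha suite built from these utilities (scenario title, database and file names are made up):
const u = require('./citestutils.js');
const params = u.p({ useApi: true }, { compression: true });
describe(u.scenario('Compressed backup', params), function() {
  it('should backup animaldb to a gzipped file', function(done) {
    u.setTimeout(this, 60); // seconds, scaled by TEST_TIMEOUT_MULTIPLIER
    u.testBackupToFile(params, 'animaldb', './animaldb_backup.gz', function(err) {
      if (err) { done(err); } else { u.assertGzipFile('./animaldb_backup.gz', done); }
    });
  });
});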

@@ -14,51 +14,80 @@ // Copyright © 2017 IBM Corp. All rights reserved.

// limitations under the License.
/* global describe it */
'use strict';
const fs = require('fs');
const u = require('./citestutils.js');
const stream = require('stream');
const liner = require('./liner.js');
describe('Event tests', function() {
it('should get a finished event when using stdout', function(done) {
u.setTimeout(this, 40);
// Use the API so we can get events
const params = { useApi: true };
const backup = u.testBackup(params, 'animaldb', process.stdout, function(err) {
if (err) {
done(err);
const onLine = function(onCommand, getDocs) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
let matches;
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
});
backup.on('finished', function() {
try {
// Test will time out if the finished event is not emitted
done();
} catch (err) {
done(err);
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
// extract doc ids
if (getDocs && obj.command === 't') {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
}
onCommand(obj);
}
done();
};
return change;
};
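To make the parsing above concrete, a single hypothetical log line maps to a command object like this:
// input line: ':t batch12 [{"id":"mydoc"}]'
// onCommand receives: { command: 't', batch: 12, docs: [{ id: 'mydoc' }] }
// (docs is only populated when getDocs is true and the command is 't';
// the summary module below passes getDocs=false and ignores doc bodies)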
/**
* Generate a list of remaining batches from a backup log file.
*
* @param {string} log - log file name
* @param {function} callback - callback with (err, {changesComplete: boolean, batches: object}).
* changesComplete signifies whether the log file appeared to
* have completed reading the changes feed (contains :changes_complete).
* batches are remaining batch IDs for download.
*/
module.exports = function(log, callback) {
// our sense of state
const state = {
};
let changesComplete = false;
// called with each line from the log file
const onCommand = function(obj) {
if (obj.command === 't') {
state[obj.batch] = true;
} else if (obj.command === 'd') {
delete state[obj.batch];
} else if (obj.command === 'changes_complete') {
changesComplete = true;
}
};
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, false))
.on('finish', function() {
const obj = { changesComplete: changesComplete, batches: state };
callback(null, obj);
});
});
it('should get a finished event when using file output', function(done) {
u.setTimeout(this, 40);
// Use the API so we can get events
const params = { useApi: true };
const actualBackup = `./${this.fileName}`;
// Create a file and backup to it
const output = fs.createWriteStream(actualBackup);
output.on('open', function() {
const backup = u.testBackup(params, 'animaldb', output, function(err) {
if (err) {
done(err);
}
});
backup.on('finished', function() {
try {
// Test will time out if the finished event is not emitted
done();
} catch (err) {
done(err);
}
});
});
});
});
};
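A hedged usage sketch for the summary function exported above (file name and batch numbers are illustrative):
require('./logfilesummary.js')('backup.log', function(err, summary) {
  // e.g. summary -> { changesComplete: true, batches: { 1: true } },
  // i.e. batch 1 was spooled (:t) but never marked done (:d)
});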

@@ -1,2 +0,2 @@

// Copyright © 2017, 2018 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -14,213 +14,34 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const nock = require('nock');
// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl and treat the expected DatabaseNotFound error as success.
const validateArgs = function(url, opts, callback) {
const nullStream = fs.createWriteStream('/dev/null');
let cb = callback;
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
// replace the callback to handle the nock response
// to avoid attempting a real backup
cb = function(err) {
nullStream.end();
if (err.name === 'DatabaseNotFound') {
// This is what we expect if we reached the backup
// This is success for valid args cases.
err = null;
}
callback(err);
};
}
backup(url, nullStream, opts, cb);
return true;
};
const stderrWriteFun = process.stderr.write;
let capturedStderr;

function captureStderr() {
process.stderr.write = function(string, encoding, fd) {
capturedStderr += string;
};
}

function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}

function assertErrorMessage(msg, done) {
return function(err, data) {
try {
assert(err.message, 'There should be an error message');
assert(err.message.indexOf(msg) >= 0);
assert(data === null || data === undefined, 'There should only be an error.');
done();
} catch (e) {
done(e);
}
};
}

function assertNoError(done) {
return function(err, data) {
try {
assert(err === null, 'There should be no error message.');
done();
} catch (e) {
done(e);
}
};
}
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', function(done) {
validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string', done));
});
it('returns no error for valid URL type', function(done) {
validateArgs(goodUrl, {}, assertNoError(done));
});
it('returns error for invalid (no host) URL', function(done) {
validateArgs('http://', {}, assertErrorMessage('Invalid URL', done));
});
it('returns error for invalid (no protocol) URL', function(done) {
validateArgs('invalid', {}, assertErrorMessage('Invalid URL', done));
});
it('returns error for invalid (wrong protocol) URL', function(done) {
validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.', done));
});
it('returns error for invalid (no path) URL', function(done) {
validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).', done));
});
it('returns error for invalid (no protocol, no host) URL', function(done) {
validateArgs('invalid', {}, assertErrorMessage('Invalid URL', done));
});
it('returns error for invalid buffer size type', function(done) {
validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for zero buffer size', function(done) {
validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for float buffer size', function(done) {
validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns no error for valid buffer size type', function(done) {
validateArgs(goodUrl, { bufferSize: 123 }, assertNoError(done));
});
it('returns error for invalid log type', function(done) {
validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string', done));
});
it('returns no error for valid log type', function(done) {
validateArgs(goodUrl, { log: 'log.txt' }, assertNoError(done));
});
it('returns error for invalid mode type', function(done) {
validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"', done));
});
it('returns error for invalid mode string', function(done) {
validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"', done));
});
it('returns no error for valid mode type', function(done) {
validateArgs(goodUrl, { mode: 'full' }, assertNoError(done));
});
it('returns error for invalid output type', function(done) {
validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string', done));
});
it('returns no error for valid output type', function(done) {
validateArgs(goodUrl, { output: 'output.txt' }, assertNoError(done));
});
it('returns error for invalid parallelism type', function(done) {
validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for zero parallelism', function(done) {
validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for float parallelism', function(done) {
validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns no error for valid parallelism type', function(done) {
validateArgs(goodUrl, { parallelism: 123 }, assertNoError(done));
});
it('returns error for invalid request timeout type', function(done) {
validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for zero request timeout', function(done) {
validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns error for float request timeout', function(done) {
validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]', done));
});
it('returns no error for valid request timeout type', function(done) {
validateArgs(goodUrl, { requestTimeout: 123 }, assertNoError(done));
});
it('returns error for invalid resume type', function(done) {
validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean', done));
});
it('returns no error for valid resume type', function(done) {
validateArgs(goodUrl, { resume: false }, assertNoError(done));
});
it('returns error for invalid key type', function(done) {
validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string', done));
});
it('returns error for key and URL credentials supplied', function(done) {
validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.', done));
});
it('warns for log arg in shallow mode', function(done) {
captureStderr();
try {
validateArgs(goodUrl, { mode: 'shallow', log: 'test' }, function(err, data) {
assert.ok(err);
assert.ok(!data);
assert(capturedStderr.indexOf('The options "log" and "resume" are invalid when using shallow mode.') > -1, 'Log warning message was not present');
});
done();
} catch (e) {
done(e);
} finally {
releaseStderr();
}
});
it('warns for resume arg in shallow mode', function(done) {
captureStderr();
try {
validateArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true }, function(err, data) {
assert.ok(err);
assert.ok(!data);
assert(capturedStderr.indexOf('The options "log" and "resume" are invalid when using shallow mode.') > -1, 'Log warning message was not present');
});
done();
} catch (e) {
done(e);
} finally {
releaseStderr();
}
});
it('warns for parallelism arg in shallow mode', function(done) {
captureStderr();
try {
validateArgs(goodUrl, { mode: 'shallow', parallelism: 10 }, function(err, data) {
assert.ok(err);
assert.ok(!data);
assert(capturedStderr.indexOf('The option "parallelism" has no effect when using shallow mode.') > -1, 'Log warning message was not present');
});
done();
} catch (e) {
done(e);
} finally {
releaseStderr();
}
});
});
module.exports = function() {
const liner = new stream.Transform({ objectMode: true });

liner._transform = function(chunk, encoding, done) {
let data = chunk.toString();
if (this._lastLineData) {
data = this._lastLineData + data;
}

const lines = data.split('\n');
this._lastLineData = lines.splice(lines.length - 1, 1)[0];

for (const i in lines) {
this.push(lines[i]);
}
done();
};

liner._flush = function(done) {
if (this._lastLineData) {
this.push(this._lastLineData);
}
this._lastLineData = null;
done();
};

return liner;
};
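// Usage sketch for the line splitter above (require path assumed for
// illustration): raw bytes go in, one line per chunk comes out.
//
// const liner = require('./liner.js');
// fs.createReadStream('backup.log')
//   .pipe(liner())
//   .on('data', (line) => console.log('line:', line));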

@@ -14,354 +14,166 @@ // Copyright © 2017, 2021 IBM Corp. All rights reserved.

// limitations under the License.
/* global describe afterEach before after it */
'use strict';
const assert = require('assert');
const parser = require('../includes/parser.js');
const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');
describe('#unit Default parameters', function() {
let processEnvCopy;
let processArgvCopy;
before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
processArgvCopy = JSON.parse(JSON.stringify(process.argv));
// setup environment variables
process.env.COUCH_URL = 'http://user:pass@myurl.com';
process.env.COUCH_DATABASE = 'mydb';
process.env.COUCH_BUFFER_SIZE = '1000';
process.env.COUCH_PARALLELISM = '20';
process.env.COUCH_REQUEST_TIMEOUT = '20000';
process.env.COUCH_LOG = 'my.log';
process.env.COUCH_RESUME = 'true';
process.env.COUCH_OUTPUT = 'myfile.txt';
process.env.COUCH_MODE = 'shallow';
process.env.CLOUDANT_IAM_API_KEY = 'ABC123-ZYX987_cba789-xyz321';
process.env.COUCH_QUIET = 'true';
});
after('Reset process data', function() {
process.env = processEnvCopy;
process.argv = processArgvCopy;
});
afterEach(function() {
delete require.cache[require.resolve('commander')];
});
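// (Purging commander from the require cache matters because commander keeps
// parsed option state on its cached module instance; re-requiring it gives
// each test a fresh parser.)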
describe('Backup command-line', function() {
it('respects the COUCH_URL env variable if the --url backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
done();
});
it('respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
done();
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
done();
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
done();
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
done();
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
done();
});
it('respects the COUCH_LOG env variable if the --log backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, process.env.COUCH_LOG);
done();
});
it('respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
done();
});
it('respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, process.env.COUCH_OUTPUT);
done();
});
it('respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, process.env.COUCH_MODE);
done();
});
it('respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
done();
});
it('respects the backup --url command-line parameter', function(done) {
const url = 'http://user:pass@myurl2.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
done();
});
it('respects the backup --db command-line parameter', function(done) {
const db = 'mydb2';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
done();
});
it('respects the backup --buffer-size command-line parameter', function(done) {
const bufferSize = 500;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
done();
});
it('respects the backup --parallelism command-line parameter', function(done) {
const parallelism = 10;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
done();
});
it('respects the backup --request-timeout command-line parameter', function(done) {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
done();
});
it('respects the backup --iam-api-key command-line parameter', function(done) {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
done();
});
it('respects the backup --log command-line parameter', function(done) {
const filename = 'my2.log';
process.argv = ['node', 'test', '--log', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, filename);
done();
});
it('respects the backup --resume command-line parameter', function(done) {
process.argv = ['node', 'test', '--resume'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
done();
});
it('respects the backup --output command-line parameter', function(done) {
const filename = 'myfile2.txt';
process.argv = ['node', 'test', '--output', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, filename);
done();
});
it('respects the backup --mode full command-line parameter', function(done) {
process.argv = ['node', 'test', '--mode', 'full'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'full');
done();
});
it('respects the backup --mode shallow command-line parameter', function(done) {
process.argv = ['node', 'test', '--mode', 'shallow'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'shallow');
done();
});
it('respects the backup --quiet command-line parameter', function(done) {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
done();
});
});
describe('Restore command-line', function() {
it('respects the COUCH_URL env variable if the --url restore command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
done();
});
it('respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
done();
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
done();
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
done();
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
done();
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
done();
});
it('respects the COUCH_QUIET env variable if the --quiet restore command-line parameter is missing', function(done) {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
done();
});
it('respects the restore --url command-line parameter', function(done) {
const url = 'https://a:b@myurl3.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
done();
});
it('respects the restore --db command-line parameter', function(done) {
const db = 'mydb3';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
done();
});
it('respects the restore --buffer-size command-line parameter', function(done) {
const bufferSize = 250;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
done();
});
it('respects the restore --parallelism command-line parameter', function(done) {
const parallelism = 5;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
done();
});
it('respects the restore --request-timeout command-line parameter', function(done) {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
done();
});
it('respects the restore --iam-api-key command-line parameter', function(done) {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
done();
});
it('respects the restore --quiet command-line parameter', function(done) {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
done();
});
});
});
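// Taken together, the cases above pin down the precedence rule: an explicit
// command-line flag wins over its COUCH_*/CLOUDANT_* environment variable,
// which in turn would win over the built-in default.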
const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
process.version + ')';

// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
constructor(options) {
super(options);
this.data = [];
}

_write(chunk, encoding, callback) {
this.data.push(chunk);
callback();
}

stringBody() {
return Buffer.concat(this.data).toString();
}
}

// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
let method;
let requestUrl;
if (err.response) {
if (err.response.config.url) {
requestUrl = err.response.config.url;
method = err.response.config.method;
}
// Override the status text with an improved message
let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
`${method} ${requestUrl}`;
if (err.response.data) {
// Check if we have a JSON response and try to get the error/reason
if (err.response.headers['content-type'] === 'application/json') {
if (!err.response.data.error && err.response.data.pipe) {
// If we didn't find a JSON object with `error` then we might have a stream response.
// Detect the stream by the presence of `pipe` and use it to get the body and parse
// the error information.
const p = new Promise((resolve, reject) => {
const errorBody = new ResponseWriteable();
err.response.data.pipe(errorBody)
.on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
.on('error', () => { reject(err); });
});
// Replace the stream on the response with the parsed object
err.response.data = await p;
}
// Append the error/reason if available
if (err.response.data.error) {
// Override the status text with our more complete message
errorMsg += ` - Error: ${err.response.data.error}`;
if (err.response.data.reason) {
errorMsg += `, Reason: ${err.response.data.reason}`;
}
}
} else {
errorMsg += err.response.data;
}
// Set a new message for use by the node-sdk-core
// We use the errors array because it gets processed
// ahead of all other service errors.
err.response.data.errors = [{ message: errorMsg }];
}
} else if (err.request) {
if (!err.message.includes(err.config.url)) {
// Augment the message with the URL and method
// but don't do it again if we already have the URL.
err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
}
}
return Promise.reject(err);
};

module.exports = {
client: function(rawUrl, opts) {
const url = new URL(rawUrl);
// Split the URL to separate service from database
// Use origin as the "base" to remove auth elements
const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
let authenticator;
// Default to cookieauth unless an IAM key is provided
if (opts.iamApiKey) {
const iamAuthOpts = { apikey: opts.iamApiKey };
if (opts.iamTokenUrl) {
iamAuthOpts.url = opts.iamTokenUrl;
}
authenticator = new IamAuthenticator(iamAuthOpts);
} else if (url.username) {
authenticator = new CouchdbSessionAuthenticator({
username: decodeURIComponent(url.username),
password: decodeURIComponent(url.password)
});
} else {
authenticator = new NoAuthAuthenticator();
}
const serviceOpts = {
authenticator: authenticator,
timeout: opts.requestTimeout,
// Axios performance options
maxContentLength: -1
};

const service = new CloudantV1(serviceOpts);
// Configure retries
const maxRetries = 2; // for 3 total attempts
service.getHttpClient().defaults.raxConfig = {
// retries for status codes
retry: maxRetries,
// retries for non-response e.g. ETIMEDOUT
noResponseRetries: maxRetries,
backoffType: 'exponential',
httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
statusCodesToRetry: [
[429, 429],
[500, 599]
],
shouldRetry: err => {
const cfg = retryPlugin.getConfig(err);
// cap at max retries regardless of response/non-response type
if (cfg.currentRetryAttempt >= maxRetries) {
return false;
} else {
return retryPlugin.shouldRetryRequest(err);
}
},
instance: service.getHttpClient()
};
retryPlugin.attach(service.getHttpClient());

service.setServiceUrl(actUrl.toString());
if (authenticator instanceof CouchdbSessionAuthenticator) {
// Awkward workaround for known Couch issue with compression on _session requests
// It is not feasible to disable compression on all requests with the amount of
// data this lib needs to move, so override the property in the tokenManager instance.
authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
}
if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
}
// Add error interceptors to put URLs in error messages
service.getHttpClient().interceptors.response.use(null, errorHelper);

// Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
service.getHttpClient().interceptors.request.use(function(requestConfig) {
requestConfig.headers['User-Agent'] = userAgent;
return requestConfig;
}, null);

return { service: service, db: dbName, url: actUrl.toString() };
}
};
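// A minimal caller sketch for the factory above (require path and values are
// illustrative):
//
// const { service, db, url } = require('./request.js').client(
//   'https://user:pass@couch.example.com/mydb', { requestTimeout: 120000 });
// service.getDatabaseInformation({ db: db })
//   .then((response) => console.log(response.result.doc_count))
//   .catch((err) => console.error(err.message));
//
// With the raxConfig above, a 429 or 5xx response (and non-responses such as
// ETIMEDOUT) is retried up to maxRetries times with exponential backoff before
// the error - augmented by errorHelper - reaches the caller.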

@@ -1,509 +0,41 @@

<testsuites name="test">
<testsuite name="#unit Validate arguments" tests="33" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:23" time="0.114">
<testcase classname="test.#unit Validate arguments" name="returns error for invalid URL type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid URL type" time="0.029">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (wrong protocol) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no path) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid (no protocol, no host) URL" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid buffer size type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero buffer size" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float buffer size" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid buffer size type" time="0.006">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid log type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid log type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid mode string" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid mode type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid output type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid output type" time="0.004">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid parallelism type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero parallelism" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float parallelism" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid parallelism type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid request timeout type" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for zero request timeout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for float request timout" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid request timeout type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid resume type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns no error for valid resume type" time="0.005">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for invalid key type" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="returns error for key and URL credentials supplied" time="0">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for log arg in shallow mode" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for resume arg in shallow mode" time="0.001">
</testcase>
<testcase classname="test.#unit Validate arguments" name="warns for parallism arg in shallow mode" time="0.001">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:23" time="4.563">
<testcase classname="test.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="0.939">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.852">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.629">
<system-out><![CDATA[{ total: 11 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:28" time="11.645">
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="3.749">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.479">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.591">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:40" time="5.643">
<testcase classname="test.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.462">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.285">
</testcase>
<testcase classname="test.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.088">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:42:45" time="14.626">
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="4.739">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.48">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.579">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:43:00" time="6.609">
<testcase classname="test.Compression tests using API" name="should backup animaldb to a compressed file" time="0.922">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="2.681">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.197">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:43:06" time="8.745">
<testcase classname="test.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.362">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="3.785">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.805">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:43:15" time="323.456">
<testcase classname="test.End to end backup and restore using API" name="should backup and restore animaldb" time="2.356">
<system-out><![CDATA[{ total: 15 }
{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="320.326">
<system-out><![CDATA[{ total: 522948 }
{ total: 522948 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:48:39" time="497.979">
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.767">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="494.439">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:56:57" time="4.259">
<testcase classname="test.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="3.997">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:01" time="0.271">
<testcase classname="test.Write error tests" name="calls callback with error set when stream is not writeable" time="0.01">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:01" time="2.285">
<testcase classname="test.Event tests" name="should get a finished event when using stdout" time="0.873">
</testcase>
<testcase classname="test.Event tests" name="should get a finished event when using file output" time="0.881">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:03" time="5.233">
<testcase classname="test.Resume tests using API" name="should create a log file" time="0.907">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.773">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
<testcase classname="test.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.762">
<system-out><![CDATA[{ total: 15 }
]]></system-out>
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:09" time="6.74">
<testcase classname="test.Resume tests using CLI" name="should create a log file" time="1.348">
<system-out><![CDATA[Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.257">
</testcase>
<testcase classname="test.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.343">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:15" time="33.566">
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m" time="16.457">
<system-out><![CDATA[Backup process close null SIGTERM
Backup process close 0 null
]]></system-out>
</testcase>
<testcase classname="test.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.585">
<system-out><![CDATA[Backup process close null SIGTERM
Backup process close 0 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="#unit Configuration" tests="12" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:49" time="0.01">
<testcase classname="test.#unit Configuration" name="respects the COUCH_URL env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_DATABASE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_BUFFER_SIZE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_PARALLELISM env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_REQUEST_TIMEOUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_API_KEY env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the CLOUDANT_IAM_TOKEN_URL env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_LOG env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_RESUME env variable" time="0.001">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_OUTPUT env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_MODE env variable" time="0">
</testcase>
<testcase classname="test.#unit Configuration" name="respects the COUCH_QUIET env variable" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:49" time="0.08">
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate when DB does not exist" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on BulkGetError" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Unauthorized existence check" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on Forbidden no _reader" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.016">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on NoLogFileName" time="0">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on LogDoesNotExist" time="0.001">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on IncompleteChangesInLogFile" time="0.008">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on _changes HTTPFatalError" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for backup" name="should terminate on SpoolChangesError" time="0.013">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using API for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:49" time="0.117">
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Unauthorized db existence check" time="0.006">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on Forbidden no _writer" time="0.012">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on RestoreDatabaseNotFound" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.004">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.01">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.036">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.026">
</testcase>
<testcase classname="test.#unit Fatal errors using API for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.008">
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for backup" tests="10" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:49" time="3.8">
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate when DB does not exist" time="0.364">
<system-out><![CDATA[Backup process close 10 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on BulkGetError" time="0.424">
<system-out><![CDATA[Backup process close 50 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Unauthorized existence check" time="0.337">
<system-out><![CDATA[Backup process close 11 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on Forbidden no _reader" time="0.331">
<system-out><![CDATA[Backup process close 12 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _bulk_get HTTPFatalError" time="0.39">
<system-out><![CDATA[Backup process close 40 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on NoLogFileName" time="0.306">
<system-out><![CDATA[Backup process close 20 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on LogDoesNotExist" time="0.29">
<system-out><![CDATA[Backup process close 21 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on IncompleteChangesInLogFile" time="0.585">
<system-out><![CDATA[Backup process close 22 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on _changes HTTPFatalError" time="0.388">
<system-out><![CDATA[Backup process close 40 null
]]></system-out>
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for backup" name="should terminate on SpoolChangesError" time="0.375">
<system-out><![CDATA[Backup process close 30 null
]]></system-out>
</testcase>
</testsuite>
<testsuite name="#unit Fatal errors using CLI for restore" tests="9" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:53" time="3.491">
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Unauthorized db existence check" time="0.337">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on Forbidden no _writer" time="0.359">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on RestoreDatabaseNotFound" time="0.387">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not empty" time="0.364">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on notEmptyDBErr when database is not new" time="0.338">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError" time="0.417">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError from system database" time="0.441">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on _bulk_docs HTTPFatalError large stream" time="0.435">
</testcase>
<testcase classname="test.#unit Fatal errors using CLI for restore" name="should terminate on multiple _bulk_docs HTTPFatalError" time="0.402">
</testcase>
</testsuite>
<testsuite name="#unit Fetching batches from a log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:56" time="0.003">
<testcase classname="test.#unit Fetching batches from a log file" name="should fetch multiple batches correctly" time="0.002">
</testcase>
</testsuite>
<testsuite name="#unit Fetching summary from the log file" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:56" time="0.002">
<testcase classname="test.#unit Fetching summary from the log file" name="should fetch a summary correctly" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Backup command-line" tests="23" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:56" time="0.042">
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_URL env variable if the --url backup command-line parameter is missing" time="0.015">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing" time="0.002">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_LOG env variable if the --log backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --db command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --buffer-size command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --parallelism command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --request-timeout command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --iam-api-key command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --log command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --resume command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --output command-line parameter" time="0">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode full command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --mode shallow command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Backup command-line" name="respects the backup --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Default parameters Restore command-line" tests="14" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:57" time="0.014">
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_URL env variable if the --url restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the COUCH_QUIET env variable if the --quiet restorer command-line parameter is missing" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --url command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --db command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --buffer-size command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --parallelism command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --request-timeout command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --iam-api-key command-line parameter" time="0.001">
</testcase>
<testcase classname="test.#unit Default parameters Restore command-line" name="respects the restore --quiet command-line parameter" time="0.001">
</testcase>
</testsuite>
<testsuite name="#unit Check request headers" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:57" time="0.005">
<testcase classname="test.#unit Check request headers" name="should have a couchbackup user-agent" time="0.004">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback" tests="8" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:57:57" time="10.592">
<testcase classname="test.#unit Check request response error callback" name="should not callback with error for 200 response" time="0.003">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 500 responses" time="2.015">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 POST 503 responses" time="2.016">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error after 3 429 responses" time="2.013">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with fatal error for 404 response" time="0.005">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with same error for no status code error response" time="2.013">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should retry request if HTTP request gets timed out" time="0.511">
</testcase>
<testcase classname="test.#unit Check request response error callback" name="should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out" time="2.009">
</testcase>
</testsuite>
<testsuite name="#unit Check request response error callback #unit Check credentials" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:58:07" time="0.011">
<testcase classname="test.#unit Check request response error callback #unit Check credentials" name="should properly decode username and password" time="0.011">
</testcase>
</testsuite>
<testsuite name="#unit Perform backup using shallow backup" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:58:07" time="0.554">
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup" time="0.019">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should perform a shallow backup with transient error" time="0.521">
</testcase>
<testcase classname="test.#unit Perform backup using shallow backup" name="should fail to perform a shallow backup on fatal error" time="0.012">
</testcase>
</testsuite>
<testsuite name="#unit Check spool changes" tests="4" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:58:08" time="15.592">
<testcase classname="test.#unit Check spool changes" name="should terminate on request error" time="2.012">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should terminate on bad HTTP status code response" time="2.015">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting changes" time="2.065">
</testcase>
<testcase classname="test.#unit Check spool changes" name="should keep collecting sparse changes" time="9.495">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:58:23" time="46.817">
<testcase classname="test.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="46.55">
</testcase>
</testsuite>
<testsuite name="#unit Check database restore writer" tests="6" errors="0" failures="0" skipped="0" timestamp="2023-09-13T06:59:10" time="4.112">
<testcase classname="test.#unit Check database restore writer" name="should complete successfully" time="0.022">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should terminate on a fatal error" time="0.007">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should retry on transient errors" time="2.035">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should fail after 3 transient errors" time="2.016">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should restore shallow backups without rev info successfully" time="0.022">
</testcase>
<testcase classname="test.#unit Check database restore writer" name="should get a batch error for non-empty array response with new_edits false" time="0.007">
</testcase>
</testsuite>
</testsuites>
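For orientation, the "Check request response error callback" suite above encodes the retry policy: 429 and 5xx responses are retried up to three times before the error is surfaced, while a 404 is fatal immediately. A minimal illustrative sketch of that classification (not the package's actual implementation):

// Illustrative only: classify HTTP status codes the way the tests above imply.
function isTransient(statusCode) {
  // 429 (rate limited) and any 5xx are worth retrying
  return statusCode === 429 || (statusCode >= 500 && statusCode < 600);
}

function shouldRetry(statusCode, attempt, maxAttempts = 3) {
  // fatal errors (e.g. 404 database not found) are never retried
  return isTransient(statusCode) && attempt < maxAttempts;
}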
// Copyright © 2017 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';
// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');
module.exports = function(onChange) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
let obj = null;
// one change per line - remove the trailing comma
line = line.trim().replace(/,$/, '');
// extract the last_seq at the end of the changes feed
if (line.match(/^"last_seq":/)) {
line = '{' + line;
}
try {
obj = JSON.parse(line);
} catch (e) {
  // ignore lines that are not valid JSON on their own
}
onChange(obj);
done();
};
return change;
};
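A minimal usage sketch for the transform above, assuming it sits alongside liner.js as in this package (the sample feed content is illustrative):

const { Readable } = require('stream');
const liner = require('./liner.js');
const change = require('./change.js');

// A fragment shaped like a CouchDB _changes response body
const feed = [
  '{"results":[',
  '{"seq":"1-abc","id":"doc1","changes":[{"rev":"1-x"}]},',
  '{"seq":"2-def","id":"doc2","changes":[{"rev":"1-y"}]}',
  '],',
  '"last_seq":"2-def"}'
].join('\n');

Readable.from([feed])
  .pipe(liner())
  .pipe(change(function(obj) {
    // obj is null for lines that are not standalone JSON, e.g. '{"results":['
    if (obj && obj.id) console.log('change for', obj.id);
    if (obj && obj.last_seq) console.log('last_seq', obj.last_seq);
  }));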

@@ -14,269 +14,130 @@ // Copyright © 2017, 2021 IBM Corp. All rights reserved.

// limitations under the License.
/* global describe it before after */
'use strict';
const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');
const assert = require('assert');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;
/**
* Read documents from a database to be backed up.
*
* @param {string} db - `@cloudant/cloudant` DB object for source database.
* @param {number} blocksize - number of documents to download in single request
* @param {number} parallelism - number of concurrent downloads
* @param {string} log - path to log file to use
* @param {boolean} resume - whether to resume from an existing log file
* @returns EventEmitter with following events:
* - `received` - called with a block of documents to write to backup
* - `error` - on error
* - `finished` - when backup process is finished (either complete or errored)
*/
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime(); // backup start time
const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)
describe('#unit Configuration', function() {
let processEnvCopy;
function proceedWithBackup() {
if (options.resume) {
// pick up from existing log file from previous run
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
} else {
// create new log file and process
spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
if (err) {
ee.emit('error', err);
} else {
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
}
});
}
}
before('Save env', function() {
// Copy env so we can reset it after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
});
validateBulkGetSupport(db, function(err) {
if (err) {
return ee.emit('error', err);
} else {
proceedWithBackup();
}
after('Reset env', function() {
process.env = processEnvCopy;
});
return ee;
};
it('respects the COUCH_URL env variable', function(done) {
process.env.COUCH_URL = 'http://user:pass@myurl.com';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.url, 'string');
assert.strictEqual(config.url, process.env.COUCH_URL);
done();
});
/**
* Validate /_bulk_get support for a specified database.
*
* @param {string} db - nodejs-cloudant db
* @param {function} callback - called on completion with signature (err)
*/
function validateBulkGetSupport(db, callback) {
db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
err = error.convertResponseError(err, function(err) {
switch (err.status) {
case undefined:
// There was no status code on the error
return err;
case 404:
return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
default:
return new error.HTTPError(err);
}
});
callback(err);
it('respects the COUCH_DATABASE env variable', function(done) {
process.env.COUCH_DATABASE = 'mydb';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.db, 'string');
assert.strictEqual(config.db, process.env.COUCH_DATABASE);
done();
});
}
/**
* Download remaining batches in a log file, splitting batches into sets
* to avoid enqueueing too many in one go.
*
* @param {string} log - log file name to maintain download state
* @param {string} db - nodejs-cloudant db
* @param {events.EventEmitter} ee - event emitter to emit received events on
* @param {time} startTime - start time for backup process
* @param {number} batchesPerDownloadSession - max batches to enqueue for
* download at a time. As batches contain many doc IDs, this helps avoid
* exhausting memory.
* @param {number} parallelism - number of concurrent downloads
* @returns function to call to download remaining batches with signature
* (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
*/
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
let total = 0; // running total of documents downloaded so far
let noRemainingBatches = false;
it('respects the COUCH_BUFFER_SIZE env variable', function(done) {
process.env.COUCH_BUFFER_SIZE = '1000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.bufferSize, 'number');
assert.strictEqual(config.bufferSize, 1000);
done();
});
// Generate a set of batches (up to batchesPerDownloadSession) to download from the
// log file and download them. Set noRemainingBatches to `true` for last batch.
function downloadSingleBatchSet(done) {
// Fetch the doc IDs for the batches in the current set to
// download them.
function batchSetComplete(err, data) {
if (!err) {
total = data.total;
}
done(err);
}
function processRetrievedBatches(err, batches) {
if (!err) {
// process them in a parallelised queue
processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
} else {
batchSetComplete(err);
}
}
it('respects the COUCH_PARALLELISM env variable', function(done) {
process.env.COUCH_PARALLELISM = '20';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.parallelism, 'number');
assert.strictEqual(config.parallelism, 20);
done();
});
readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
if (err) {
ee.emit('error', err);
// Stop processing changes file for fatal errors
noRemainingBatches = true;
done();
} else {
if (batchSetIds.length === 0) {
noRemainingBatches = true;
return done();
}
logfilegetbatches(log, batchSetIds, processRetrievedBatches);
}
});
}
it('respects the COUCH_REQUEST_TIMEOUT env variable', function(done) {
process.env.COUCH_REQUEST_TIMEOUT = '10000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.requestTimeout, 'number');
assert.strictEqual(config.requestTimeout, 10000);
done();
});
// Return true if all batches in log file have been downloaded
function isFinished(callback) { callback(null, noRemainingBatches); }
it('respects the CLOUDANT_IAM_API_KEY env variable', function(done) {
const key = 'ABC123-ZYX987_cba789-xyz321';
process.env.CLOUDANT_IAM_API_KEY = key;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamApiKey, 'string');
assert.strictEqual(config.iamApiKey, key);
done();
});
function onComplete() {
ee.emit('finished', { total: total });
}
it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function(done) {
const u = 'https://testhost.example:1234/identity/token';
process.env.CLOUDANT_IAM_TOKEN_URL = u;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamTokenUrl, 'string');
assert.strictEqual(config.iamTokenUrl, u);
done();
});
async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}
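The loop above relies on async's doUntil; a tiny standalone sketch of the same control flow, with illustrative names:

const async = require('async');

let remaining = 3; // pretend there are three batch sets left
async.doUntil(
  function downloadOne(done) {
    console.log('downloading batch set, remaining:', remaining--);
    done();
  },
  function isFinished(callback) { callback(null, remaining <= 0); },
  function onComplete() { console.log('all batch sets downloaded'); }
);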
it('respects the COUCH_LOG env variable', function(done) {
process.env.COUCH_LOG = 'my.log';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.log, 'string');
assert.strictEqual(config.log, process.env.COUCH_LOG);
done();
});
/**
* Return a set of uncompleted download batch IDs from the log file.
*
* @param {string} log - log file path
* @param {number} batchesPerDownloadSession - maximum IDs to return
* @param {function} callback - sign (err, batchSetIds array)
*/
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
logfilesummary(log, function processSummary(err, summary) {
if (!err) {
if (!summary.changesComplete) {
callback(new error.BackupError('IncompleteChangesInLogFile',
'WARNING: Changes did not finish spooling'));
return;
}
if (Object.keys(summary.batches).length === 0) {
return callback(null, []);
}
it('respects the COUCH_RESUME env variable', function(done) {
process.env.COUCH_RESUME = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.resume, 'boolean');
assert.strictEqual(config.resume, true);
done();
});
// batch IDs are the property names of summary.batches
const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
callback(null, batchSetIds);
} else {
callback(err);
}
it('respects the COUCH_OUTPUT env variable', function(done) {
process.env.COUCH_OUTPUT = 'myfile.txt';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.output, 'string');
assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
done();
});
}
/**
* Download a set of batches retrieved from a log file. When a download is
* complete, add a line to the logfile indicating such.
*
* @param {any} db - nodejs-cloudant database
* @param {any} parallelism - number of concurrent requests to make
* @param {any} log - log file to drive downloads from
* @param {any} batches - batches to download
* @param {any} ee - event emitter for progress. This function emits
* received and error events.
* @param {any} start - time backup started, to report deltas
* @param {any} grandtotal - count of documents downloaded prior to this set
* of batches
* @param {any} callback - completion callback, (err, {total: number}).
*/
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
let hasErrored = false;
let total = grandtotal;
it('respects the COUCH_MODE env variable', function(done) {
process.env.COUCH_MODE = 'shallow';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.mode, 'string');
assert.strictEqual(config.mode, 'shallow');
done();
});
// queue to process the fetch requests in an orderly fashion using _bulk_get
const q = async.queue(function(payload, done) {
const output = [];
const thisBatch = payload.batch;
delete payload.batch;
delete payload.command;
function logCompletedBatch(batch) {
if (log) {
fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
} else {
done();
}
}
// do the /db/_bulk_get request
db.service.postBulkGet({
db: db.db,
revs: true,
docs: payload.docs
}).then(response => {
// create an output array with the docs returned
response.result.results.forEach(function(d) {
if (d.docs) {
d.docs.forEach(function(doc) {
if (doc.ok) {
output.push(doc.ok);
}
});
}
});
total += output.length;
const t = (new Date().getTime() - start) / 1000;
ee.emit('received', {
batch: thisBatch,
data: output,
length: output.length,
time: t,
total: total
}, q, logCompletedBatch);
}).catch(err => {
if (!hasErrored) {
hasErrored = true;
err = error.convertResponseError(err);
// Kill the queue for fatal errors
q.kill();
ee.emit('error', err);
}
done();
});
}, parallelism);
for (const i in batches) {
q.push(batches[i]);
}
q.drain(function() {
callback(null, { total: total });
it('respects the COUCH_QUIET env variable', function(done) {
process.env.COUCH_QUIET = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.quiet, 'boolean');
assert.strictEqual(config.quiet, true);
done();
});
}
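The parallelised download above is driven by async.queue; a self-contained sketch of that pattern (timings and names are illustrative):

const async = require('async');

const q = async.queue(function(payload, done) {
  // simulate one /_bulk_get request per batch
  setTimeout(function() {
    console.log('fetched batch', payload.batch);
    done();
  }, 10);
}, 2); // at most two concurrent "requests"

for (let i = 0; i < 5; i++) {
  q.push({ batch: i });
}
q.drain(function() { console.log('all batches fetched'); });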
/**
* Returns first N properties on an object.
*
* @param {object} obj - object with properties
* @param {number} count - number of properties to return
*/
function getPropertyNames(obj, count) {
// decide which batch numbers to deal with
const batchestofetch = [];
let j = 0;
for (const i in obj) {
batchestofetch.push(parseInt(i));
j++;
if (j >= count) break;
}
return batchestofetch;
}
});
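Taken together, the assertions above pin down the environment-to-config mapping. A minimal sketch of exercising it directly (the require path mirrors the test's own import):

const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;

process.env.COUCH_URL = 'http://user:pass@myurl.com';
process.env.COUCH_BUFFER_SIZE = '500';
process.env.COUCH_RESUME = 'true';

const config = {};
applyEnvVars(config);
console.log(config.url);        // 'http://user:pass@myurl.com'
console.log(config.bufferSize); // 500, parsed from string to number
console.log(config.resume);     // true, parsed from string to boolean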

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2022 IBM Corp. All rights reserved.
//

@@ -14,102 +14,68 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global after before describe */
'use strict';
const url = require('url');
const toxy = require('toxy');
// Import the common hooks
require('../test/hooks.js');
const async = require('async');
const error = require('./error.js');
const events = require('events');
const tpoisons = toxy.poisons;
const trules = toxy.rules;
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime();
let batch = 0;
let hasErrored = false;
let startKey = null;
let total = 0;
function setupProxy(poison) {
const backendUrl = new url.URL(process.env.COUCH_BACKEND_URL);
const proxy = toxy({
auth: `${backendUrl.username}:${backendUrl.password}`,
changeOrigin: true
});
async.doUntil(
function(callback) {
// Note, include_docs: true is set automatically when using the
// fetch function.
const opts = { db: db.db, limit: options.bufferSize, includeDocs: true };
// Forward traffic to DB
proxy.forward(process.env.COUCH_BACKEND_URL);
// To avoid double fetching a document solely for the purposes of getting
// the next ID to use as a startKey for the next page we instead use the
// last ID of the current page and append the lowest unicode sort
// character.
if (startKey) opts.startKey = `${startKey}\0`;
db.service.postAllDocs(opts).then(response => {
const body = response.result;
if (!body.rows) {
ee.emit('error', new error.BackupError(
'AllDocsError', 'ERROR: Invalid all docs response'));
callback();
} else {
if (body.rows.length < opts.limit) {
startKey = null; // last batch
} else {
startKey = body.rows[opts.limit - 1].id;
}
switch (poison) {
case 'normal':
// No poisons to add
break;
case 'bandwidth-limit':
// https://github.com/h2non/toxy#bandwidth
// Note the implementation of bandwidth is simplistic and the threshold
// delay is applied to every write of the buffer, so use the smallest
// delay possible and adjust the rate using the bytes size instead.
proxy
.poison(tpoisons.bandwidth({ bytes: 512, threshold: 1 })); // 0.5 MB/s
break;
case 'latency':
// https://github.com/h2non/toxy#latency
proxy
.poison(tpoisons.latency({ max: 1500, min: 250 }))
.withRule(trules.probability(60));
break;
case 'slow-read':
// https://github.com/h2non/toxy#slow-read
// Note this only slows reading of request data, so it affects only non-GET requests.
// In practice this means that it impacts restore much more than backup
// since although backup POSTs to _bulk_get the content is much smaller
// than what is POSTed to _bulk_docs for a restore.
// Similarly to bandwidth-limit use a 1 ms threshold
proxy
.poison(tpoisons.slowRead({ chunk: 256, threshold: 1 }))
// Slow read for 10% of the time, e.g. 10 ms in every 100 ms
.withRule(trules.timeThreshold({ duration: 10, period: 100 }));
break;
case 'rate-limit':
// https://github.com/h2non/toxy#rate-limit
// Simulate the Cloudant free plan with 20 lookups ps and 10 writes ps
proxy.post('/*/_bulk_get')
.poison(tpoisons.rateLimit({ limit: 20, threshold: 1000 }));
proxy.post('/*/_bulk_docs')
.poison(tpoisons.rateLimit({ limit: 10, threshold: 1000 }));
break;
default:
throw Error('Unknown toxy poison ' + poison);
}
const docs = [];
body.rows.forEach(function(doc) {
docs.push(doc.doc);
});
// Catch remaining traffic
proxy.all('/*');
return proxy;
}
if (docs.length > 0) {
ee.emit('received', {
batch: batch++,
data: docs,
length: docs.length,
time: (new Date().getTime() - start) / 1000,
total: total += docs.length
});
}
callback();
}
}).catch(err => {
err = error.convertResponseError(err);
ee.emit('error', err);
hasErrored = true;
callback();
});
},
function(callback) { callback(null, hasErrored || startKey == null); },
function() { ee.emit('finished', { total: total }); }
);
const poisons = [
'normal',
'bandwidth-limit',
'latency',
'slow-read',
'rate-limit'
];
poisons.forEach(function(poison) {
describe('unreliable network tests (using toxy poison ' + poison + ')', function() {
let proxy;
before('start toxy server', function() {
proxy = setupProxy(poison);
console.log('Using toxy poison ' + poison);
// For these tests COUCH_URL points to the toxy proxy on localhost whereas
// COUCH_BACKEND_URL is the real CouchDB instance.
const toxyUrl = new url.URL(process.env.COUCH_URL);
// Listen on the specified hostname only, so if using localhost we don't
// need external connections.
proxy.listen(toxyUrl.port, toxyUrl.hostname);
});
after('stop toxy server', function() {
proxy.close();
});
delete require.cache[require.resolve('../test/ci_e2e.js')];
require('../test/ci_e2e.js');
});
});
return ee;
};
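The startKey trick above (resuming from the last ID plus '\0', the lowest unicode sort character) pages through _all_docs without re-fetching the boundary document. A standalone sketch of the same pattern, assuming the @ibm-cloud/cloudant client and an illustrative database name:

const { CloudantV1 } = require('@ibm-cloud/cloudant');
const service = CloudantV1.newInstance({}); // credentials read from the environment

async function pageAllDocs(dbName, pageSize) {
  let startKey = null;
  let rows;
  do {
    const opts = { db: dbName, limit: pageSize, includeDocs: true };
    // skip just past the last ID of the previous page
    if (startKey) opts.startKey = `${startKey}\0`;
    rows = (await service.postAllDocs(opts)).result.rows;
    rows.forEach(row => console.log(row.id));
    startKey = rows.length < pageSize ? null : rows[rows.length - 1].id;
  } while (startKey);
}

pageAllDocs('exampledb', 500).catch(console.error);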

@@ -14,29 +14,22 @@ // Copyright © 2017 IBM Corp. All rights reserved.

// limitations under the License.
/* global describe it */
'use strict';
// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');
const assert = require('assert');
const logfilesummary = require('../includes/logfilesummary.js');
module.exports = function(onChange) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
let obj = null;
// one change per line - remove the trailing comma
line = line.trim().replace(/,$/, '');
// extract the last_seq at the end of the changes feed
if (line.match(/^"last_seq":/)) {
line = '{' + line;
}
try {
obj = JSON.parse(line);
} catch (e) {
  // ignore lines that are not valid JSON on their own
}
onChange(obj);
done();
};
return change;
};
describe('#unit Fetching summary from the log file', function() {
it('should fetch a summary correctly', function(done) {
logfilesummary('./test/fixtures/test.log', function(err, data) {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(data.changesComplete, true);
assert.strictEqual(typeof data.batches, 'object');
assert.strictEqual(Object.keys(data.batches).length, 2);
assert.deepStrictEqual(data.batches['1'], true);
assert.deepStrictEqual(data.batches['4'], true);
done();
});
});
});
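The fixture parsed above follows the log format used elsewhere in this package: ':t batchN <doc ids>' marks a spooled batch and ':d batchN' marks a completed download. A hypothetical fixture satisfying these assertions would leave batches 1 and 4 pending (the exact changes-complete marker token is an assumption):

:t batch0 [{"id":"doc1"}]
:t batch1 [{"id":"doc2"}]
:t batch2 [{"id":"doc3"}]
:t batch3 [{"id":"doc4"}]
:t batch4 [{"id":"doc5"}]
:changes_complete 5-xyz
:d batch0
:d batch2
:d batch3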

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -14,152 +14,51 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');
const fs = require('fs');
const u = require('./citestutils.js');
module.exports = function(db, bufferSize, parallelism, ee) {
const writer = new stream.Transform({ objectMode: true });
let buffer = [];
let written = 0;
let linenumber = 0;
// this is the queue of chunks that are written to the database
// the queue's payload will be an array of documents to be written,
// the size of the array will be bufferSize. The variable parallelism
// determines how many HTTP requests will occur at any one time.
const q = async.queue(function(payload, cb) {
// if we are restoring known revisions, we need to supply new_edits=false
if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
payload.new_edits = false;
debug('Using new_edits false mode.');
}
if (!didError) {
db.service.postBulkDocs({
db: db.db,
bulkDocs: payload
}).then(response => {
if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
` and ${response.result ? response.result.length : 'unavailable'} items`);
describe('Event tests', function() {
it('should get a finished event when using stdout', function(done) {
u.setTimeout(this, 40);
// Use the API so we can get events
const params = { useApi: true };
const backup = u.testBackup(params, 'animaldb', process.stdout, function(err) {
if (err) {
done(err);
}
});
backup.on('finished', function() {
try {
// Test will time out if the finished event is not emitted
done();
} catch (err) {
done(err);
}
});
});
it('should get a finished event when using file output', function(done) {
u.setTimeout(this, 40);
// Use the API so we can get events
const params = { useApi: true };
const actualBackup = `./${this.fileName}`;
// Create a file and backup to it
const output = fs.createWriteStream(actualBackup);
output.on('open', function() {
const backup = u.testBackup(params, 'animaldb', output, function(err) {
if (err) {
done(err);
}
written += payload.docs.length;
writer.emit('restored', { documents: payload.docs.length, total: written });
cb();
}).catch(err => {
err = error.convertResponseError(err);
debug(`Error writing docs ${err.name} ${err.message}`);
cb(err, payload);
});
}
}, parallelism);
let didError = false;
// write the contents of the buffer to CouchDB in blocks of bufferSize
function processBuffer(flush, callback) {
function taskCallback(err, payload) {
if (err && !didError) {
debug(`Queue task failed with error ${err.name}`);
didError = true;
q.kill();
writer.emit('error', err);
}
}
if (flush || buffer.length >= bufferSize) {
// work through the buffer to break off bufferSize chunks
// and feed the chunks to the queue
do {
// split the buffer into bufferSize chunks
const toSend = buffer.splice(0, bufferSize);
// and add the chunk to the queue
debug(`Adding ${toSend.length} to the write queue.`);
q.push({ docs: toSend }, taskCallback);
} while (buffer.length >= bufferSize);
// send any leftover documents to the queue
if (flush && buffer.length > 0) {
debug(`Adding remaining ${buffer.length} to the write queue.`);
q.push({ docs: buffer }, taskCallback);
}
// wait until the queue size falls to a reasonable level
async.until(
// wait until the queue length drops to twice the parallelism
// or until empty on the last write
function(callback) {
// if we encountered an error, stop this until loop
if (didError) {
return callback(null, true);
}
if (flush) {
callback(null, q.idle() && q.length() === 0);
} else {
callback(null, q.length() <= parallelism * 2);
}
},
function(cb) {
setTimeout(cb, 20);
},
function() {
if (flush && !didError) {
writer.emit('finished', { total: written });
}
// callback when we're happy with the queue size
callback();
});
} else {
callback();
}
}
// take an object
writer._transform = function(obj, encoding, done) {
// each obj that arrives here is a line from the backup file
// it should contain an array of objects. The length of the array
// depends on the bufferSize at backup time.
linenumber++;
if (!didError && obj !== '') {
// see if it parses as JSON
try {
const arr = JSON.parse(obj);
// if it's an array with a length
if (typeof arr === 'object' && arr.length > 0) {
// push each document into a buffer
buffer = buffer.concat(arr);
// pause the stream
// it's likely that the speed with which data can be read from disk
// may exceed the rate it can be written to CouchDB. To prevent
// the whole file being buffered in memory, we pause the stream here.
// it is resumed, when processBuffer calls back and we call done()
this.pause();
// break the buffer in to bufferSize chunks to be written to the database
processBuffer(false, done);
} else {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
backup.on('finished', function() {
try {
// Test will time out if the finished event is not emitted
done();
} catch (err) {
done(err);
}
} catch (e) {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
// Could be an incomplete write that was subsequently resumed
done();
}
} else {
done();
}
};
// called when we need to flush everything
writer._flush = function(done) {
processBuffer(true, done);
};
return writer;
};
});
});
});
});
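A minimal sketch of wiring the writer above into a restore pipeline. The client construction, database name and file path are assumptions; `db` follows the `{ service, db }` shape these modules use:

const fs = require('fs');
const events = require('events');
const { CloudantV1 } = require('@ibm-cloud/cloudant');
const liner = require('./liner.js');
const writer = require('./writer.js');

const db = { service: CloudantV1.newInstance({}), db: 'targetdb' };
const ee = new events.EventEmitter();
ee.on('error', err => console.error('restore error:', err.message));

const w = writer(db, 500, 5, ee); // bufferSize 500, parallelism 5
w.on('restored', m => console.log('restored', m.documents, 'documents, total', m.total));
w.on('error', err => console.error('write failed:', err.message));
w.on('finished', m => console.log('finished, total', m.total));

fs.createReadStream('backup.txt').pipe(liner()).pipe(w);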

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2018, 2021 IBM Corp. All rights reserved.
//

@@ -14,34 +14,80 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global describe it */
'use strict';
// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');
const fs = require('fs');
const readline = require('readline');
const u = require('./citestutils.js');
const uuid = require('uuid').v4;
module.exports = function() {
const liner = new stream.Transform({ objectMode: true });
const params = { useApi: true };
liner._transform = function(chunk, encoding, done) {
let data = chunk.toString();
if (this._lastLineData) {
data = this._lastLineData + data;
}
describe(u.scenario('Concurrent database backups', params), function() {
it('should run concurrent API database backups correctly #slower', function(done) {
// Allow up to 900 s to backup and compare (it should be much faster)!
u.setTimeout(this, 900);
const lines = data.split('\n');
this._lastLineData = lines.splice(lines.length - 1, 1)[0];
let doneCount = 0;
let doneErr;
const finished = function(err) {
doneCount++;
if (doneCount === 2) {
done(doneErr || err);
}
doneErr = err;
};
for (const i in lines) {
this.push(lines[i]);
}
done();
};
const checkForEmptyBatches = function(fileName, cb) {
let foundEmptyBatch = false;
liner._flush = function(done) {
if (this._lastLineData) {
this.push(this._lastLineData);
}
this._lastLineData = null;
done();
};
const rd = readline.createInterface({
input: fs.createReadStream(fileName),
output: fs.createWriteStream('/dev/null'),
terminal: false
});
return liner;
};
rd.on('line', function(line) {
if (JSON.parse(line).length === 0) {
// Note: Empty batch arrays indicate that the running backup is
// incorrectly sharing a log file with another ongoing backup job.
foundEmptyBatch = true;
}
});
rd.on('close', function() {
if (foundEmptyBatch) {
cb(new Error(`Log file '${fileName}' contains empty batches`));
} else {
cb();
}
});
};
// [1] Run 'largedb2g' database backup
const actualBackup1 = `./${uuid()}`;
const output1 = fs.createWriteStream(actualBackup1);
output1.on('open', function() {
u.testBackup(params, 'largedb2g', output1, function(err) {
if (err) {
finished(err);
} else {
checkForEmptyBatches(actualBackup1, finished);
}
});
});
// [2] Run 'largedb1g' database backup
const actualBackup2 = `./${uuid()}`;
const output2 = fs.createWriteStream(actualBackup2);
output2.on('open', function() {
u.testBackup(params, 'largedb1g', output2, function(err) {
if (err) {
finished(err);
} else {
checkForEmptyBatches(actualBackup2, finished);
}
});
});
});
});
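For completeness, the liner transform interleaved above also works on its own; a small sketch (the file name is an assumption):

const fs = require('fs');
const liner = require('./liner.js');

fs.createReadStream('backup.txt')
  .pipe(liner())
  .on('data', function(line) {
    // each 'data' event is one complete line from the input
    console.log('line:', line);
  });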
{
"name": "@cloudant/couchbackup",
"version": "2.9.13-SNAPSHOT.141",
"version": "2.9.13-SNAPSHOT.142",
"description": "CouchBackup - command-line backup utility for Cloudant/CouchDB",

@@ -40,3 +40,3 @@ "homepage": "https://github.com/IBM/couchbackup",

"devDependencies": {
"eslint": "8.49.0",
"eslint": "8.50.0",
"eslint-config-semistandard": "17.0.0",

@@ -43,0 +43,0 @@ "eslint-config-standard": "17.1.0",
