@cloudant/couchbackup - npm Package Compare versions

Comparing version 2.9.14-SNAPSHOT.154 to 2.9.14-SNAPSHOT.155

.scannerwork/scanner-report/changesets-18.pb

.scannerwork/report-task.txt

@@ -6,3 +6,3 @@ projectKey=couchbackup

dashboardUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/dashboard?id=couchbackup&branch=main
ceTaskId=AYtctQF82p71_FFMCu9P
ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYtctQF82p71_FFMCu9P
ceTaskId=AYtdGGVBnLCOWNuhTKbF
ceTaskUrl=https://sonar.cloudantnosqldb.test.cloud.ibm.com/api/ce/task?id=AYtdGGVBnLCOWNuhTKbF

@@ -1,2 +0,2 @@

// Copyright © 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -15,87 +15,266 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it before after beforeEach */
'use strict';
const chunk = require('lodash/chunk');
const difference = require('lodash/difference');
const forOwn = require('lodash/forOwn');
const isEmpty = require('lodash/isEmpty');
const union = require('lodash/union');
const assert = require('assert');
const fs = require('fs');
const u = require('./citestutils.js');
const mockServerPort = +process.env.COUCHBACKUP_MOCK_SERVER_PORT || 7777;
const { once } = require('node:events');
const url = `http://localhost:${mockServerPort}`;
const nock = require('nock');
const httpProxy = require('http-proxy');
const Readable = require('stream').Readable;
const compare = async function(database1, database2, client) {
// check docs same in both dbs
const allDocs1 = await getAllDocs(client, database1);
const allDocs2 = await getAllDocs(client, database2);
// Create an infinite stream to read.
// It just keeps sending a backup line, useful for testing cases of
// termination while a stream has content remaining (the animaldb backup
// is too small for that).
class InfiniteBackupStream extends Readable {
constructor(opt) {
super(opt);
this.contents = Buffer.from('[{"_id":"giraffe","_rev":"3-7665c3e66315ff40616cceef62886bd8","min_weight":830,"min_length":5,"max_weight":1600,"max_length":6,"wiki_page":"http://en.wikipedia.org/wiki/Giraffe","class":"mammal","diet":"herbivore","_revisions":{"start":3,"ids":["7665c3e66315ff40616cceef62886bd8","aaaf10d5a68cdf22d95a5482a0e95549","967a00dff5e02add41819138abb3284d"]}}]\n', 'utf8');
}
const onlyInDb1 = (difference(allDocs1, allDocs2));
const onlyInDb2 = (difference(allDocs2, allDocs1));
_read() {
let proceed;
do {
proceed = this.push(this.contents);
} while (proceed);
}
}
let databasesSame = isEmpty(onlyInDb1) && isEmpty(onlyInDb2);
if (!databasesSame) {
console.log(onlyInDb1.length + ' documents only in db 1.');
console.log('Document IDs only in db 1: ' + onlyInDb1);
console.log(onlyInDb2.length + ' documents only in db 2.');
console.log('Document IDs only in db 2: ' + onlyInDb2);
function assertNock() {
try {
assert.ok(nock.isDone());
} catch (err) {
console.error('pending mocks: %j', nock.pendingMocks());
throw err;
}
}
// check revs same in docs common to both dbs
const partitionSize = 500;
const batches = chunk(union(allDocs1, allDocs2), partitionSize);
async function backupHttpError(opts, errorName, errorCode) {
const p = u.p(opts, { expectedBackupError: { name: errorName, code: errorCode } });
const missingRevsInDb2 = await getMissingRevs(client, database1, database2, batches);
const missingRevsInDb1 = await getMissingRevs(client, database2, database1, batches);
// Create a file and attempt a backup to it
const output = fs.createWriteStream('/dev/null');
return once(output, 'open')
.then(() => {
return u.testBackup(p, 'fakenockdb', output);
}).then(() => {
return assertNock();
});
}
databasesSame = databasesSame && isEmpty(missingRevsInDb1) && isEmpty(missingRevsInDb2);
async function restoreHttpError(opts, errorName, errorCode) {
const q = u.p(opts, { expectedRestoreError: { name: errorName, code: errorCode } });
return u.testRestoreFromFile(q, './test/fixtures/animaldb_expected.json', 'fakenockdb')
.then(() => {
return assertNock();
});
}
if (!databasesSame) {
console.log('Missing revs in db 1:' + JSON.stringify(missingRevsInDb1));
console.log('Missing revs in db 2:' + JSON.stringify(missingRevsInDb2));
}
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#unit Fatal errors', params), function() {
let processEnvCopy;
let proxy;
return databasesSame;
};
before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
const getMissingRevs = async(client, databaseName1, databaseName2, batcheses) => {
const fakeRevisionId = '9999-a';
// Set up a proxy to point to our nock server because the nock override
// isn't visible to the spawned CLI process
if (!params.useApi) {
proxy = httpProxy.createProxyServer({ target: url }).listen(mockServerPort + 1000, 'localhost');
}
const missing = {};
// setup environment variables
process.env.COUCH_URL = (params.useApi) ? url : `http://localhost:${mockServerPort + 1000}`;
});
// look in db1 - use a fake revision ID to fetch all leaf revisions
after('Reset process data', function(done) {
process.env = processEnvCopy;
if (!params.useApi) {
proxy.close(done);
} else {
done();
}
});
for (const batches of batcheses) {
const documentRevisions = {};
batches.forEach(id => (documentRevisions[id] = [fakeRevisionId]));
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
const result1 = await client.postRevsDiff({ db: databaseName1, documentRevisions });
const revsDiffRequestDb2 = {};
forOwn(result1.result, (v, k) => (revsDiffRequestDb2[k] = v.possible_ancestors));
// look in db2
const result2 = await client.postRevsDiff({ db: databaseName2, documentRevisions: revsDiffRequestDb2 });
forOwn(result2.result, (v, k) => {
if ('missing' in v) {
missing[k] = v.missing;
}
describe('for backup', function() {
it('should terminate when DB does not exist', function() {
// Simulate existence check
nock(url).head('/fakenockdb').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on BulkGetError', function() {
// Simulate existence check
const n = nock(url).head('/fakenockdb').reply(200);
// Simulate _bulk_get not available
n.post('/fakenockdb/_bulk_get').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'BulkGetError', 50);
});
it('should terminate on Unauthorized existence check', function() {
// Simulate a 401
nock(url).head('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _reader', function() {
// Simulate a 403
nock(url).head('/fakenockdb').reply(403, { error: 'forbidden', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Forbidden', 12);
});
it('should terminate on _bulk_get HTTPFatalError', function() {
// Provide a mock complete changes log to allow a resume to skip ahead
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/test.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs
// Note: 2 outstanding batches, so 2 responses, 1 mock is optional because we can't guarantee timing
n.post('/fakenockdb/_bulk_get').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_get').query(true).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on NoLogFileName', function() {
// Don't supply a log file name with resume
const p = u.p(params, { opts: { resume: true } });
return backupHttpError(p, 'NoLogFileName', 20);
});
it('should terminate on LogDoesNotExist', function() {
// Use a non-existent log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/doesnotexist.log' } });
return backupHttpError(p, 'LogDoesNotExist', 21);
});
it('should terminate on IncompleteChangesInLogFile', function() {
// Use an incomplete changes log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/incomplete_changes.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Should fail when it reads the incomplete changes
return backupHttpError(p, 'IncompleteChangesInLogFile', 22);
});
it('should terminate on _changes HTTPFatalError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs (note 2 outstanding batches)
n.post('/fakenockdb/_changes').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(params, 'HTTPFatalError', 40);
});
it('should terminate on SpoolChangesError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a changes without a last_seq
n.post('/fakenockdb/_changes').query(true).reply(200,
{
results: [{
seq: '2-g1AAAAEbeJzLYWBgYMlgTmFQSElKzi9KdUhJstTLTS3KLElMT9VLzskvTUnMK9HLSy3JAapkSmRIsv___39WBnMiUy5QgN3MzDIxOdEMWb85dv0gSxThigyN8diS5AAkk-pBFiUyoOkzxKMvjwVIMjQAKaDW_Zh6TQnqPQDRC7I3CwDPDV1k',
id: 'badger',
changes: [{ rev: '4-51aa94e4b0ef37271082033bba52b850' }]
}]
});
return backupHttpError(params, 'SpoolChangesError', 30);
});
});
}
return missing;
};
const getAllDocs = async function(client, database) {
let allDocIds = [];
const limit = 2000;
let startKey = '\u0000';
do {
const pageOfDocIds = (await client.postAllDocs({ db: database, startKey, limit })).result.rows.map(r => r.id);
allDocIds = allDocIds.concat(pageOfDocIds);
if (pageOfDocIds.length < limit) {
startKey = null;
} else {
startKey = pageOfDocIds[limit - 1] + '\u0000';
}
} while (startKey != null);
return allDocIds;
};
describe('for restore', function() {
it('should terminate on Unauthorized db existence check', function() {
// Simulate a 401
nock(url).get('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return restoreHttpError(params, 'Unauthorized', 11);
});
module.exports = {
compare
};
it('should terminate on Forbidden no _writer', function() {
// Simulate the DB exists (i.e. you can read it)
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 403 trying to write
n.post('/fakenockdb/_bulk_docs').reply(403, { error: 'forbidden', reason: '_writer access is required for this request' });
return restoreHttpError(params, 'Forbidden', 12);
});
it('should terminate on RestoreDatabaseNotFound', function() {
// Simulate the DB does not exist
nock(url).get('/fakenockdb').reply(404, { error: 'not_found', reason: 'Database does not exist.' });
return restoreHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on notEmptyDBErr when database is not empty', function() {
// Simulate a DB that exists and is not empty
nock(url).get('/fakenockdb').reply(200, { doc_count: 10, doc_del_count: 0 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on notEmptyDBErr when database is not new', function() {
// Simulate a DB that exists but is not new (it has deleted docs)
nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 10 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Use a parallelism of one and mock one response
const p = u.p(params, { opts: { parallelism: 1 } });
// Simulate a 400 trying to write
n.post('/fakenockdb/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
return restoreHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on _bulk_docs HTTPFatalError from system database', function() {
// Simulate that target database exists and is _not_ empty.
// This should pass validator as we exclude system databases from the check.
const n = nock(url).get('/_replicator').reply(200, { doc_count: 1, doc_del_count: 0 });
// Simulate a 400 trying to write
n.post('/_replicator/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use a parallelism of one and mock one response
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return u.testRestore(q, new InfiniteBackupStream(), '_replicator').then(() => {
return assertNock();
});
});
it('should terminate on _bulk_docs HTTPFatalError large stream', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write
// Provide a body function to handle the stream, but allow any body
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use only parallelism 1 so we don't have to mock up loads of responses
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return u.testRestore(q, new InfiniteBackupStream(), 'fakenockdb').then(() => {
return assertNock();
});
});
it('should terminate on multiple _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write docs, 5 times because of default parallelism
// Provide a body function to handle the stream, but allow any body
// Four of the mocks are optional because, with parallelism 5, we can't guarantee that the exit will happen
// after all 5 requests, but at least one of them must be made
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).times(4).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
const q = u.p(params, { opts: { bufferSize: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return restoreHttpError(q, 'HTTPFatalError', 40);
});
});
});
});
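
For orientation, the pattern these fatal-error cases wrap via u.testBackup can be sketched directly against the package's documented backup(srcUrl, targetStream, opts, callback) API; this is only an illustrative sketch, and the port and database name simply mirror the mocks above rather than anything the diff defines outside the tests.

const fs = require('fs');
const nock = require('nock');
const couchbackup = require('@cloudant/couchbackup');

// Simulate the existence check failing, as in the DatabaseNotFound case above
nock('http://localhost:7777').head('/fakenockdb').reply(404, { error: 'not_found', reason: 'missing' });

couchbackup.backup('http://localhost:7777/fakenockdb', fs.createWriteStream('/dev/null'), {}, function(err) {
  // Expect a fatal error named DatabaseNotFound (exit code 10 when run via the CLI)
  console.log(err && err.name);
});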

@@ -18,25 +18,13 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const logfilegetbatches = require('../includes/logfilegetbatches.js');
const u = require('./citestutils.js');
describe('#unit Fetching batches from a log file', function() {
it('should fetch multiple batches correctly', async function() {
return new Promise((resolve, reject) => {
logfilegetbatches('./test/fixtures/test.log', [1, 4], function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(typeof data, 'object');
assert.strictEqual(Object.keys(data).length, 2);
assert.deepStrictEqual(data['1'].docs, [{ id: '6' }, { id: '7' }, { id: '8' }, { id: '9' }, { id: '10' }]);
assert.strictEqual(data['1'].batch, 1);
assert.deepStrictEqual(data['4'].docs, [{ id: '21' }, { id: '22' }]);
assert.strictEqual(data['4'].batch, 4);
resolve();
} catch (err) {
reject(err);
}
});
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#slowest End to end backup and restore', params), function() {
// 10 GB is about the largest the CI can handle before getting very upset
// about how long things are taking
it('should backup and restore largedb10g', async function() {
u.setTimeout(this, 350 * 60);
return u.testDirectBackupAndRestore(params, 'largedb10g', this.dbName);
});
});
});

@@ -18,2 +18,3 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const fs = require('fs');

@@ -24,85 +25,79 @@ const { once } = require('node:events');

[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Basic backup and restore', params), function() {
it('should backup animaldb to a file correctly', async function() {
// Allow up to 40 s to backup and compare (it should be much faster)!
u.setTimeout(this, 40);
describe(u.scenario('Resume tests', params), function() {
it('should create a log file', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
// Create a file and backup to it
const output = fs.createWriteStream(actualBackup);
return once(output, 'open')
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile } });
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
assert.ok(fs.existsSync(logFile), 'The log file should exist.');
});
});
it('should restore corrupted animaldb to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_corrupted.json');
const dbName = this.dbName;
const p = u.p(params, { expectedRestoreErrorRecoverable: { name: 'BackupFileJsonError' } });
return once(input, 'open')
.then(() => {
return u.testBackup(params, 'animaldb', output);
return u.testRestore(p, input, dbName);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected.json');
return u.dbCompare('animaldb', dbName);
});
});
it('should restore animaldb to a database correctly', async function() {
it('should restore resumed animaldb with blank line to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_expected.json');
const input = fs.createReadStream('./test/fixtures/animaldb_resumed_blank.json');
const dbName = this.dbName;
return once(input, 'open').then(() => {
return u.testRestore(params, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should execute a shallow mode backup successfully', async function() {
// Allow 30 s
u.setTimeout(this, 30);
const actualBackup = `./${this.fileName}`;
const output = fs.createWriteStream(actualBackup);
// Add the shallow mode option
const p = u.p(params, { opts: { mode: 'shallow' } });
return once(output, 'open')
return once(input, 'open')
.then(() => {
return u.testBackup(p, 'animaldb', output);
return u.testRestore(params, input, dbName);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected_shallow.json');
return u.dbCompare('animaldb', dbName);
});
});
});
});
describe(u.scenario('Buffer size tests', params), function() {
it('should backup/restore animaldb with the same buffer size', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } });
return u.testBackupAndRestoreViaFile(p, 'animaldb', actualBackup, this.dbName);
});
describe('Resume tests', function() {
// Currently cannot abort API backups, when we do this test should be run for
// both API and CLI
it('should correctly backup and restore backup10m', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 90);
it('should backup/restore animaldb with backup buffer > restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 2 } }); // backup
const q = u.p(params, { opts: { bufferSize: 1 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
// Use abort parameter to terminate the backup
const p = u.p(params, { abort: true }, { opts: { log: logFile } });
const restoreDb = this.dbName;
// Set an exclusive maximum: fewer docs than this should be written during the resumed backup.
p.exclusiveMaxExpected = 5096;
it('should backup/restore animaldb with backup buffer < restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } }); // backup
const q = u.p(params, { opts: { bufferSize: 2 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
});
return u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
});
// Note --output is only valid for CLI usage, this test should only run for CLI
const params = { useApi: false };
it('should correctly backup and restore backup10m using --output', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 90);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
// Use abort parameter to terminate the backup
const p = u.p(params, { abort: true }, { opts: { output: actualBackup, log: logFile } });
const restoreDb = this.dbName;
// Set an exclusive maximum: fewer docs than this should be written during the resumed backup.
p.exclusiveMaxExpected = 5096;
return await u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
});
});
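
The abort-then-resume flow exercised above can be sketched with the same public API; as a rough sketch only, the opts names log and resume mirror the CLI's --log and --resume flags seen in these tests, the append flag is assumed, and the file and database names are illustrative.

const fs = require('fs');
const couchbackup = require('@cloudant/couchbackup');

// A first run wrote a partial backup plus backup10m.log before being aborted; this
// resumes it, appending only the batches the log records as incomplete.
couchbackup.backup(
  `${process.env.COUCH_URL}/backup10m`,
  fs.createWriteStream('backup10m.txt', { flags: 'a' }),
  { log: 'backup10m.log', resume: true },
  function(err, data) {
    if (err) {
      console.error(`${err.name}: ${err.message}`);
    } else {
      console.log('Resumed backup completed', data);
    }
  });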

@@ -18,26 +18,21 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const fs = require('fs');
const u = require('./citestutils.js');
describe('Write error tests', function() {
it('calls callback with error set when stream is not writeable', async function() {
u.setTimeout(this, 10);
const dirname = fs.mkdtempSync('test_backup_');
// make temp dir read only
fs.chmodSync(dirname, 0o444);
const filename = dirname + '/test.backup';
const backupStream = fs.createWriteStream(filename, { flags: 'w' });
describe('Event tests', function() {
it('should get a finished event when using stdout', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events, pass eventEmitter so we get the emitter back
const params = { useApi: true, useStdOut: true };
// All API backups now set an event listener for finished and it is part of the backup
// promise, so if the backup passes the finished event fired.
return u.testBackup(params, 'animaldb', process.stdout);
});
it('should get a finished event when using file output', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events, pass eventEmitter so we get the emitter back
const params = { useApi: true };
// try to do backup and check err was set in callback
return u.testBackup(params, 'animaldb', backupStream).then(() => {
assert.fail('Should throw an "EACCES" error');
}).catch((resultErr) => {
// cleanup temp dir
fs.rmdirSync(dirname);
// error should have been set
assert.ok(resultErr);
assert.strictEqual(resultErr.code, 'EACCES');
});
const actualBackup = `./${this.fileName}`;
return u.testBackupToFile(params, 'animaldb', actualBackup);
});
});

@@ -15,24 +15,52 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it */
/* global beforeEach afterEach */
'use strict';
const u = require('./citestutils.js');
const { CloudantV1 } = require('@ibm-cloud/cloudant');
const url = new URL((process.env.COUCH_BACKEND_URL) ? process.env.COUCH_BACKEND_URL : 'https://no-couch-backend-url-set.test');
const { BasicAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const authenticator = (url.username) ? new BasicAuthenticator({ username: url.username, password: decodeURIComponent(url.password) }) : new NoAuthAuthenticator();
const serviceOpts = {
authenticator: authenticator
};
const cloudant = new CloudantV1(serviceOpts);
// Remove auth from URL before using for service
cloudant.setServiceUrl(new URL(url.pathname, url.origin).toString());
const uuid = require('uuid').v4;
const fs = require('fs');
describe('Event tests', function() {
it('should get a finished event when using stdout', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events, pass eventEmitter so we get the emitter back
const params = { useApi: true, useStdOut: true };
// All API backups now set an event listener for finished and it is part of the backup
// promise, so if the backup passes the finished event fired.
return u.testBackup(params, 'animaldb', process.stdout);
});
// Mocha hooks that will be at the root context so run for all tests
it('should get a finished event when using file output', async function() {
u.setTimeout(this, 40);
// Use the API so we can get events, pass eventEmitter so we get the emitter back
const params = { useApi: true };
const actualBackup = `./${this.fileName}`;
return u.testBackupToFile(params, 'animaldb', actualBackup);
beforeEach('Create test database', async function() {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to create the DB
this.timeout(10 * 1000);
const unique = uuid();
this.fileName = `${unique}`;
this.dbName = 'couchbackup_test_' + unique;
return cloudant.putDatabase({ db: this.dbName });
}
});
afterEach('Delete test database', async function() {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to delete the DB
this.timeout(10 * 1000);
deleteIfExists(this.fileName);
deleteIfExists(`${this.fileName}.log`);
return cloudant.deleteDatabase({ db: this.dbName });
}
});
function deleteIfExists(fileName) {
fs.unlink(fileName, function(err) {
if (err) {
if (err.code !== 'ENOENT') {
console.error(`${err.code} ${err.message}`);
}
}
});
});
}

@@ -15,19 +15,170 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it */
/* global describe it beforeEach */
'use strict';
const u = require('./citestutils.js');
const assert = require('assert');
const backup = require('../includes/shallowbackup.js');
const request = require('../includes/request.js');
const fs = require('fs');
const nock = require('nock');
describe('Encryption tests', function() {
// Note CLI only to use openssl command
const p = { useApi: false, encryption: true };
// Function to create a DB object and call the shallow backup function
// This is normally done by app.js
function shallowBackup(dbUrl, opts) {
const db = request.client(dbUrl, opts);
// Disable compression to make body assertions easier
db.service.setEnableGzipCompression(false);
return backup(db, opts);
}
it('should backup and restore animaldb via an encrypted file', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const encryptedBackup = `./${this.fileName}`;
return u.testBackupAndRestoreViaFile(p, 'animaldb', encryptedBackup, this.dbName).then(() => {
return u.assertEncryptedFile(encryptedBackup);
// Note all these tests include a body parameter of include_docs and a query
// string of include_docs because of a quirk of nano that when using the fetch
// method always adds the include_docs query string.
describe('#unit Perform backup using shallow backup', function() {
const dbUrl = 'http://localhost:5984/animaldb';
// Query string keys are stringified by Nano
const badgerKey = 'badger\0';
const kookaburraKey = 'kookaburra\0';
const snipeKey = 'snipe\0';
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
it('should perform a shallow backup', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
reject(err);
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('should perform a shallow backup with transient error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - transient error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(500, { error: 'Internal Server Error' })
// batch 3 - retry
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
assert.strictEqual(err.name, 'HTTPError');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('should fail to perform a shallow backup on fatal error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - fatal error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(401, { error: 'Unauthorized' });
let errCount = 0;
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
errCount++;
assert.strictEqual(err.name, 'Unauthorized');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
assert.strictEqual(data.length, 3);
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 6);
assert.ok(couch.isDone());
assert.strictEqual(errCount, 1);
resolve();
} catch (err) {
reject(err);
}
});
});
});
});

@@ -18,21 +18,25 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

delete require.cache[require.resolve('./citestutils.js')];
const u = require('./citestutils.js');
const assert = require('assert');
const logfilegetbatches = require('../includes/logfilegetbatches.js');
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('End to end backup and restore', params), function() {
it('should backup and restore animaldb', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
return u.testDirectBackupAndRestore(params, 'animaldb', this.dbName);
describe('#unit Fetching batches from a log file', function() {
it('should fetch multiple batches correctly', async function() {
return new Promise((resolve, reject) => {
logfilegetbatches('./test/fixtures/test.log', [1, 4], function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(typeof data, 'object');
assert.strictEqual(Object.keys(data).length, 2);
assert.deepStrictEqual(data['1'].docs, [{ id: '6' }, { id: '7' }, { id: '8' }, { id: '9' }, { id: '10' }]);
assert.strictEqual(data['1'].batch, 1);
assert.deepStrictEqual(data['4'].docs, [{ id: '21' }, { id: '22' }]);
assert.strictEqual(data['4'].batch, 4);
resolve();
} catch (err) {
reject(err);
}
});
});
it('should backup and restore largedb1g #slow', async function() {
// Allow up to 30 m for backup and restore of largedb1g
// This is a long time but when many builds run in parallel it can take a
// while to get this done.
u.setTimeout(this, 30 * 60);
return u.testDirectBackupAndRestore(params, 'largedb1g', this.dbName);
});
});
});

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2018, 2023 IBM Corp. All rights reserved.
//

@@ -15,170 +15,62 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it beforeEach */
/* global describe it */
'use strict';
const assert = require('assert');
const backup = require('../includes/shallowbackup.js');
const request = require('../includes/request.js');
const fs = require('fs');
const nock = require('nock');
const { once } = require('node:events');
const readline = require('readline');
const u = require('./citestutils.js');
const uuid = require('uuid').v4;
// Function to create a DB object and call the shallow backup function
// This is normally done by app.js
function shallowBackup(dbUrl, opts) {
const db = request.client(dbUrl, opts);
// Disable compression to make body assertions easier
db.service.setEnableGzipCompression(false);
return backup(db, opts);
}
const params = { useApi: true };
// Note all these tests include a body parameter of include_docs and a query
// string of include_docs because of a quirk of nano that when using the fetch
// method always adds the include_docs query string.
describe('#unit Perform backup using shallow backup', function() {
const dbUrl = 'http://localhost:5984/animaldb';
// Query string keys are stringified by Nano
const badgerKey = 'badger\0';
const kookaburraKey = 'kookaburra\0';
const snipeKey = 'snipe\0';
describe(u.scenario('Concurrent database backups', params), function() {
it('should run concurrent API database backups correctly #slower', async function() {
// Allow up to 900 s to backup and compare (it should be much faster)!
u.setTimeout(this, 900);
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
const checkForEmptyBatches = async function(fileName) {
let foundEmptyBatch = false;
it('should perform a shallow backup', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
const rd = readline.createInterface({
input: fs.createReadStream(fileName),
output: fs.createWriteStream('/dev/null'),
terminal: false
});
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
reject(err);
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
rd.on('line', function(line) {
if (JSON.parse(line).length === 0) {
// Note: Empty batch arrays indicate that the running backup is
// incorrectly sharing a log file with another ongoing backup job.
foundEmptyBatch = true;
}
});
it('should perform a shallow backup with transient error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - transient error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(500, { error: 'Internal Server Error' })
// batch 3 - retry
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_3.json', 'utf8')))
// batch 4
.post('/_all_docs', { limit: 3, start_key: snipeKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_4.json', 'utf8')));
rd.on('close', function() {
if (foundEmptyBatch) {
return Promise.reject(new Error(`Log file '${fileName}' contains empty batches`));
} else {
return Promise.resolve();
}
});
};
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
assert.strictEqual(err.name, 'HTTPError');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
if (data.batch === 3) {
assert.strictEqual(data.length, 2); // smaller last batch
} else {
assert.strictEqual(data.length, 3);
}
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 11);
assert.ok(couch.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
const backupPromise = async function() {
const actualBackup = `./${uuid()}`;
const output = fs.createWriteStream(actualBackup);
return once(output, 'open').then(() => {
return u.testBackup(params, 'largedb1g', output);
}).then(() => {
return checkForEmptyBatches(actualBackup);
});
};
it('should fail to perform a shallow backup on fatal error', async function() {
const couch = nock(dbUrl)
// batch 1
.post('/_all_docs', { limit: 3, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_1.json', 'utf8')))
// batch 2
.post('/_all_docs', { limit: 3, start_key: badgerKey, include_docs: true })
.reply(200, JSON.parse(fs.readFileSync('./test/fixtures/animaldb_all_docs_2.json', 'utf8')))
// batch 3 - fatal error
.post('/_all_docs', { limit: 3, start_key: kookaburraKey, include_docs: true })
.reply(401, { error: 'Unauthorized' });
// [1] Run 'largedb1g' database backup
const backup1 = backupPromise();
let errCount = 0;
// [2] Run 'largedb1g' database backup
const backup2 = backupPromise();
return new Promise((resolve, reject) => {
shallowBackup(dbUrl, { bufferSize: 3, parallelism: 1 })
.on('error', function(err) {
try {
errCount++;
assert.strictEqual(err.name, 'Unauthorized');
} catch (err) {
reject(err);
}
})
.on('received', function(data) {
try {
assert.strictEqual(data.length, 3);
} catch (err) {
reject(err);
}
})
.on('finished', function(data) {
try {
assert.strictEqual(data.total, 6);
assert.ok(couch.isDone());
assert.strictEqual(errCount, 1);
resolve();
} catch (err) {
reject(err);
}
});
});
return Promise.all([backup1, backup2]);
});
});

@@ -1,2 +0,2 @@

// Copyright © 2018, 2023 IBM Corp. All rights reserved.
// Copyright © 2017, 2023 IBM Corp. All rights reserved.
//

@@ -18,59 +18,24 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const fs = require('fs');
const { once } = require('node:events');
const readline = require('readline');
const u = require('./citestutils.js');
const uuid = require('uuid').v4;
const assert = require('assert');
const logfilesummary = require('../includes/logfilesummary.js');
const params = { useApi: true };
describe(u.scenario('Concurrent database backups', params), function() {
it('should run concurrent API database backups correctly #slower', async function() {
// Allow up to 900 s to backup and compare (it should be much faster)!
u.setTimeout(this, 900);
const checkForEmptyBatches = async function(fileName) {
let foundEmptyBatch = false;
const rd = readline.createInterface({
input: fs.createReadStream(fileName),
output: fs.createWriteStream('/dev/null'),
terminal: false
});
rd.on('line', function(line) {
if (JSON.parse(line).length === 0) {
// Note: Empty batch arrays indicate that the running backup is
// incorrectly sharing a log file with another ongoing backup job.
foundEmptyBatch = true;
describe('#unit Fetching summary from the log file', function() {
it('should fetch a summary correctly', function() {
return new Promise((resolve, reject) => {
logfilesummary('./test/fixtures/test.log', function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(data.changesComplete, true);
assert.strictEqual(typeof data.batches, 'object');
assert.strictEqual(Object.keys(data.batches).length, 2);
assert.deepStrictEqual(data.batches['1'], true);
assert.deepStrictEqual(data.batches['4'], true);
resolve();
} catch (err) {
reject(err);
}
});
rd.on('close', function() {
if (foundEmptyBatch) {
return Promise.reject(new Error(`Log file '${fileName}' contains empty batches`));
} else {
return Promise.resolve();
}
});
};
const backupPromise = async function() {
const actualBackup = `./${uuid()}`;
const output = fs.createWriteStream(actualBackup);
return once(output, 'open').then(() => {
return u.testBackup(params, 'largedb1g', output);
}).then(() => {
return checkForEmptyBatches(actualBackup);
});
};
// [1] Run 'largedb1g' database backup
const backup1 = backupPromise();
// [2] Run 'largedb1g' database backup
const backup2 = backupPromise();
return Promise.all([backup1, backup2]);
});
});
});

@@ -18,46 +18,21 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const fs = require('fs');
const { once } = require('node:events');
delete require.cache[require.resolve('./citestutils.js')];
const u = require('./citestutils.js');
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Compression tests', params), function() {
const p = u.p(params, { compression: true });
it('should backup animaldb to a compressed file', async function() {
// Allow up to 60 s for backup of animaldb
u.setTimeout(this, 60);
const compressedBackup = `./${this.fileName}`;
const output = fs.createWriteStream(compressedBackup);
return once(output, 'open')
.then(() => {
return u.testBackup(p, 'animaldb', output);
}).then(() => {
return u.assertGzipFile(compressedBackup);
});
});
it('should backup and restore animaldb via a compressed file', async function() {
describe(u.scenario('End to end backup and restore', params), function() {
it('should backup and restore animaldb', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const compressedBackup = `./${this.fileName}`;
return u.testBackupAndRestoreViaFile(p, 'animaldb', compressedBackup, this.dbName).then(() => {
return u.assertGzipFile(compressedBackup);
});
return u.testDirectBackupAndRestore(params, 'animaldb', this.dbName);
});
it('should backup and restore animaldb via a compressed stream', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
return u.testDirectBackupAndRestore(p, 'animaldb', this.dbName);
it('should backup and restore largedb1g #slow', async function() {
// Allow up to 30 m for backup and restore of largedb1g
// This is a long time but when many builds run in parallel it can take a
// while to get this done.
u.setTimeout(this, 30 * 60);
return u.testDirectBackupAndRestore(params, 'largedb1g', this.dbName);
});
it('should backup and restore largedb2g via a compressed file #slower', async function() {
// Takes ~ 25 min using CLI, but sometimes over an hour with API
u.setTimeout(this, 180 * 60);
const compressedBackup = `./${this.fileName}`;
params.compression = true;
return u.testBackupAndRestoreViaFile(p, 'largedb2g', compressedBackup, this.dbName);
});
});
});

@@ -18,24 +18,90 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const logfilesummary = require('../includes/logfilesummary.js');
const fs = require('fs');
const { once } = require('node:events');
const u = require('./citestutils.js');
describe('#unit Fetching summary from the log file', function() {
it('should fetch a summary correctly', function() {
return new Promise((resolve, reject) => {
logfilesummary('./test/fixtures/test.log', function(err, data) {
try {
assert.ok(!err);
assert.ok(data);
assert.strictEqual(data.changesComplete, true);
assert.strictEqual(typeof data.batches, 'object');
assert.strictEqual(Object.keys(data.batches).length, 2);
assert.deepStrictEqual(data.batches['1'], true);
assert.deepStrictEqual(data.batches['4'], true);
resolve();
} catch (err) {
reject(err);
}
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Basic backup and restore', params), function() {
it('should backup animaldb to a file correctly', async function() {
// Allow up to 40 s to backup and compare (it should be much faster)!
u.setTimeout(this, 40);
const actualBackup = `./${this.fileName}`;
// Create a file and backup to it
const output = fs.createWriteStream(actualBackup);
return once(output, 'open')
.then(() => {
return u.testBackup(params, 'animaldb', output);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected.json');
});
});
it('should restore animaldb to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_expected.json');
const dbName = this.dbName;
return once(input, 'open').then(() => {
return u.testRestore(params, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should execute a shallow mode backup successfully', async function() {
// Allow 30 s
u.setTimeout(this, 30);
const actualBackup = `./${this.fileName}`;
const output = fs.createWriteStream(actualBackup);
// Add the shallow mode option
const p = u.p(params, { opts: { mode: 'shallow' } });
return once(output, 'open')
.then(() => {
return u.testBackup(p, 'animaldb', output);
}).then(() => {
return u.readSortAndDeepEqual(actualBackup, './test/fixtures/animaldb_expected_shallow.json');
});
});
describe(u.scenario('Buffer size tests', params), function() {
it('should backup/restore animaldb with the same buffer size', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } });
return u.testBackupAndRestoreViaFile(p, 'animaldb', actualBackup, this.dbName);
});
it('should backup/restore animaldb with backup buffer > restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 2 } }); // backup
const q = u.p(params, { opts: { bufferSize: 1 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should backup/restore animaldb with backup buffer < restore buffer', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const dbName = this.dbName;
const p = u.p(params, { opts: { log: logFile, bufferSize: 1 } }); // backup
const q = u.p(params, { opts: { bufferSize: 2 } }); // restore
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
return u.testRestoreFromFile(q, actualBackup, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
});
});
});

@@ -15,138 +15,87 @@ // Copyright © 2023 IBM Corp. All rights reserved.

/* global */
'use strict';
const { fork, spawn } = require('node:child_process');
const { once } = require('node:events');
const { Duplex } = require('node:stream');
const debug = require('debug');
const logProcess = debug('couchbackup:test:process');
const chunk = require('lodash/chunk');
const difference = require('lodash/difference');
const forOwn = require('lodash/forOwn');
const isEmpty = require('lodash/isEmpty');
const union = require('lodash/union');
class TestProcess {
constructor(cmd, args, mode) {
this.cmd = cmd;
// Child process stdio [stdin, stdout, stderr, ...extra channels]
const childProcessOptions = { stdio: [] };
switch (mode) {
case 'readable':
// Readable only, no writing to stdin so ignore it
childProcessOptions.stdio = ['ignore', 'pipe', 'inherit'];
break;
case 'writable':
// Writable only, no reading from stdout so ignore it
childProcessOptions.stdio = ['pipe', 'ignore', 'inherit'];
break;
default:
// Default Duplex mode pipe both stdin and stdout
childProcessOptions.stdio = ['pipe', 'pipe', 'inherit'];
break;
}
if (cmd.endsWith('.js')) {
// Add Node fork ipc channel
childProcessOptions.stdio.push('ipc');
logProcess(`Forking Node process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = fork(cmd, args, childProcessOptions);
} else {
logProcess(`Spawning process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = spawn(cmd, args, childProcessOptions);
}
const compare = async function(database1, database2, client) {
// check docs same in both dbs
const allDocs1 = await getAllDocs(client, database1);
const allDocs2 = await getAllDocs(client, database2);
this.childProcessPromise = once(this.childProcess, 'close').then(() => {
const code = this.childProcess.exitCode;
const signal = this.childProcess.signalCode;
logProcess(`Test process ${cmd} closed with code ${code} and signal ${signal}`);
if (code === 0) {
logProcess(`Resolving process promise for ${cmd}`);
return Promise.resolve(code);
} else {
const e = new Error(`Test child process ${cmd} exited with code ${code} and ${signal}. This may be normal for error case testing.`);
e.code = code;
e.signal = signal;
logProcess(`Will reject process promise for ${cmd} with ${e}`);
return Promise.reject(e);
}
});
const onlyInDb1 = (difference(allDocs1, allDocs2));
const onlyInDb2 = (difference(allDocs2, allDocs1));
switch (mode) {
case 'readable':
this.duplexFrom = this.childProcess.stdout;
break;
case 'writable':
this.duplexFrom = this.childProcess.stdin;
break;
default:
// Default is duplex
this.duplexFrom = { writable: this.childProcess.stdin, readable: this.childProcess.stdout };
}
let databasesSame = isEmpty(onlyInDb1) && isEmpty(onlyInDb2);
this.stream = Duplex.from(this.duplexFrom);
if (!databasesSame) {
console.log(onlyInDb1.length + ' documents only in db 1.');
console.log('Document IDs only in db 1: ' + onlyInDb1);
console.log(onlyInDb2.length + ' documents only in db 2.');
console.log('Document IDs only in db 2: ' + onlyInDb2);
}
}
module.exports = {
TestProcess,
cliBackup: function(databaseName, params = {}) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.mode) {
args.push('--mode');
args.push(params.opts.mode);
// check revs same in docs common to both dbs
const partitionSize = 500;
const batches = chunk(union(allDocs1, allDocs2), partitionSize);
const missingRevsInDb2 = await getMissingRevs(client, database1, database2, batches);
const missingRevsInDb1 = await getMissingRevs(client, database2, database1, batches);
databasesSame = databasesSame && isEmpty(missingRevsInDb1) && isEmpty(missingRevsInDb2);
if (!databasesSame) {
console.log('Missing revs in db 1:' + JSON.stringify(missingRevsInDb1));
console.log('Missing revs in db 2:' + JSON.stringify(missingRevsInDb2));
}
return databasesSame;
};
const getMissingRevs = async(client, databaseName1, databaseName2, batcheses) => {
const fakeRevisionId = '9999-a';
const missing = {};
// look in db1 - use a fake revision ID to fetch all leaf revisions
for (const batches of batcheses) {
const documentRevisions = {};
batches.forEach(id => (documentRevisions[id] = [fakeRevisionId]));
const result1 = await client.postRevsDiff({ db: databaseName1, documentRevisions });
const revsDiffRequestDb2 = {};
forOwn(result1.result, (v, k) => (revsDiffRequestDb2[k] = v.possible_ancestors));
// look in db2
const result2 = await client.postRevsDiff({ db: databaseName2, documentRevisions: revsDiffRequestDb2 });
forOwn(result2.result, (v, k) => {
if ('missing' in v) {
missing[k] = v.missing;
}
if (params.opts.output) {
args.push('--output');
args.push(params.opts.output);
}
if (params.opts.log) {
args.push('--log');
args.push(params.opts.log);
}
if (params.opts.resume) {
args.push('--resume');
args.push(params.opts.resume);
}
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchbackup.bin.js', args, 'readable');
},
cliRestore: function(databaseName, params) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.parallelism) {
args.push('--parallelism');
args.push(params.opts.parallelism);
}
if (params.opts.requestTimeout) {
args.push('--request-timeout');
args.push(params.opts.requestTimeout);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchrestore.bin.js', args, 'writable');
},
cliGzip: function() {
return new TestProcess('gzip', []);
},
cliGunzip: function() {
return new TestProcess('gunzip', []);
},
cliEncrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-pass', 'pass:12345']);
},
cliDecrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-d', '-pass', 'pass:12345']);
});
}
return missing;
};
const getAllDocs = async function(client, database) {
let allDocIds = [];
const limit = 2000;
let startKey = '\u0000';
do {
const pageOfDocIds = (await client.postAllDocs({ db: database, startKey, limit })).result.rows.map(r => r.id);
allDocIds = allDocIds.concat(pageOfDocIds);
if (pageOfDocIds.length < limit) {
startKey = null;
} else {
startKey = pageOfDocIds[limit - 1] + '\u0000';
}
} while (startKey != null);
return allDocIds;
};
module.exports = {
compare
};
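
As a usage sketch of the compare helper exported above: it resolves true only when both databases hold the same doc IDs and revisions, and it takes a CloudantV1 client configured the same way as in the event-test hunk earlier in this diff. The require path and credentials below are assumptions for illustration; the diff does not show where this module lives.

const { CloudantV1 } = require('@ibm-cloud/cloudant');
const { BasicAuthenticator } = require('ibm-cloud-sdk-core');
// Path assumed for illustration only
const { compare } = require('./citestutils/compare.js');

const client = new CloudantV1({
  authenticator: new BasicAuthenticator({ username: 'user', password: 'pass' })
});
client.setServiceUrl('https://example.cloudant.test');

compare('animaldb', 'couchbackup_test_example', client).then(function(same) {
  console.log(same ? 'databases match' : 'databases differ');
});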

@@ -1,2 +0,2 @@

// Copyright © 2017, 2023 IBM Corp. All rights reserved.
// Copyright © 2023 IBM Corp. All rights reserved.
//

@@ -15,16 +15,138 @@ // Licensed under the Apache License, Version 2.0 (the "License");

/* global describe it */
/* global */
'use strict';
const u = require('./citestutils.js');
const { fork, spawn } = require('node:child_process');
const { once } = require('node:events');
const { Duplex } = require('node:stream');
const debug = require('debug');
const logProcess = debug('couchbackup:test:process');
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#slowest End to end backup and restore', params), function() {
// 10 GB is about the largest the CI can handle before getting very upset
// about how long things are taking
it('should backup and restore largedb10g', async function() {
u.setTimeout(this, 350 * 60);
return u.testDirectBackupAndRestore(params, 'largedb10g', this.dbName);
class TestProcess {
constructor(cmd, args, mode) {
this.cmd = cmd;
// Child process stdio [stdin, stdout, stderr, ...extra channels]
const childProcessOptions = { stdio: [] };
switch (mode) {
case 'readable':
// Readable only, no writing to stdin so ignore it
childProcessOptions.stdio = ['ignore', 'pipe', 'inherit'];
break;
case 'writable':
// Writable only, no reading from stdout so ignore it
childProcessOptions.stdio = ['pipe', 'ignore', 'inherit'];
break;
default:
// Default Duplex mode pipe both stdin and stdout
childProcessOptions.stdio = ['pipe', 'pipe', 'inherit'];
break;
}
if (cmd.endsWith('.js')) {
// Add Node fork ipc channel
childProcessOptions.stdio.push('ipc');
logProcess(`Forking Node process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = fork(cmd, args, childProcessOptions);
} else {
logProcess(`Spawning process for ${cmd} with stdio:[${childProcessOptions.stdio}]`);
this.childProcess = spawn(cmd, args, childProcessOptions);
}
this.childProcessPromise = once(this.childProcess, 'close').then(() => {
const code = this.childProcess.exitCode;
const signal = this.childProcess.signalCode;
logProcess(`Test process ${cmd} closed with code ${code} and signal ${signal}`);
if (code === 0) {
logProcess(`Resolving process promise for ${cmd}`);
return Promise.resolve(code);
} else {
const e = new Error(`Test child process ${cmd} exited with code ${code} and signal ${signal}. This may be normal for error case testing.`);
e.code = code;
e.signal = signal;
logProcess(`Will reject process promise for ${cmd} with ${e}`);
return Promise.reject(e);
}
});
});
});
switch (mode) {
case 'readable':
this.duplexFrom = this.childProcess.stdout;
break;
case 'writable':
this.duplexFrom = this.childProcess.stdin;
break;
default:
// Default is duplex
this.duplexFrom = { writable: this.childProcess.stdin, readable: this.childProcess.stdout };
}
this.stream = Duplex.from(this.duplexFrom);
}
}
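// A minimal sketch of combining these helpers (the output file name and a
// require of 'node:fs' are assumptions for illustration): pipe a CLI backup
// through gzip by connecting the duplex streams defined below, e.g.
//   const backup = cliBackup('exampledb');
//   const zip = cliGzip();
//   backup.stream.pipe(zip.stream).pipe(fs.createWriteStream('backup.txt.gz'));
//   await Promise.all([backup.childProcessPromise, zip.childProcessPromise]);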
module.exports = {
TestProcess,
cliBackup: function(databaseName, params = {}) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.mode) {
args.push('--mode');
args.push(params.opts.mode);
}
if (params.opts.output) {
args.push('--output');
args.push(params.opts.output);
}
if (params.opts.log) {
args.push('--log');
args.push(params.opts.log);
}
if (params.opts.resume) {
args.push('--resume');
args.push(params.opts.resume);
}
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchbackup.bin.js', args, 'readable');
},
cliRestore: function(databaseName, params) {
const args = ['--db', databaseName];
if (params.opts) {
if (params.opts.bufferSize) {
args.push('--buffer-size');
args.push(params.opts.bufferSize);
}
if (params.opts.parallelism) {
args.push('--parallelism');
args.push(params.opts.parallelism);
}
if (params.opts.requestTimeout) {
args.push('--request-timeout');
args.push(params.opts.requestTimeout);
}
if (params.opts.iamApiKey) {
args.push('--iam-api-key');
args.push(params.opts.iamApiKey);
}
}
return new TestProcess('./bin/couchrestore.bin.js', args, 'writable');
},
cliGzip: function() {
return new TestProcess('gzip', []);
},
cliGunzip: function() {
return new TestProcess('gunzip', []);
},
cliEncrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-pass', 'pass:12345']);
},
cliDecrypt: function() {
return new TestProcess('openssl', ['aes-128-cbc', '-d', '-pass', 'pass:12345']);
}
};

@@ -15,266 +15,19 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it before after beforeEach */
/* global describe it */
'use strict';
const assert = require('assert');
const fs = require('fs');
const u = require('./citestutils.js');
const mockServerPort = +process.env.COUCHBACKUP_MOCK_SERVER_PORT || 7777;
const { once } = require('node:events');
const url = `http://localhost:${mockServerPort}`;
const nock = require('nock');
const httpProxy = require('http-proxy');
const Readable = require('stream').Readable;
// Create an infinite stream to read.
// It just keeps sending a backup line, useful for testing cases of
// termination while a stream has content remaining (the animaldb backup
// is too small for that).
class InfiniteBackupStream extends Readable {
constructor(opt) {
super(opt);
this.contents = Buffer.from('[{"_id":"giraffe","_rev":"3-7665c3e66315ff40616cceef62886bd8","min_weight":830,"min_length":5,"max_weight":1600,"max_length":6,"wiki_page":"http://en.wikipedia.org/wiki/Giraffe","class":"mammal","diet":"herbivore","_revisions":{"start":3,"ids":["7665c3e66315ff40616cceef62886bd8","aaaf10d5a68cdf22d95a5482a0e95549","967a00dff5e02add41819138abb3284d"]}}]\n', 'utf8');
}
describe('Encryption tests', function() {
// Note: CLI only, so that the openssl command can be used for encryption
const p = { useApi: false, encryption: true };
_read() {
let proceed;
do {
proceed = this.push(this.contents);
} while (proceed);
}
}
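// This stream is used further below in the form
//   u.testRestore(opts, new InfiniteBackupStream(), 'fakenockdb')
// to drive a restore that is expected to terminate while data remains unread.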
function assertNock() {
try {
assert.ok(nock.isDone());
} catch (err) {
console.error('pending mocks: %j', nock.pendingMocks());
throw err;
}
}
async function backupHttpError(opts, errorName, errorCode) {
const p = u.p(opts, { expectedBackupError: { name: errorName, code: errorCode } });
// Create a file and attempt a backup to it
const output = fs.createWriteStream('/dev/null');
return once(output, 'open')
.then(() => {
return u.testBackup(p, 'fakenockdb', output);
}).then(() => {
return assertNock();
it('should backup and restore animaldb via an encrypted file', async function() {
// Allow up to 60 s for backup and restore of animaldb
u.setTimeout(this, 60);
const encryptedBackup = `./${this.fileName}`;
return u.testBackupAndRestoreViaFile(p, 'animaldb', encryptedBackup, this.dbName).then(() => {
return u.assertEncryptedFile(encryptedBackup);
});
}
async function restoreHttpError(opts, errorName, errorCode) {
const q = u.p(opts, { expectedRestoreError: { name: errorName, code: errorCode } });
return u.testRestoreFromFile(q, './test/fixtures/animaldb_expected.json', 'fakenockdb')
.then(() => {
return assertNock();
});
}
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('#unit Fatal errors', params), function() {
let processEnvCopy;
let proxy;
before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
// Set up a proxy to point to our nock server because the nock override
// isn't visible to the spawned CLI process
if (!params.useApi) {
proxy = httpProxy.createProxyServer({ target: url }).listen(mockServerPort + 1000, 'localhost');
}
// setup environment variables
process.env.COUCH_URL = (params.useApi) ? url : `http://localhost:${mockServerPort + 1000}`;
});
after('Reset process data', function(done) {
process.env = processEnvCopy;
if (!params.useApi) {
proxy.close(done);
} else {
done();
}
});
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
describe('for backup', function() {
it('should terminate when DB does not exist', function() {
// Simulate existence check
nock(url).head('/fakenockdb').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on BulkGetError', function() {
// Simulate existence check
const n = nock(url).head('/fakenockdb').reply(200);
// Simulate _bulk_get not available
n.post('/fakenockdb/_bulk_get').reply(404, { error: 'not_found', reason: 'missing' });
return backupHttpError(params, 'BulkGetError', 50);
});
it('should terminate on Unauthorized existence check', function() {
// Simulate a 401
nock(url).head('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _reader', function() {
// Simulate a 403
nock(url).head('/fakenockdb').reply(403, { error: 'forbidden', reason: '_reader access is required for this request' });
return backupHttpError(params, 'Forbidden', 12);
});
it('should terminate on _bulk_get HTTPFatalError', function() {
// Provide a mock complete changes log to allow a resume to skip ahead
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/test.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs
// Note: 2 outstanding batches, so 2 responses, 1 mock is optional because we can't guarantee timing
n.post('/fakenockdb/_bulk_get').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_get').query(true).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on NoLogFileName', function() {
// Don't supply a log file name with resume
const p = u.p(params, { opts: { resume: true } });
return backupHttpError(p, 'NoLogFileName', 20);
});
it('should terminate on LogDoesNotExist', function() {
// Use a non-existent log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/doesnotexist.log' } });
return backupHttpError(p, 'LogDoesNotExist', 21);
});
it('should terminate on IncompleteChangesInLogFile', function() {
// Use an incomplete changes log file
const p = u.p(params, { opts: { resume: true, log: './test/fixtures/incomplete_changes.log' } });
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Should fail when it reads the incomplete changes
return backupHttpError(p, 'IncompleteChangesInLogFile', 22);
});
it('should terminate on _changes HTTPFatalError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a fatal HTTP error when trying to fetch docs (note 2 outstanding batches)
n.post('/fakenockdb/_changes').query(true).reply(400, { error: 'bad_request', reason: 'testing bad response' });
return backupHttpError(params, 'HTTPFatalError', 40);
});
it('should terminate on SpoolChangesError', function() {
// Allow the existence and _bulk_get checks to pass
const n = nock(url).head('/fakenockdb').reply(200);
n.post('/fakenockdb/_bulk_get').reply(200, '{"results": []}');
// Simulate a changes without a last_seq
n.post('/fakenockdb/_changes').query(true).reply(200,
{
results: [{
seq: '2-g1AAAAEbeJzLYWBgYMlgTmFQSElKzi9KdUhJstTLTS3KLElMT9VLzskvTUnMK9HLSy3JAapkSmRIsv___39WBnMiUy5QgN3MzDIxOdEMWb85dv0gSxThigyN8diS5AAkk-pBFiUyoOkzxKMvjwVIMjQAKaDW_Zh6TQnqPQDRC7I3CwDPDV1k',
id: 'badger',
changes: [{ rev: '4-51aa94e4b0ef37271082033bba52b850' }]
}]
});
return backupHttpError(params, 'SpoolChangesError', 30);
});
});
describe('for restore', function() {
it('should terminate on Unauthorized db existence check', function() {
// Simulate a 401
nock(url).get('/fakenockdb').reply(401, { error: 'unauthorized', reason: '_reader access is required for this request' });
return restoreHttpError(params, 'Unauthorized', 11);
});
it('should terminate on Forbidden no _writer', function() {
// Simulate the DB exists (i.e. you can read it)
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 403 trying to write
n.post('/fakenockdb/_bulk_docs').reply(403, { error: 'forbidden', reason: '_writer access is required for this request' });
return restoreHttpError(params, 'Forbidden', 12);
});
it('should terminate on RestoreDatabaseNotFound', function() {
// Simulate the DB does not exist
nock(url).get('/fakenockdb').reply(404, { error: 'not_found', reason: 'Database does not exist.' });
return restoreHttpError(params, 'DatabaseNotFound', 10);
});
it('should terminate on notEmptyDBErr when database is not empty', function() {
// Simulate a DB that exists and is not empty
nock(url).get('/fakenockdb').reply(200, { doc_count: 10, doc_del_count: 0 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on notEmptyDBErr when database is not new', function() {
// Simulate a DB that exists and is not new (it has deleted documents)
nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 10 });
return restoreHttpError(params, 'DatabaseNotEmpty', 13);
});
it('should terminate on _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Use a parallelism of one and mock one response
const p = u.p(params, { opts: { parallelism: 1 } });
// Simulate a 400 trying to write
n.post('/fakenockdb/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
return restoreHttpError(p, 'HTTPFatalError', 40);
});
it('should terminate on _bulk_docs HTTPFatalError from system database', function() {
// Simulate that the target database exists and is _not_ empty.
// This should pass the validator because system databases are excluded from the check.
const n = nock(url).get('/_replicator').reply(200, { doc_count: 1, doc_del_count: 0 });
// Simulate a 400 trying to write
n.post('/_replicator/_bulk_docs').reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use a parallelism of one and mock one response
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return u.testRestore(q, new InfiniteBackupStream(), '_replicator').then(() => {
return assertNock();
});
});
it('should terminate on _bulk_docs HTTPFatalError large stream', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write
// Provide a body function to handle the stream, but allow any body
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
// Use only parallelism 1 so we don't have to mock up loads of responses
const q = u.p(params, { opts: { parallelism: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return u.testRestore(q, new InfiniteBackupStream(), 'fakenockdb').then(() => {
return assertNock();
});
});
it('should terminate on multiple _bulk_docs HTTPFatalError', function() {
// Simulate the DB exists
const n = nock(url).get('/fakenockdb').reply(200, { doc_count: 0, doc_del_count: 0 });
// Simulate a 400 trying to write docs, 5 times because of default parallelism
// Provide a body function to handle the stream, but allow any body
// Four of the mocks are optional because with parallelism 5 we can't guarantee that the exit will happen
// after all 5 requests, but we must see at least one of them
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).reply(400, { error: 'bad_request', reason: 'testing bad response' });
n.post('/fakenockdb/_bulk_docs', function(body) { return true; }).times(4).optionally().reply(400, { error: 'bad_request', reason: 'testing bad response' });
const q = u.p(params, { opts: { bufferSize: 1 }, expectedRestoreError: { name: 'HTTPFatalError', code: 40 } });
return restoreHttpError(q, 'HTTPFatalError', 40);
});
});
});
});

@@ -15,121 +15,316 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it beforeEach */
/* global describe afterEach before after it */
'use strict';
const assert = require('assert');
const fs = require('fs');
const nock = require('nock');
const request = require('../includes/request.js');
const writer = require('../includes/writer.js');
const noopEmitter = new (require('events')).EventEmitter();
const liner = require('../includes/liner.js');
const { once } = require('node:events');
const { pipeline } = require('node:stream/promises');
const longTestTimeout = 3000;
const parser = require('../includes/parser.js');
describe('#unit Check database restore writer', function() {
const dbUrl = 'http://localhost:5984/animaldb';
const db = request.client(dbUrl, { parallelism: 1 });
describe('#unit Default parameters', function() {
let processEnvCopy;
let processArgvCopy;
beforeEach('Reset nocks', function() {
nock.cleanAll();
before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
processArgvCopy = JSON.parse(JSON.stringify(process.argv));
// setup environment variables
process.env.COUCH_URL = 'http://user:pass@myurl.com';
process.env.COUCH_DATABASE = 'mydb';
process.env.COUCH_BUFFER_SIZE = '1000';
process.env.COUCH_PARALLELISM = '20';
process.env.COUCH_REQUEST_TIMEOUT = '20000';
process.env.COUCH_LOG = 'my.log';
process.env.COUCH_RESUME = 'true';
process.env.COUCH_OUTPUT = 'myfile.txt';
process.env.COUCH_MODE = 'shallow';
process.env.CLOUDANT_IAM_API_KEY = 'ABC123-ZYX987_cba789-xyz321';
process.env.COUCH_QUIET = 'true';
});
it('should complete successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, []); // success
after('Reset process data', function() {
process.env = processEnvCopy;
process.argv = processArgvCopy;
});
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
afterEach(function() {
delete require.cache[require.resolve('commander')];
});
it('should terminate on a fatal error', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(401, { error: 'Unauthorized' }); // fatal error
describe('Backup command-line', function() {
it('respects the COUCH_URL env variable if the --url backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Unauthorized');
assert.strictEqual(err.message, 'Access is denied due to invalid credentials.');
assert.ok(nock.isDone());
return true;
}
);
});
it('respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('should retry on transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(200, { ok: true }); // third time lucky success
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
}).timeout(longTestTimeout);
it('respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('should fail after 3 transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(503, { error: 'Service Unavailable' }); // Final transient error
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 : post ${dbUrl}/_bulk_docs - Error: Service Unavailable`);
assert.ok(nock.isDone());
return true;
}
);
}).timeout(longTestTimeout);
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('should restore shallow backups without rev info successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ ok: true, id: 'foo', rev: '1-abc' }]); // success
it('respects the COUCH_LOG env variable if the --log backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, process.env.COUCH_LOG);
});
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_old_shallow.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 11);
assert.ok(nock.isDone());
})]);
it('respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
it('respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, process.env.COUCH_OUTPUT);
});
it('respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, process.env.COUCH_MODE);
});
it('respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the backup --url command-line parameter', function() {
const url = 'http://user:pass@myurl2.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the backup --db command-line parameter', function() {
const db = 'mydb2';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the backup --buffer-size command-line parameter', function() {
const bufferSize = 500;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the backup --parallelism command-line parameter', function() {
const parallelism = 10;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the backup --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the backup --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the backup --log command-line parameter', function() {
const filename = 'my2.log';
process.argv = ['node', 'test', '--log', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, filename);
});
it('respects the backup --resume command-line parameter', function() {
process.argv = ['node', 'test', '--resume'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
it('respects the backup --output command-line parameter', function() {
const filename = 'myfile2.txt';
process.argv = ['node', 'test', '--output', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, filename);
});
it('respects the backup --mode full command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'full'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'full');
});
it('respects the backup --mode shallow command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'shallow'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'shallow');
});
it('respects the backup --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
it('should get a batch error for non-empty array response with new_edits false', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ id: 'foo', error: 'foo', reason: 'bar' }]);
describe('Restore command-line', function() {
it('respects the COUCH_URL env variable if the --url restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Error');
assert.strictEqual(err.message, 'Error writing batch with new_edits:false and 1 items');
assert.ok(nock.isDone());
return true;
}
);
it('respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('respects the COUCH_QUIET env variable if the --quiet restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the restore --url command-line parameter', function() {
const url = 'https://a:b@myurl3.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the restore --db command-line parameter', function() {
const db = 'mydb3';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the restore --buffer-size command-line parameter', function() {
const bufferSize = 250;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the restore --parallelism command-line parameter', function() {
const parallelism = 5;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the restore --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the restore --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the restore --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
});
});

@@ -15,174 +15,200 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it */
/* global describe it beforeEach */
'use strict';
const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const nock = require('nock');
const util = require('util');
const backupPromise = util.promisify(backup);
const request = require('../includes/request.js');
const error = require('../includes/error.js');
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl so that the backup fails early with the expected DatabaseNotFound error.
const validateArgs = async function(url, opts, errorValidationForAssertRejects) {
const nullStream = fs.createWriteStream('/dev/null');
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
}
return assert.rejects(backupPromise(url, nullStream, opts), errorValidationForAssertRejects);
};
const url = 'http://localhost:7777/testdb';
const db = request.client(url, { parallelism: 1 });
const timeoutDb = request.client(url, { parallelism: 1, requestTimeout: 500 });
const longTestTimeout = 3000;
const validateShallowModeArgs = async function(url, opts, msg) {
// We pass assertNoValidationError because for these shallow opts
// we are expecting only a stderr warning
return validateArgs(url, opts, assertNoValidationError()).then(() => {
// Assert the warning message was in stderr
assert(capturedStderr.indexOf(msg) > -1, 'Log warning message was not present');
beforeEach('Clean nock', function() {
nock.cleanAll();
});
describe('#unit Check request headers', function() {
it('should have a couchbackup user-agent', async function() {
const couch = nock(url)
.matchHeader('user-agent', /couchbackup-cloudant\/\d+\.\d+\.\d+(?:-SNAPSHOT)? \(Node.js v\d+\.\d+\.\d+\)/)
.head('/good')
.reply(200);
return db.service.headDocument({ db: db.db, docId: 'good' }).then(() => {
assert.ok(couch.isDone());
});
});
};
});
const stderrWriteFun = process.stderr.write;
let capturedStderr;
describe('#unit Check request response error callback', function() {
it('should not callback with error for 200 response', async function() {
const couch = nock(url)
.get('/good')
.reply(200, { ok: true });
function captureStderr() {
process.stderr.write = function(string, encoding, fd) {
capturedStderr += string;
};
}
return db.service.getDocument({ db: db.db, docId: 'good' }).then(response => {
assert.ok(response.result);
assert.ok(couch.isDone());
});
});
function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}
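// Used below in the pattern:
//   captureStderr();
//   return validateShallowModeArgs(...).finally(() => { releaseStderr(); });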
it('should callback with error after 3 500 responses', async function() {
const couch = nock(url)
.get('/bad')
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
});
// Return a validation object for use with assert.rejects
function assertErrorMessage(msg) {
return { name: 'InvalidOption', message: msg };
}
return assert.rejects(
db.service.getDocument({ db: db.db, docId: 'bad' }),
(err) => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
return true;
});
}).timeout(longTestTimeout);
// For cases where validation should pass we reach a real backup that hits a 404
// mock, so DatabaseNotFound is the expected error in the assertNoValidationError case
function assertNoValidationError() { return { name: 'DatabaseNotFound' }; }
it('should callback with error after 3 POST 503 responses', async function() {
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.times(3)
.reply(503, function(uri, requestBody) {
this.req.response.statusMessage = 'Service Unavailable';
return { error: 'service_unavailable', reason: 'Service unavailable' };
});
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', async function() {
return validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string'));
return assert.rejects(
db.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
(err) => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 Service Unavailable: post ${url}/_bulk_get - Error: service_unavailable, Reason: Service unavailable`);
assert.ok(couch.isDone());
return true;
});
}).timeout(longTestTimeout);
it('should callback with error after 3 429 responses', async function() {
const couch = nock(url)
.get('/bad')
.times(3)
.reply(429, function(uri, requestBody) {
this.req.response.statusMessage = 'Too Many Requests';
return { error: 'foo', reason: 'bar' };
});
return assert.rejects(
db.service.getDocument({ db: db.db, docId: 'bad' }),
(err) => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `429 Too Many Requests: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
return true;
});
}).timeout(longTestTimeout);
it('should callback with fatal error for 404 response', async function() {
const couch = nock(url)
.get('/bad')
.reply(404, function(uri, requestBody) {
this.req.response.statusMessage = 'Not Found';
return { error: 'foo', reason: 'bar' };
});
return assert.rejects(
db.service.getDocument({ db: db.db, docId: 'bad' }),
(err) => {
err = error.convertResponseError(err);
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `404 Not Found: get ${url}/bad - Error: foo, Reason: bar`);
assert.ok(couch.isDone());
return true;
});
});
it('returns no error for valid URL type', async function() {
return validateArgs(goodUrl, {}, assertNoValidationError());
});
it('returns error for invalid (no host) URL', async function() {
return validateArgs('http://', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (no protocol) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (wrong protocol) URL', async function() {
return validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.'));
});
it('returns error for invalid (no path) URL', async function() {
return validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).'));
});
it('returns error for invalid (no protocol, no host) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: 123 }, assertNoValidationError());
});
it('returns error for invalid log type', async function() {
return validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string'));
});
it('returns no error for valid log type', async function() {
return validateArgs(goodUrl, { log: 'log.txt' }, assertNoValidationError());
});
it('returns error for invalid mode type', async function() {
return validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns error for invalid mode string', async function() {
return validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns no error for valid mode type', async function() {
return validateArgs(goodUrl, { mode: 'full' }, assertNoValidationError());
});
it('returns error for invalid output type', async function() {
return validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string'));
});
it('returns no error for valid output type', async function() {
return validateArgs(goodUrl, { output: 'output.txt' }, assertNoValidationError());
});
it('returns error for invalid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: 123 }, assertNoValidationError());
});
it('returns error for invalid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: 123 }, assertNoValidationError());
});
it('returns error for invalid resume type', async function() {
return validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean'));
});
it('returns no error for valid resume type', async function() {
return validateArgs(goodUrl, { resume: false }, assertNoValidationError());
});
it('returns error for invalid key type', async function() {
return validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string'));
});
it('returns error for key and URL credentials supplied', async function() {
return validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.'));
});
it('warns for log arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test' },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
it('should callback with same error for no status code error response', async function() {
const couch = nock(url)
.get('/bad')
.times(3)
.replyWithError('testing badness');
return assert.rejects(
db.service.getDocument({ db: db.db, docId: 'bad' }),
(err) => {
const err2 = error.convertResponseError(err);
assert.strictEqual(err, err2);
assert.ok(couch.isDone());
return true;
});
}).timeout(longTestTimeout);
it('should retry request if HTTP request gets timed out', async function() {
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.delay(1000)
.reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }] } })
.post('/_bulk_get')
.query(true)
.reply(200, { results: { docs: [{ id: '1', ok: { _id: '1' } }, { id: '2', ok: { _id: '2' } }] } });
return timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }).then((response) => {
assert.ok(response);
assert.ok(response.result);
assert.ok(response.result.results);
assert.ok(response.result.results.docs);
assert.strictEqual(response.result.results.docs.length, 2);
assert.ok(couch.isDone());
});
});
it('warns for resume arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
it('should callback with error code ESOCKETTIMEDOUT if 3 HTTP requests gets timed out', async function() {
// Increase the timeout for this test to allow for the delays
this.timeout(3000);
const couch = nock(url)
.post('/_bulk_get')
.query(true)
.delay(1000)
.times(3)
.reply(200, { ok: true });
return assert.rejects(
timeoutDb.service.postBulkGet({ db: db.db, revs: true, docs: [] }),
(err) => {
err = error.convertResponseError(err);
// Note axios returns ECONNABORTED rather than ESOCKETTIMEDOUT
// See https://github.com/axios/axios/issues/2710 via https://github.com/axios/axios/issues/1543
assert.strictEqual(err.statusText, 'ECONNABORTED');
assert.strictEqual(err.message, `timeout of 500ms exceeded: post ${url}/_bulk_get ECONNABORTED`);
assert.ok(couch.isDone());
return true;
});
});
it('warns for parallelism arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', parallelism: 10 },
'the option "parallelism" has no effect when using shallow mode.').finally(
() => {
releaseStderr();
describe('#unit Check credentials', function() {
it('should properly decode username and password', async function() {
const username = 'user%123';
const password = 'colon:at@321';
const url = `http://${encodeURIComponent(username)}:${encodeURIComponent(password)}@localhost:7777/testdb`;
const sessionUrl = 'http://localhost:7777';
const couch = nock(sessionUrl)
.post('/_session', { username: username, password: password })
.reply(200, { ok: true }, { 'Set-Cookie': 'AuthSession=ABC123DEF4356;' })
.get('/')
.reply(200);
const db = request.client(url, { parallelism: 1 });
return db.service.getServerInformation().then(response => {
assert.ok(response);
assert.ok(couch.isDone());
});
});
});
});

@@ -1,3 +0,2 @@

#!/usr/bin/env node
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -15,68 +14,102 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// limitations under the License.
/* global after before describe */
'use strict';
const error = require('../includes/error.js');
const fs = require('fs');
const cliutils = require('../includes/cliutils.js');
const couchbackup = require('../app.js');
const parser = require('../includes/parser.js');
const debug = require('debug');
const backupDebug = debug('couchbackup:backup');
const backupBatchDebug = debug('couchbackup:backup:batch');
const url = require('url');
const toxy = require('toxy');
// Import the common hooks
require('../test/hooks.js');
backupDebug.enabled = true;
const tpoisons = toxy.poisons;
const trules = toxy.rules;
try {
const program = parser.parseBackupArgs();
const databaseUrl = cliutils.databaseUrl(program.url, program.db);
function setupProxy(poison) {
const backendUrl = new url.URL(process.env.COUCH_BACKEND_URL);
const proxy = toxy({
auth: `${backendUrl.username}:${backendUrl.password}`,
changeOrigin: true
});
const opts = {
bufferSize: program.bufferSize,
log: program.log,
mode: program.mode,
parallelism: program.parallelism,
requestTimeout: program.requestTimeout,
resume: program.resume,
iamApiKey: program.iamApiKey,
iamTokenUrl: program.iamTokenUrl
};
// Forward traffic to DB
proxy.forward(process.env.COUCH_BACKEND_URL);
// log configuration to console
console.error('='.repeat(80));
console.error('Performing backup on ' + databaseUrl.replace(/\/\/.+@/g, '//****:****@') + ' using configuration:');
console.error(JSON.stringify(opts, null, 2).replace(/"iamApiKey": "[^"]+"/, '"iamApiKey": "****"'));
console.error('='.repeat(80));
switch (poison) {
case 'normal':
// No poisons to add
break;
case 'bandwidth-limit':
// https://github.com/h2non/toxy#bandwidth
// Note the implementation of bandwidth is simplistic and the threshold
// delay is applied to every write of the buffer, so use the smallest
// delay possible and adjust the rate using the bytes size instead.
proxy
.poison(tpoisons.bandwidth({ bytes: 512, threshold: 1 })); // 0.5 MB/s
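// i.e. roughly 512 bytes allowed per 1 ms threshold window,
// 512 bytes/ms ≈ 512 KB/s ≈ 0.5 MB/s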
break;
case 'latency':
// https://github.com/h2non/toxy#latency
proxy
.poison(tpoisons.latency({ max: 1500, min: 250 }))
.withRule(trules.probability(60));
break;
case 'slow-read':
// https://github.com/h2non/toxy#slow-read
// Note this only impacts read of data from requests so only for non-GET
// In practice this means that it impacts restore much more than backup
// since although backup POSTs to _bulk_get the content is much smaller
// than what is POSTed to _bulk_docs for a restore.
// Similarly to bandwidth-limit use a 1 ms threshold
proxy
.poison(tpoisons.slowRead({ chunk: 256, threshold: 1 }))
// Slow read for 10 % of the time e.g. 10 ms in every 100
.withRule(trules.timeThreshold({ duration: 10, period: 100 }));
break;
case 'rate-limit':
// https://github.com/h2non/toxy#rate-limit
// Simulate the Cloudant free plan with 20 lookups per second and 10 writes per second
proxy.post('/*/_bulk_get')
.poison(tpoisons.rateLimit({ limit: 20, threshold: 1000 }));
proxy.post('/*/_bulk_docs')
.poison(tpoisons.rateLimit({ limit: 10, threshold: 1000 }));
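// i.e. at most 20 _bulk_get and 10 _bulk_docs requests per 1000 ms window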
break;
default:
throw Error('Unknown toxy poison ' + poison);
}
backupBatchDebug.enabled = !program.quiet;
// Catch remaining traffic
proxy.all('/*');
return proxy;
}
let ws = process.stdout;
const poisons = [
'normal',
'bandwidth-limit',
'latency',
'slow-read',
'rate-limit'
];
// open output file
if (program.output) {
let flags = 'w';
if (program.log && program.resume) {
flags = 'a';
}
const fd = fs.openSync(program.output, flags);
ws = fs.createWriteStream(null, { fd });
}
poisons.forEach(function(poison) {
describe('unreliable network tests (using toxy poison ' + poison + ')', function() {
let proxy;
backupDebug('Fetching all database changes...');
before('start toxy server', function() {
proxy = setupProxy(poison);
console.log('Using toxy poison ' + poison);
return couchbackup.backup(
databaseUrl,
ws,
opts,
error.terminationCallback
).on('changes', function(batch) {
backupBatchDebug('Total batches received:', batch + 1);
}).on('written', function(obj) {
backupBatchDebug('Written batch ID:', obj.batch, 'Total document revisions written:', obj.total, 'Time:', obj.time);
}).on('error', function(e) {
backupDebug('ERROR', e);
}).on('finished', function(obj) {
backupDebug('Finished - Total document revisions written:', obj.total);
// For these tests COUCH_URL points to the toxy proxy on localhost whereas
// COUCH_BACKEND_URL is the real CouchDB instance.
const toxyUrl = new url.URL(process.env.COUCH_URL);
// Listen on the specified hostname only, so if using localhost we don't
// need external connections.
proxy.listen(toxyUrl.port, toxyUrl.hostname);
});
after('stop toxy server', function() {
proxy.close();
});
delete require.cache[require.resolve('../test/ci_e2e.js')];
require('../test/ci_e2e.js');
});
} catch (err) {
error.terminationCallback(err);
}
});

@@ -16,115 +16,164 @@ // Copyright © 2017, 2021 IBM Corp. All rights reserved.

const cliutils = require('./cliutils.js');
const config = require('./config.js');
const error = require('./error.js');
const path = require('path');
const pkg = require('../package.json');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');
function parseBackupArgs() {
const program = require('commander');
const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
process.version + ')';
// Option CLI defaults
const defaults = config.cliDefaults();
// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
constructor(options) {
super(options);
this.data = [];
}
// Options set by environment variables
const envVarOptions = {};
config.applyEnvironmentVariables(envVarOptions);
program
.version(pkg.version)
.description('Backup a CouchDB/Cloudant database to a backup text file.')
.usage('[options...]')
.option('-b, --buffer-size <n>',
cliutils.getUsage('number of documents fetched at once', defaults.bufferSize),
Number)
.option('-d, --db <db>',
cliutils.getUsage('name of the database to backup', defaults.db))
.option('-k, --iam-api-key <API key>',
cliutils.getUsage('IAM API key to access the Cloudant server'))
.option('-l, --log <file>',
cliutils.getUsage('file to store logging information during backup; invalid in "shallow" mode', 'a temporary file'),
path.normalize)
.option('-m, --mode <mode>',
cliutils.getUsage('"shallow" if only a superficial backup is done (ignoring conflicts and revision tokens), else "full" for complete backup', defaults.mode),
(mode) => { return mode.toLowerCase(); })
.option('-o, --output <file>',
cliutils.getUsage('file name to store the backup data', 'stdout'),
path.normalize)
.option('-p, --parallelism <n>',
cliutils.getUsage('number of HTTP requests to perform in parallel when performing a backup; ignored in "shallow" mode', defaults.parallelism),
Number)
.option('-q, --quiet',
cliutils.getUsage('suppress batch messages', defaults.quiet))
.option('-r, --resume',
cliutils.getUsage('continue a previous backup from its last known position; invalid in "shallow" mode', defaults.resume))
.option('-t, --request-timeout <n>',
cliutils.getUsage('milliseconds to wait for a response to an HTTP request before retrying the request', defaults.requestTimeout),
Number)
.option('-u, --url <url>',
cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
.parse(process.argv);
// Remove defaults that don't apply when using shallow mode
if (program.opts().mode === 'shallow' || envVarOptions.mode === 'shallow') {
delete defaults.parallelism;
delete defaults.log;
delete defaults.resume;
_write(chunk, encoding, callback) {
this.data.push(chunk);
callback();
}
// Apply the options in order so that the CLI overrides env vars and env variables
// override defaults.
const opts = Object.assign({}, defaults, envVarOptions, program.opts());
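// e.g. (illustrative) with COUCH_BUFFER_SIZE=1000 in the environment and
// --buffer-size 500 on the command line, opts.bufferSize ends up as 500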
if (opts.resume && (opts.log === defaults.log)) {
// If resuming and the log file arg is the newly generated tmp name from defaults then we know that --log wasn't specified.
// We have to do this check here for the CLI case because of the default.
error.terminationCallback(new error.BackupError('NoLogFileName', 'To resume a backup, a log file must be specified'));
stringBody() {
return Buffer.concat(this.data).toString();
}
return opts;
}
function parseRestoreArgs() {
const program = require('commander');
// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
let method;
let requestUrl;
if (err.response) {
if (err.response.config.url) {
requestUrl = err.response.config.url;
method = err.response.config.method;
}
// Override the status text with an improved message
let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
`${method} ${requestUrl}`;
if (err.response.data) {
// Check if we have a JSON response and try to get the error/reason
if (err.response.headers['content-type'] === 'application/json') {
if (!err.response.data.error && err.response.data.pipe) {
// If we didn't find a JSON object with `error` then we might have a stream response.
// Detect the stream by the presence of `pipe` and use it to get the body and parse
// the error information.
const p = new Promise((resolve, reject) => {
const errorBody = new ResponseWriteable();
err.response.data.pipe(errorBody)
.on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
.on('error', () => { reject(err); });
});
// Replace the stream on the response with the parsed object
err.response.data = await p;
}
// Append the error/reason if available
if (err.response.data.error) {
// Override the status text with our more complete message
errorMsg += ` - Error: ${err.response.data.error}`;
if (err.response.data.reason) {
errorMsg += `, Reason: ${err.response.data.reason}`;
}
}
} else {
errorMsg += err.response.data;
}
// Set a new message for use by the node-sdk-core
// We use the errors array because it gets processed
// ahead of all other service errors.
err.response.data.errors = [{ message: errorMsg }];
}
} else if (err.request) {
if (!err.message.includes(err.config.url)) {
// Augment the message with the URL and method
// but don't do it again if we already have the URL.
err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
}
}
return Promise.reject(err);
};
// Option CLI defaults
const defaults = config.cliDefaults();
module.exports = {
client: function(rawUrl, opts) {
const url = new URL(rawUrl);
// Split the URL to separate service from database
// Use origin as the "base" to remove auth elements
const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
let authenticator;
// Default to cookieauth unless an IAM key is provided
if (opts.iamApiKey) {
const iamAuthOpts = { apikey: opts.iamApiKey };
if (opts.iamTokenUrl) {
iamAuthOpts.url = opts.iamTokenUrl;
}
authenticator = new IamAuthenticator(iamAuthOpts);
} else if (url.username) {
authenticator = new CouchdbSessionAuthenticator({
username: decodeURIComponent(url.username),
password: decodeURIComponent(url.password)
});
} else {
authenticator = new NoAuthAuthenticator();
}
const serviceOpts = {
authenticator: authenticator,
timeout: opts.requestTimeout,
// Axios performance options
maxContentLength: -1
};
// Options set by environment variables
const envVarOptions = {};
config.applyEnvironmentVariables(envVarOptions);
const service = new CloudantV1(serviceOpts);
// Configure retries
const maxRetries = 2; // for 3 total attempts
service.getHttpClient().defaults.raxConfig = {
// retries for status codes
retry: maxRetries,
// retries for non-response e.g. ETIMEDOUT
noResponseRetries: maxRetries,
backoffType: 'exponential',
httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
statusCodesToRetry: [
[429, 429],
[500, 599]
],
shouldRetry: err => {
const cfg = retryPlugin.getConfig(err);
// cap at max retries regardless of response/non-response type
if (cfg.currentRetryAttempt >= maxRetries) {
return false;
} else {
return retryPlugin.shouldRetryRequest(err);
}
},
instance: service.getHttpClient()
};
retryPlugin.attach(service.getHttpClient());
program
.version(pkg.version)
.description('Restore a CouchDB/Cloudant database from a backup text file.')
.usage('[options...]')
.option('-b, --buffer-size <n>',
cliutils.getUsage('number of documents restored at once', defaults.bufferSize),
Number)
.option('-d, --db <db>',
cliutils.getUsage('name of the new, existing database to restore to', defaults.db))
.option('-k, --iam-api-key <API key>',
cliutils.getUsage('IAM API key to access the Cloudant server'))
.option('-p, --parallelism <n>',
cliutils.getUsage('number of HTTP requests to perform in parallel when restoring a backup', defaults.parallelism),
Number)
.option('-q, --quiet',
cliutils.getUsage('suppress batch messages', defaults.quiet))
.option('-t, --request-timeout <n>',
cliutils.getUsage('milliseconds to wait for a response to an HTTP request before retrying the request', defaults.requestTimeout),
Number)
.option('-u, --url <url>',
cliutils.getUsage('URL of the CouchDB/Cloudant server', defaults.url))
.parse(process.argv);
service.setServiceUrl(actUrl.toString());
if (authenticator instanceof CouchdbSessionAuthenticator) {
// Awkward workaround for known Couch issue with compression on _session requests
// It is not feasible to disable compression on all requests with the amount of
// data this lib needs to move, so override the property in the tokenManager instance.
authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
}
if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
}
// Add error interceptors to put URLs in error messages
service.getHttpClient().interceptors.response.use(null, errorHelper);
// Apply the options in order so that the CLI overrides environment variables
// and environment variables override defaults.
const opts = Object.assign({}, defaults, envVarOptions, program.opts());
// Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
service.getHttpClient().interceptors.request.use(function(requestConfig) {
requestConfig.headers['User-Agent'] = userAgent;
return requestConfig;
}, null);
return opts;
}
module.exports = {
parseBackupArgs: parseBackupArgs,
parseRestoreArgs: parseRestoreArgs
return { service: service, db: dbName, url: actUrl.toString() };
}
};
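// --- Illustrative usage sketch (not part of the package diff above) ---
// A minimal example of how the exported `client` helper might be called; the
// require path, URL and credentials here are assumptions for illustration only.
const request = require('./includes/request.js');
const { service, db, url } = request.client(
  'https://user:pass@host.example.com/mydb',
  { requestTimeout: 120000 }
);
// `service` is a CloudantV1 instance with the retry and user-agent
// interceptors attached, `db` is the database name ('mydb') and `url` is the
// service URL with the database path segment removed.
console.log(db, url);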

@@ -1,2 +0,2 @@

// Copyright © 2017, 2022 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,97 +16,150 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const fs = require('fs');
const liner = require('./liner.js');
const change = require('./change.js');
const async = require('async');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:spoolchanges');
const debug = require('debug')('couchbackup:writer');
/**
* Write log file for all changes from a database, ready for downloading
* in batches.
*
* @param {string} dbUrl - URL of database
* @param {string} log - path to log file to use
* @param {number} bufferSize - the number of changes per batch/log line
* @param {function(err)} callback - a callback to run on completion
*/
module.exports = function(db, log, bufferSize, ee, callback) {
// list of document ids to process
const buffer = [];
let batch = 0;
let lastSeq = null;
const logStream = fs.createWriteStream(log);
let pending = 0;
// The number of changes to fetch per request
const limit = 100000;
module.exports = function(db, bufferSize, parallelism, ee) {
const writer = new stream.Transform({ objectMode: true });
let buffer = [];
let written = 0;
let linenumber = 0;
// send documents ids to the queue in batches of bufferSize + the last batch
const processBuffer = function(lastOne) {
if (buffer.length >= bufferSize || (lastOne && buffer.length > 0)) {
debug('writing', buffer.length, 'changes to the backup file');
const b = { docs: buffer.splice(0, bufferSize), batch: batch };
logStream.write(':t batch' + batch + ' ' + JSON.stringify(b.docs) + '\n');
ee.emit('changes', batch);
batch++;
// this is the queue of chunks that are written to the database
// the queue's payload will be an array of documents to be written,
// the size of the array will be bufferSize. The variable parallelism
// determines how many HTTP requests will occur at any one time.
const q = async.queue(function(payload, cb) {
// if we are restoring known revisions, we need to supply new_edits=false
if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
payload.new_edits = false;
debug('Using new_edits false mode.');
}
};
// called once per received change
const onChange = function(c) {
if (c) {
if (c.error) {
ee.emit('error', new error.BackupError('InvalidChange', `Received invalid change: ${c}`));
} else if (c.changes) {
const obj = { id: c.id };
buffer.push(obj);
processBuffer(false);
} else if (c.last_seq) {
lastSeq = c.last_seq;
pending = c.pending;
if (!didError) {
db.service.postBulkDocs({
db: db.db,
bulkDocs: payload
}).then(response => {
if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
` and ${response.result ? response.result.length : 'unavailable'} items`);
}
written += payload.docs.length;
writer.emit('restored', { documents: payload.docs.length, total: written });
cb();
}).catch(err => {
err = error.convertResponseError(err);
debug(`Error writing docs ${err.name} ${err.message}`);
cb(err, payload);
});
}
}, parallelism);
let didError = false;
// write the contents of the buffer to CouchDB in blocks of bufferSize
function processBuffer(flush, callback) {
function taskCallback(err, payload) {
if (err && !didError) {
debug(`Queue task failed with error ${err.name}`);
didError = true;
q.kill();
writer.emit('error', err);
}
}
};
function getChanges(since = 0) {
debug('making changes request since ' + since);
return db.service.postChangesAsStream({ db: db.db, since: since, limit: limit, seqInterval: limit })
.then(response => {
response.result.pipe(liner())
.on('error', function(err) {
logStream.end();
callback(err);
})
.pipe(change(onChange))
.on('error', function(err) {
logStream.end();
callback(err);
})
.on('finish', function() {
processBuffer(true);
if (!lastSeq) {
logStream.end();
debug('changes request terminated before last_seq was sent');
callback(new error.BackupError('SpoolChangesError', 'Changes request terminated before last_seq was sent'));
} else {
debug(`changes request completed with last_seq: ${lastSeq} and ${pending} changes pending.`);
if (pending > 0) {
// Return the next promise
return getChanges(lastSeq);
} else {
debug('finished streaming database changes');
logStream.end(':changes_complete ' + lastSeq + '\n', 'utf8', callback);
}
}
});
})
.catch(err => {
logStream.end();
if (err.status && err.status >= 400) {
callback(error.convertResponseError(err));
} else if (err.name !== 'SpoolChangesError') {
callback(new error.BackupError('SpoolChangesError', `Failed changes request - ${err.message}`));
}
});
if (flush || buffer.length >= bufferSize) {
// work through the buffer to break off bufferSize chunks
// and feed the chunks to the queue
do {
// split the buffer into bufferSize chunks
const toSend = buffer.splice(0, bufferSize);
// and add the chunk to the queue
debug(`Adding ${toSend.length} to the write queue.`);
q.push({ docs: toSend }, taskCallback);
} while (buffer.length >= bufferSize);
// send any leftover documents to the queue
if (flush && buffer.length > 0) {
debug(`Adding remaining ${buffer.length} to the write queue.`);
q.push({ docs: buffer }, taskCallback);
}
// wait until the queue size falls to a reasonable level
async.until(
// wait until the queue length drops to twice the parallelism
// or until empty on the last write
function(callback) {
// if we encountered an error, stop this until loop
if (didError) {
return callback(null, true);
}
if (flush) {
callback(null, q.idle() && q.length() === 0);
} else {
callback(null, q.length() <= parallelism * 2);
}
},
function(cb) {
setTimeout(cb, 20);
},
function() {
if (flush && !didError) {
writer.emit('finished', { total: written });
}
// callback when we're happy with the queue size
callback();
});
} else {
callback();
}
}
getChanges();
// take an object
writer._transform = function(obj, encoding, done) {
// each obj that arrives here is a line from the backup file
// it should contain an array of objects. The length of the array
// depends on the bufferSize at backup time.
linenumber++;
if (!didError && obj !== '') {
// see if it parses as JSON
try {
const arr = JSON.parse(obj);
// if it's an array with a length
if (typeof arr === 'object' && arr.length > 0) {
// push each document into a buffer
buffer = buffer.concat(arr);
// pause the stream
// it's likely that the speed with which data can be read from disk
// may exceed the rate it can be written to CouchDB. To prevent
// the whole file being buffered in memory, we pause the stream here.
// it is resumed, when processBuffer calls back and we call done()
this.pause();
// break the buffer into bufferSize chunks to be written to the database
processBuffer(false, done);
} else {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
done();
}
} catch (e) {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
// Could be an incomplete write that was subsequently resumed
done();
}
} else {
done();
}
};
// called when we need to flush everything
writer._flush = function(done) {
processBuffer(true, done);
};
return writer;
};
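// --- Illustrative usage sketch (not part of the package diff above) ---
// A minimal sketch of wiring up the restore writer above: it is an object-mode
// Transform that takes one backup-file line per chunk and emits 'restored',
// 'finished' and 'error' events. The require paths, URL and file name are
// assumptions for illustration only.
const fs = require('fs');
const events = require('events');
const liner = require('./includes/liner.js');
const writer = require('./includes/writer.js');
const request = require('./includes/request.js');

const db = request.client('http://localhost:5984/targetdb', { requestTimeout: 120000 });
const ee = new events.EventEmitter();
const w = writer(db, 500, 5, ee); // bufferSize 500, parallelism 5
w.on('restored', info => console.log(`restored ${info.documents} docs (${info.total} total)`));
w.on('finished', info => console.log(`restore complete: ${info.total} docs`));
w.on('error', err => console.error(`restore failed: ${err.name} ${err.message}`));
fs.createReadStream('backup.txt').pipe(liner()).pipe(w);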

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2022 IBM Corp. All rights reserved.
//

@@ -16,27 +16,97 @@ // Licensed under the Apache License, Version 2.0 (the "License");

// stolen from http://strongloop.com/strongblog/practical-examples-of-the-new-node-js-streams-api/
const stream = require('stream');
const fs = require('fs');
const liner = require('./liner.js');
const change = require('./change.js');
const error = require('./error.js');
const debug = require('debug')('couchbackup:spoolchanges');
module.exports = function(onChange) {
const change = new stream.Transform({ objectMode: true });
/**
* Write log file for all changes from a database, ready for downloading
* in batches.
*
* @param {string} dbUrl - URL of database
* @param {string} log - path to log file to use
* @param {number} bufferSize - the number of changes per batch/log line
* @param {function(err)} callback - a callback to run on completion
*/
module.exports = function(db, log, bufferSize, ee, callback) {
// list of document ids to process
const buffer = [];
let batch = 0;
let lastSeq = null;
const logStream = fs.createWriteStream(log);
let pending = 0;
// The number of changes to fetch per request
const limit = 100000;
change._transform = function(line, encoding, done) {
let obj = null;
// send documents ids to the queue in batches of bufferSize + the last batch
const processBuffer = function(lastOne) {
if (buffer.length >= bufferSize || (lastOne && buffer.length > 0)) {
debug('writing', buffer.length, 'changes to the backup file');
const b = { docs: buffer.splice(0, bufferSize), batch: batch };
logStream.write(':t batch' + batch + ' ' + JSON.stringify(b.docs) + '\n');
ee.emit('changes', batch);
batch++;
}
};
// one change per line - remove the trailing comma
line = line.trim().replace(/,$/, '');
// extract the last_seq at the end of the changes feed
if (line.match(/^"last_seq":/)) {
line = '{' + line;
// called once per received change
const onChange = function(c) {
if (c) {
if (c.error) {
ee.emit('error', new error.BackupError('InvalidChange', `Received invalid change: ${c}`));
} else if (c.changes) {
const obj = { id: c.id };
buffer.push(obj);
processBuffer(false);
} else if (c.last_seq) {
lastSeq = c.last_seq;
pending = c.pending;
}
}
try {
obj = JSON.parse(line);
} catch (e) {
}
onChange(obj);
done();
};
return change;
function getChanges(since = 0) {
debug('making changes request since ' + since);
return db.service.postChangesAsStream({ db: db.db, since: since, limit: limit, seqInterval: limit })
.then(response => {
response.result.pipe(liner())
.on('error', function(err) {
logStream.end();
callback(err);
})
.pipe(change(onChange))
.on('error', function(err) {
logStream.end();
callback(err);
})
.on('finish', function() {
processBuffer(true);
if (!lastSeq) {
logStream.end();
debug('changes request terminated before last_seq was sent');
callback(new error.BackupError('SpoolChangesError', 'Changes request terminated before last_seq was sent'));
} else {
debug(`changes request completed with last_seq: ${lastSeq} and ${pending} changes pending.`);
if (pending > 0) {
// Return the next promise
return getChanges(lastSeq);
} else {
debug('finished streaming database changes');
logStream.end(':changes_complete ' + lastSeq + '\n', 'utf8', callback);
}
}
});
})
.catch(err => {
logStream.end();
if (err.status && err.status >= 400) {
callback(error.convertResponseError(err));
} else if (err.name !== 'SpoolChangesError') {
callback(new error.BackupError('SpoolChangesError', `Failed changes request - ${err.message}`));
}
});
}
getChanges();
};
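// --- Illustrative usage sketch (not part of the package diff above) ---
// A minimal sketch of driving the change spooler above: it writes one
// ':t batchN [...]' line per batch of changes to the log file and ends the log
// with ':changes_complete <last_seq>'. The require paths, URL and log file
// name are assumptions for illustration only.
const events = require('events');
const spoolchanges = require('./includes/spoolchanges.js');
const request = require('./includes/request.js');

const db = request.client('http://localhost:5984/sourcedb', { requestTimeout: 120000 });
const ee = new events.EventEmitter();
ee.on('changes', batch => console.log(`spooled batch ${batch}`));
spoolchanges(db, 'backup.log', 500, ee, function(err) {
  if (err) {
    console.error(`spooling failed: ${err.message}`);
  } else {
    console.log('changes spooling complete');
  }
});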

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2021 IBM Corp. All rights reserved.
//

@@ -16,61 +16,104 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');
const path = require('path');
const tmp = require('tmp');
const onLine = function(onCommand, batches) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
/**
Return API default settings.
*/
function apiDefaults() {
return {
parallelism: 5,
bufferSize: 500,
requestTimeout: 120000,
log: tmp.tmpNameSync(),
resume: false,
mode: 'full'
};
}
let matches;
/**
Return CLI default settings.
*/
function cliDefaults() {
const defaults = apiDefaults();
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
// add additional legacy settings
defaults.db = 'test';
defaults.url = 'http://localhost:5984';
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
// add CLI only option
defaults.quiet = false;
// if this is one we want
if (obj.command === 't' && batches.indexOf(obj.batch) > -1) {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
onCommand(obj);
}
}
done();
};
return change;
};
return defaults;
}
module.exports = function(log, batches, callback) {
// our sense of state
const retval = { };
/**
Override settings **in-place** with environment variables.
*/
function applyEnvironmentVariables(opts) {
// if we have a custom CouchDB url
if (typeof process.env.COUCH_URL !== 'undefined') {
opts.url = process.env.COUCH_URL;
}
// called with each line from the log file
const onCommand = function(obj) {
retval[obj.batch] = obj;
};
// if we have a specified database
if (typeof process.env.COUCH_DATABASE !== 'undefined') {
opts.db = process.env.COUCH_DATABASE;
}
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, batches))
.on('error', function(err) {
callback(err);
})
.on('finish', function() {
callback(null, retval);
});
// if we have a specified buffer size
if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {
opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);
}
// if we have a specified parallelism
if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {
opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);
}
// if we have a specified request timeout
if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {
opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);
}
// if we have a specified log file
if (typeof process.env.COUCH_LOG !== 'undefined') {
opts.log = path.normalize(process.env.COUCH_LOG);
}
// if we are instructed to resume
if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {
opts.resume = true;
}
// if we are given an output filename
if (typeof process.env.COUCH_OUTPUT !== 'undefined') {
opts.output = path.normalize(process.env.COUCH_OUTPUT);
}
// if we only want a shallow copy
if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {
opts.mode = 'shallow';
}
// if we are instructed to be quiet
if (typeof process.env.COUCH_QUIET !== 'undefined' && process.env.COUCH_QUIET === 'true') {
opts.quiet = true;
}
// if we have a specified API key
if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {
opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;
}
// if we have a specified IAM token endpoint
if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {
opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;
}
}
module.exports = {
apiDefaults: apiDefaults,
cliDefaults: cliDefaults,
applyEnvironmentVariables: applyEnvironmentVariables
};
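// --- Illustrative usage sketch (not part of the package diff above) ---
// A minimal sketch of how the config helpers above combine: start from the CLI
// defaults and let environment variables override them in place; the CLI
// parsing shown elsewhere then overrides both. The require path is an
// assumption for illustration only.
const config = require('./includes/config.js');

const opts = config.cliDefaults(); // e.g. parallelism: 5, bufferSize: 500, db: 'test'
process.env.COUCH_PARALLELISM = '10';
config.applyEnvironmentVariables(opts);
console.log(opts.parallelism); // 10 - the environment variable wins over the default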

@@ -15,3 +15,3 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it */
/* global describe it beforeEach */
'use strict';

@@ -21,83 +21,116 @@

const fs = require('fs');
const nock = require('nock');
const request = require('../includes/request.js');
const writer = require('../includes/writer.js');
const noopEmitter = new (require('events')).EventEmitter();
const liner = require('../includes/liner.js');
const { once } = require('node:events');
const u = require('./citestutils.js');
const { pipeline } = require('node:stream/promises');
const longTestTimeout = 3000;
[{ useApi: true }, { useApi: false }].forEach(function(params) {
describe(u.scenario('Resume tests', params), function() {
it('should create a log file', async function() {
// Allow up to 60 s for this test
u.setTimeout(this, 60);
describe('#unit Check database restore writer', function() {
const dbUrl = 'http://localhost:5984/animaldb';
const db = request.client(dbUrl, { parallelism: 1 });
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
const p = u.p(params, { opts: { log: logFile } });
return u.testBackupToFile(p, 'animaldb', actualBackup).then(() => {
assert.ok(fs.existsSync(logFile), 'The log file should exist.');
});
});
beforeEach('Reset nocks', function() {
nock.cleanAll();
});
it('should restore corrupted animaldb to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_corrupted.json');
const dbName = this.dbName;
const p = u.p(params, { expectedRestoreErrorRecoverable: { name: 'BackupFileJsonError' } });
return once(input, 'open')
.then(() => {
return u.testRestore(p, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
it('should complete successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, []); // success
it('should restore resumed animaldb with blank line to a database correctly', async function() {
// Allow up to 60 s to restore and compare (again it should be faster)!
u.setTimeout(this, 60);
const input = fs.createReadStream('./test/fixtures/animaldb_resumed_blank.json');
const dbName = this.dbName;
return once(input, 'open')
.then(() => {
return u.testRestore(params, input, dbName);
}).then(() => {
return u.dbCompare('animaldb', dbName);
});
});
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
});
});
describe('Resume tests', function() {
// Currently cannot abort API backups; when we can, this test should be run for
// both API and CLI
it('should correctly backup and restore backup10m', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 90);
it('should terminate on a fatal error', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(401, { error: 'Unauthorized' }); // fatal error
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
// Use abort parameter to terminate the backup
const p = u.p(params, { abort: true }, { opts: { log: logFile } });
const restoreDb = this.dbName;
// Set to the database doc count; fewer than this should be written during a
// resumed backup.
p.exclusiveMaxExpected = 5096;
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Unauthorized');
assert.strictEqual(err.message, 'Access is denied due to invalid credentials.');
assert.ok(nock.isDone());
return true;
}
);
});
return u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
it('should retry on transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(200, { ok: true }); // third time lucky success
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 15);
assert.ok(nock.isDone());
})]);
}).timeout(longTestTimeout);
it('should fail after 3 transient errors', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(429, { error: 'Too Many Requests' }) // transient error
.post('/_bulk_docs')
.reply(500, { error: 'Internal Server Error' }) // transient error
.post('/_bulk_docs')
.reply(503, { error: 'Service Unavailable' }); // Final transient error
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `503 : post ${dbUrl}/_bulk_docs - Error: Service Unavailable`);
assert.ok(nock.isDone());
return true;
}
);
}).timeout(longTestTimeout);
it('should restore shallow backups without rev info successfully', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ ok: true, id: 'foo', rev: '1-abc' }]); // success
const w = writer(db, 500, 1, noopEmitter);
return Promise.all([pipeline(fs.createReadStream('./test/fixtures/animaldb_old_shallow.json'), liner(), w),
once(w, 'finished').then((data) => {
assert.strictEqual(data[0].total, 11);
assert.ok(nock.isDone());
})]);
});
// Note --output is only valid for CLI usage; this test should only run for the CLI
const params = { useApi: false };
it('should correctly backup and restore backup10m using --output', async function() {
// Allow up to 90 s for this test
u.setTimeout(this, 90);
const actualBackup = `./${this.fileName}`;
const logFile = `./${this.fileName}` + '.log';
// Use abort parameter to terminate the backup
const p = u.p(params, { abort: true }, { opts: { output: actualBackup, log: logFile } });
const restoreDb = this.dbName;
// Set to the database doc count; fewer than this should be written during a
// resumed backup.
p.exclusiveMaxExpected = 5096;
it('should get a batch error for non-empty array response with new_edits false', async function() {
nock(dbUrl)
.post('/_bulk_docs')
.reply(200, [{ id: 'foo', error: 'foo', reason: 'bar' }]);
return await u.testBackupAbortResumeRestore(p, 'backup10m', actualBackup, restoreDb);
const w = writer(db, 500, 1, noopEmitter);
return assert.rejects(
pipeline(fs.createReadStream('./test/fixtures/animaldb_expected.json'), liner(), w),
(err) => {
assert.strictEqual(err.name, 'Error');
assert.strictEqual(err.message, 'Error writing batch with new_edits:false and 1 items');
assert.ok(nock.isDone());
return true;
}
);
});
});

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -16,164 +16,61 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const pkg = require('../package.json');
const fs = require('fs');
const stream = require('stream');
const { CloudantV1, CouchdbSessionAuthenticator } = require('@ibm-cloud/cloudant');
const { IamAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const retryPlugin = require('retry-axios');
const liner = require('./liner.js');
const userAgent = 'couchbackup-cloudant/' + pkg.version + ' (Node.js ' +
process.version + ')';
const onLine = function(onCommand, batches) {
const change = new stream.Transform({ objectMode: true });
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
// Class for streaming _changes error responses into
// In general the response is a small error/reason JSON object
// so it is OK to have this in memory.
class ResponseWriteable extends stream.Writable {
constructor(options) {
super(options);
this.data = [];
}
let matches;
_write(chunk, encoding, callback) {
this.data.push(chunk);
callback();
}
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
stringBody() {
return Buffer.concat(this.data).toString();
}
}
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
// An interceptor function to help augment error bodies with a little
// extra information so we can continue to use consistent messaging
// after the upgrade to @ibm-cloud/cloudant
const errorHelper = async function(err) {
let method;
let requestUrl;
if (err.response) {
if (err.response.config.url) {
requestUrl = err.response.config.url;
method = err.response.config.method;
}
// Override the status text with an improved message
let errorMsg = `${err.response.status} ${err.response.statusText || ''}: ` +
`${method} ${requestUrl}`;
if (err.response.data) {
// Check if we have a JSON response and try to get the error/reason
if (err.response.headers['content-type'] === 'application/json') {
if (!err.response.data.error && err.response.data.pipe) {
// If we didn't find a JSON object with `error` then we might have a stream response.
// Detect the stream by the presence of `pipe` and use it to get the body and parse
// the error information.
const p = new Promise((resolve, reject) => {
const errorBody = new ResponseWriteable();
err.response.data.pipe(errorBody)
.on('finish', () => { resolve(JSON.parse(errorBody.stringBody())); })
.on('error', () => { reject(err); });
});
// Replace the stream on the response with the parsed object
err.response.data = await p;
}
// Append the error/reason if available
if (err.response.data.error) {
// Override the status text with our more complete message
errorMsg += ` - Error: ${err.response.data.error}`;
if (err.response.data.reason) {
errorMsg += `, Reason: ${err.response.data.reason}`;
}
}
} else {
errorMsg += err.response.data;
// if this is one we want
if (obj.command === 't' && batches.indexOf(obj.batch) > -1) {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
onCommand(obj);
}
// Set a new message for use by the node-sdk-core
// We use the errors array because it gets processed
// ahead of all other service errors.
err.response.data.errors = [{ message: errorMsg }];
}
} else if (err.request) {
if (!err.message.includes(err.config.url)) {
// Augment the message with the URL and method
// but don't do it again if we already have the URL.
err.message = `${err.message}: ${err.config.method} ${err.config.url}`;
}
}
return Promise.reject(err);
done();
};
return change;
};
module.exports = {
client: function(rawUrl, opts) {
const url = new URL(rawUrl);
// Split the URL to separate service from database
// Use origin as the "base" to remove auth elements
const actUrl = new URL(url.pathname.substring(0, url.pathname.lastIndexOf('/')), url.origin);
const dbName = url.pathname.substring(url.pathname.lastIndexOf('/') + 1);
let authenticator;
// Default to cookieauth unless an IAM key is provided
if (opts.iamApiKey) {
const iamAuthOpts = { apikey: opts.iamApiKey };
if (opts.iamTokenUrl) {
iamAuthOpts.url = opts.iamTokenUrl;
}
authenticator = new IamAuthenticator(iamAuthOpts);
} else if (url.username) {
authenticator = new CouchdbSessionAuthenticator({
username: decodeURIComponent(url.username),
password: decodeURIComponent(url.password)
});
} else {
authenticator = new NoAuthAuthenticator();
}
const serviceOpts = {
authenticator: authenticator,
timeout: opts.requestTimeout,
// Axios performance options
maxContentLength: -1
};
module.exports = function(log, batches, callback) {
// our sense of state
const retval = { };
const service = new CloudantV1(serviceOpts);
// Configure retries
const maxRetries = 2; // for 3 total attempts
service.getHttpClient().defaults.raxConfig = {
// retries for status codes
retry: maxRetries,
// retries for non-response e.g. ETIMEDOUT
noResponseRetries: maxRetries,
backoffType: 'exponential',
httpMethodsToRetry: ['GET', 'HEAD', 'POST'],
statusCodesToRetry: [
[429, 429],
[500, 599]
],
shouldRetry: err => {
const cfg = retryPlugin.getConfig(err);
// cap at max retries regardless of response/non-response type
if (cfg.currentRetryAttempt >= maxRetries) {
return false;
} else {
return retryPlugin.shouldRetryRequest(err);
}
},
instance: service.getHttpClient()
};
retryPlugin.attach(service.getHttpClient());
// called with each line from the log file
const onCommand = function(obj) {
retval[obj.batch] = obj;
};
service.setServiceUrl(actUrl.toString());
if (authenticator instanceof CouchdbSessionAuthenticator) {
// Awkward workaround for known Couch issue with compression on _session requests
// It is not feasible to disable compression on all requests with the amount of
// data this lib needs to move, so override the property in the tokenManager instance.
authenticator.tokenManager.requestWrapperInstance.compressRequestData = false;
}
if (authenticator.tokenManager && authenticator.tokenManager.requestWrapperInstance) {
authenticator.tokenManager.requestWrapperInstance.axiosInstance.interceptors.response.use(null, errorHelper);
}
// Add error interceptors to put URLs in error messages
service.getHttpClient().interceptors.response.use(null, errorHelper);
// Add request interceptor to add user-agent (adding it with custom request headers gets overwritten)
service.getHttpClient().interceptors.request.use(function(requestConfig) {
requestConfig.headers['User-Agent'] = userAgent;
return requestConfig;
}, null);
return { service: service, db: dbName, url: actUrl.toString() };
}
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, batches))
.on('error', function(err) {
callback(err);
})
.on('finish', function() {
callback(null, retval);
});
};
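// --- Illustrative usage sketch (not part of the package diff above) ---
// A minimal sketch of fetching specific pending batches back out of a backup
// log file with the module above. The require path, log file name and batch
// numbers are assumptions for illustration only.
const logfilegetbatches = require('./includes/logfilegetbatches.js');

logfilegetbatches('backup.log', [0, 1, 2], function(err, batches) {
  if (err) {
    console.error(err);
  } else {
    // `batches` is keyed by batch number; each entry carries the document ids
    // recorded on the corresponding ':t batchN' line of the log file.
    console.log(Object.keys(batches));
  }
});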

@@ -16,267 +16,99 @@ // Copyright © 2017, 2021 IBM Corp. All rights reserved.

const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');
// fatal errors
const codes = {
Error: 1,
InvalidOption: 2,
DatabaseNotFound: 10,
Unauthorized: 11,
Forbidden: 12,
DatabaseNotEmpty: 13,
NoLogFileName: 20,
LogDoesNotExist: 21,
IncompleteChangesInLogFile: 22,
SpoolChangesError: 30,
HTTPFatalError: 40,
BulkGetError: 50
};
/**
* Read documents from a database to be backed up.
*
* @param {string} db - `@cloudant/cloudant` DB object for source database.
* @param {number} blocksize - number of documents to download in single request
* @param {number} parallelism - number of concurrent downloads
* @param {string} log - path to log file to use
* @param {boolean} resume - whether to resume from an existing log file
* @returns EventEmitter with following events:
* - `received` - called with a block of documents to write to backup
* - `error` - on error
* - `finished` - when backup process is finished (either complete or errored)
*/
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime(); // backup start time
const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)
class BackupError extends Error {
constructor(name, message) {
super(message);
this.name = name;
}
}
function proceedWithBackup() {
if (options.resume) {
// pick up from existing log file from previous run
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
} else {
// create new log file and process
spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
if (err) {
ee.emit('error', err);
} else {
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
}
});
class HTTPError extends BackupError {
constructor(responseError, name) {
// Special case some names for more useful error messages
switch (responseError.status) {
case 401:
name = 'Unauthorized';
break;
case 403:
name = 'Forbidden';
break;
default:
name = name || 'HTTPFatalError';
}
super(name, responseError.message);
}
}
validateBulkGetSupport(db, function(err) {
if (err) {
return ee.emit('error', err);
// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
if (err) {
// Construct an HTTPError if there is request information on the error
// Codes < 400 are considered OK
if (err.status >= 400) {
return new HTTPError(err);
} else {
proceedWithBackup();
// Send it back again if there was no status code, e.g. a cxn error
return augmentMessage(err);
}
});
return ee;
};
/**
* Validate /_bulk_get support for a specified database.
*
* @param {string} db - nodejs-cloudant db
* @param {function} callback - called on completion with signature (err)
*/
function validateBulkGetSupport(db, callback) {
db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
err = error.convertResponseError(err, function(err) {
switch (err.status) {
case undefined:
// There was no status code on the error
return err;
case 404:
return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
default:
return new error.HTTPError(err);
}
});
callback(err);
});
}
}
/**
* Download remaining batches in a log file, splitting batches into sets
* to avoid enqueueing too many in one go.
*
* @param {string} log - log file name to maintain download state
* @param {string} db - nodejs-cloudant db
* @param {events.EventEmitter} ee - event emitter to emit received events on
* @param {time} startTime - start time for backup process
* @param {number} batchesPerDownloadSession - max batches to enqueue for
* download at a time. As batches contain many doc IDs, this helps avoid
* exhausting memory.
* @param {number} parallelism - number of concurrent downloads
* @returns function to call to download remaining batches with signature
* (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
*/
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
let total = 0; // running total of documents downloaded so far
let noRemainingBatches = false;
// Generate a set of batches (up to batchesPerDownloadSession) to download from the
// log file and download them. Set noRemainingBatches to `true` for last batch.
function downloadSingleBatchSet(done) {
// Fetch the doc IDs for the batches in the current set to
// download them.
function batchSetComplete(err, data) {
if (!err) {
total = data.total;
}
done(err);
}
function processRetrievedBatches(err, batches) {
if (!err) {
// process them in parallelised queue
processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
} else {
batchSetComplete(err);
}
}
readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
if (err) {
ee.emit('error', err);
// Stop processing changes file for fatal errors
noRemainingBatches = true;
done();
} else {
if (batchSetIds.length === 0) {
noRemainingBatches = true;
return done();
}
logfilegetbatches(log, batchSetIds, processRetrievedBatches);
}
});
function convertResponseError(responseError, errorFactory) {
if (!errorFactory) {
errorFactory = checkResponse;
}
return errorFactory(responseError);
}
// Return true if all batches in log file have been downloaded
function isFinished(callback) { callback(null, noRemainingBatches); }
function onComplete() {
ee.emit('finished', { total: total });
function augmentMessage(err) {
// For errors that don't have a status code, we are likely looking at a cxn
// error.
// Try to augment the message with more detail (core puts the code in statusText)
if (err && err.statusText) {
err.message = `${err.message} ${err.statusText}`;
}
async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
if (err && err.description) {
err.message = `${err.message} ${err.description}`;
}
return err;
}
/**
* Return a set of uncompleted download batch IDs from the log file.
*
* @param {string} log - log file path
* @param {number} batchesPerDownloadSession - maximum IDs to return
* @param {function} callback - signature (err, batchSetIds array)
*/
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
logfilesummary(log, function processSummary(err, summary) {
if (!err) {
if (!summary.changesComplete) {
callback(new error.BackupError('IncompleteChangesInLogFile',
'WARNING: Changes did not finish spooling'));
return;
}
if (Object.keys(summary.batches).length === 0) {
return callback(null, []);
}
// batch IDs are the property names of summary.batches
const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
callback(null, batchSetIds);
} else {
callback(err);
}
});
function wrapPossibleInvalidUrlError(err) {
if (err.code === 'ERR_INVALID_URL') {
// Wrap ERR_INVALID_URL in our own InvalidOption
return new BackupError('InvalidOption', err.message);
}
return err;
}
/**
* Download a set of batches retrieved from a log file. When a download is
* complete, add a line to the logfile indicating such.
*
* @param {any} db - nodejs-cloudant database
* @param {any} parallelism - number of concurrent requests to make
* @param {any} log - log file to drive downloads from
* @param {any} batches - batches to download
* @param {any} ee - event emitter for progress. This function emits
* received and error events.
* @param {any} start - time backup started, to report deltas
* @param {any} grandtotal - count of documents downloaded prior to this set
* of batches
* @param {any} callback - completion callback, (err, {total: number}).
*/
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
let hasErrored = false;
let total = grandtotal;
// queue to process the fetch requests in an orderly fashion using _bulk_get
const q = async.queue(function(payload, done) {
const output = [];
const thisBatch = payload.batch;
delete payload.batch;
delete payload.command;
function logCompletedBatch(batch) {
if (log) {
fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
} else {
done();
}
module.exports = {
BackupError,
HTTPError,
wrapPossibleInvalidUrlError,
convertResponseError,
terminationCallback: function terminationCallback(err, data) {
if (err) {
console.error(`ERROR: ${err.message}`);
process.exitCode = codes[err.name] || 1;
process.exit();
}
// do the /db/_bulk_get request
db.service.postBulkGet({
db: db.db,
revs: true,
docs: payload.docs
}).then(response => {
// create an output array with the docs returned
response.result.results.forEach(function(d) {
if (d.docs) {
d.docs.forEach(function(doc) {
if (doc.ok) {
output.push(doc.ok);
}
});
}
});
total += output.length;
const t = (new Date().getTime() - start) / 1000;
ee.emit('received', {
batch: thisBatch,
data: output,
length: output.length,
time: t,
total: total
}, q, logCompletedBatch);
}).catch(err => {
if (!hasErrored) {
hasErrored = true;
err = error.convertResponseError(err);
// Kill the queue for fatal errors
q.kill();
ee.emit('error', err);
}
done();
});
}, parallelism);
for (const i in batches) {
q.push(batches[i]);
}
q.drain(function() {
callback(null, { total: total });
});
}
/**
* Returns first N properties on an object.
*
* @param {object} obj - object with properties
* @param {number} count - number of properties to return
*/
function getPropertyNames(obj, count) {
// decide which batch numbers to deal with
const batchestofetch = [];
let j = 0;
for (const i in obj) {
batchestofetch.push(parseInt(i));
j++;
if (j >= count) break;
}
return batchestofetch;
}
};
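// --- Illustrative usage sketch (not part of the package diff above) ---
// A minimal sketch of how the error helpers above classify a response error:
// 401 and 403 map to the named fatal errors, other >= 400 statuses become
// HTTPFatalError, and errors without a status pass through with an augmented
// message. The require path is an assumption for illustration only.
const error = require('./includes/error.js');

const responseError = { status: 401, message: 'Unauthorized request' };
const converted = error.convertResponseError(responseError);
console.log(converted.name); // 'Unauthorized'
console.log(converted instanceof error.HTTPError); // true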

@@ -1,2 +0,2 @@

// Copyright © 2017 IBM Corp. All rights reserved.
// Copyright © 2017, 2018 IBM Corp. All rights reserved.
//

@@ -16,78 +16,17 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const fs = require('fs');
const stream = require('stream');
const liner = require('./liner.js');
module.exports = function(db, options, readstream, ee, callback) {
const liner = require('../includes/liner.js')();
const writer = require('../includes/writer.js')(db, options.bufferSize, options.parallelism, ee);
const onLine = function(onCommand, getDocs) {
const change = new stream.Transform({ objectMode: true });
// pipe the input to the output, via transformation functions
readstream
.pipe(liner) // transform the input stream into per-line
.on('error', function(err) {
// Forward the error to the writer event emitter where we already have
// listeners on for handling errors
writer.emit('error', err);
})
.pipe(writer); // transform the data
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
let matches;
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
// extract doc ids
if (getDocs && obj.command === 't') {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
}
onCommand(obj);
}
done();
};
return change;
callback(null, writer);
};
/**
* Generate a list of remaining batches from a download file.
*
* @param {string} log - log file name
* @param {function} callback - callback with err, {changesComplete: N, batches: N}.
* changesComplete signifies whether the log file appeared to
* have completed reading the changes feed (contains :changes_complete).
* batches are remaining batch IDs for download.
*/
module.exports = function(log, callback) {
// our sense of state
const state = {
};
let changesComplete = false;
// called with each line from the log file
const onCommand = function(obj) {
if (obj.command === 't') {
state[obj.batch] = true;
} else if (obj.command === 'd') {
delete state[obj.batch];
} else if (obj.command === 'changes_complete') {
changesComplete = true;
}
};
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, false))
.on('finish', function() {
const obj = { changesComplete: changesComplete, batches: state };
callback(null, obj);
});
};
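// --- Illustrative usage sketch (not part of the package diff above) ---
// A minimal sketch of summarising a backup log file with the module above: the
// callback reports whether the changes feed finished spooling and which batch
// numbers were spooled (':t') but not yet marked done (':d'). The require path
// and log file name are assumptions for illustration only.
const logfilesummary = require('./includes/logfilesummary.js');

logfilesummary('backup.log', function(err, summary) {
  if (err) {
    console.error(err);
  } else {
    console.log(summary.changesComplete); // true once ':changes_complete' was written
    console.log(Object.keys(summary.batches)); // remaining batch numbers to download
  }
});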

@@ -1,2 +0,2 @@

// Copyright © 2017, 2021 IBM Corp. All rights reserved.
// Copyright © 2017 IBM Corp. All rights reserved.
//

@@ -16,150 +16,78 @@ // Licensed under the Apache License, Version 2.0 (the "License");

const async = require('async');
const fs = require('fs');
const stream = require('stream');
const error = require('./error.js');
const debug = require('debug')('couchbackup:writer');
const liner = require('./liner.js');
module.exports = function(db, bufferSize, parallelism, ee) {
const writer = new stream.Transform({ objectMode: true });
let buffer = [];
let written = 0;
let linenumber = 0;
const onLine = function(onCommand, getDocs) {
const change = new stream.Transform({ objectMode: true });
// this is the queue of chunks that are written to the database
// the queue's payload will be an array of documents to be written,
// the size of the array will be bufferSize. The variable parallelism
// determines how many HTTP requests will occur at any one time.
const q = async.queue(function(payload, cb) {
// if we are restoring known revisions, we need to supply new_edits=false
if (payload.docs && payload.docs[0] && payload.docs[0]._rev) {
payload.new_edits = false;
debug('Using new_edits false mode.');
}
change._transform = function(line, encoding, done) {
if (line && line[0] === ':') {
const obj = {
command: null,
batch: null,
docs: []
};
if (!didError) {
db.service.postBulkDocs({
db: db.db,
bulkDocs: payload
}).then(response => {
if (!response.result || (payload.new_edits === false && response.result.length > 0)) {
throw new Error(`Error writing batch with new_edits:${payload.new_edits !== false}` +
` and ${response.result ? response.result.length : 'unavailable'} items`);
}
written += payload.docs.length;
writer.emit('restored', { documents: payload.docs.length, total: written });
cb();
}).catch(err => {
err = error.convertResponseError(err);
debug(`Error writing docs ${err.name} ${err.message}`);
cb(err, payload);
});
}
}, parallelism);
let matches;
let didError = false;
// extract command
matches = line.match(/^:([a-z_]+) ?/);
if (matches) {
obj.command = matches[1];
}
// write the contents of the buffer to CouchDB in blocks of bufferSize
function processBuffer(flush, callback) {
function taskCallback(err, payload) {
if (err && !didError) {
debug(`Queue task failed with error ${err.name}`);
didError = true;
q.kill();
writer.emit('error', err);
// extract batch
matches = line.match(/ batch([0-9]+)/);
if (matches) {
obj.batch = parseInt(matches[1]);
}
}
if (flush || buffer.length >= bufferSize) {
// work through the buffer to break off bufferSize chunks
// and feed the chunks to the queue
do {
// split the buffer into bufferSize chunks
const toSend = buffer.splice(0, bufferSize);
// and add the chunk to the queue
debug(`Adding ${toSend.length} to the write queue.`);
q.push({ docs: toSend }, taskCallback);
} while (buffer.length >= bufferSize);
// send any leftover documents to the queue
if (flush && buffer.length > 0) {
debug(`Adding remaining ${buffer.length} to the write queue.`);
q.push({ docs: buffer }, taskCallback);
// extract doc ids
if (getDocs && obj.command === 't') {
const json = line.replace(/^.* batch[0-9]+ /, '').trim();
obj.docs = JSON.parse(json);
}
// wait until the queue size falls to a reasonable level
async.until(
// wait until the queue length drops to twice the parallelism
// or until empty on the last write
function(callback) {
// if we encountered an error, stop this until loop
if (didError) {
return callback(null, true);
}
if (flush) {
callback(null, q.idle() && q.length() === 0);
} else {
callback(null, q.length() <= parallelism * 2);
}
},
function(cb) {
setTimeout(cb, 20);
},
function() {
if (flush && !didError) {
writer.emit('finished', { total: written });
}
// callback when we're happy with the queue size
callback();
});
} else {
callback();
onCommand(obj);
}
}
done();
};
return change;
};
// take an object
writer._transform = function(obj, encoding, done) {
// each obj that arrives here is a line from the backup file
// it should contain an array of objects. The length of the array
// depends on the bufferSize at backup time.
linenumber++;
if (!didError && obj !== '') {
// see if it parses as JSON
try {
const arr = JSON.parse(obj);
/**
* Generate a list of remaining batches from a download file.
*
* @param {string} log - log file name
* @param {function} callback - callback with err, {changesComplete: N, batches: N}.
* changesComplete signifies whether the log file appeared to
* have completed reading the changes feed (contains :changes_complete).
* batches are remaining batch IDs for download.
*/
module.exports = function(log, callback) {
// our sense of state
const state = {
// if it's an array with a length
if (typeof arr === 'object' && arr.length > 0) {
// push each document into a buffer
buffer = buffer.concat(arr);
};
let changesComplete = false;
// pause the stream
// it's likely that the speed with which data can be read from disk
// may exceed the rate it can be written to CouchDB. To prevent
// the whole file being buffered in memory, we pause the stream here.
// it is resumed, when processBuffer calls back and we call done()
this.pause();
// break the buffer into bufferSize chunks to be written to the database
processBuffer(false, done);
} else {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - not an array`));
done();
}
} catch (e) {
ee.emit('error', new error.BackupError('BackupFileJsonError', `Error on line ${linenumber} of backup file - cannot parse as JSON`));
// Could be an incomplete write that was subsequently resumed
done();
}
} else {
done();
// called with each line from the log file
const onCommand = function(obj) {
if (obj.command === 't') {
state[obj.batch] = true;
} else if (obj.command === 'd') {
delete state[obj.batch];
} else if (obj.command === 'changes_complete') {
changesComplete = true;
}
};
// called when we need to flush everything
writer._flush = function(done) {
processBuffer(true, done);
};
return writer;
// stream through the previous log file
fs.createReadStream(log)
.pipe(liner())
.pipe(onLine(onCommand, false))
.on('finish', function() {
const obj = { changesComplete: changesComplete, batches: state };
callback(null, obj);
});
};

@@ -16,99 +16,267 @@ // Copyright © 2017, 2021 IBM Corp. All rights reserved.

// fatal errors
const codes = {
Error: 1,
InvalidOption: 2,
DatabaseNotFound: 10,
Unauthorized: 11,
Forbidden: 12,
DatabaseNotEmpty: 13,
NoLogFileName: 20,
LogDoesNotExist: 21,
IncompleteChangesInLogFile: 22,
SpoolChangesError: 30,
HTTPFatalError: 40,
BulkGetError: 50
const async = require('async');
const events = require('events');
const fs = require('fs');
const error = require('./error.js');
const spoolchanges = require('./spoolchanges.js');
const logfilesummary = require('./logfilesummary.js');
const logfilegetbatches = require('./logfilegetbatches.js');
/**
* Read documents from a database to be backed up.
*
* @param {string} db - `@cloudant/cloudant` DB object for source database.
* @param {number} blocksize - number of documents to download in single request
* @param {number} parallelism - number of concurrent downloads
* @param {string} log - path to log file to use
* @param {boolean} resume - whether to resume from an existing log file
* @returns EventEmitter with following events:
* - `received` - called with a block of documents to write to backup
* - `error` - on error
* - `finished` - when backup process is finished (either complete or errored)
*/
module.exports = function(db, options) {
const ee = new events.EventEmitter();
const start = new Date().getTime(); // backup start time
const batchesPerDownloadSession = 50; // max batches to read from log file for download at a time (prevent OOM)
function proceedWithBackup() {
if (options.resume) {
// pick up from existing log file from previous run
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
} else {
// create new log file and process
spoolchanges(db, options.log, options.bufferSize, ee, function(err) {
if (err) {
ee.emit('error', err);
} else {
downloadRemainingBatches(options.log, db, ee, start, batchesPerDownloadSession, options.parallelism);
}
});
}
}
validateBulkGetSupport(db, function(err) {
if (err) {
return ee.emit('error', err);
} else {
proceedWithBackup();
}
});
return ee;
};
class BackupError extends Error {
constructor(name, message) {
super(message);
this.name = name;
}
/**
* Validate /_bulk_get support for a specified database.
*
* @param {string} db - nodejs-cloudant db
* @param {function} callback - called on completion with signature (err)
*/
function validateBulkGetSupport(db, callback) {
db.service.postBulkGet({ db: db.db, docs: [] }).then(() => { callback(); }).catch(err => {
err = error.convertResponseError(err, function(err) {
switch (err.status) {
case undefined:
// There was no status code on the error
return err;
case 404:
return new error.BackupError('BulkGetError', 'Database does not support /_bulk_get endpoint');
default:
return new error.HTTPError(err);
}
});
callback(err);
});
}
class HTTPError extends BackupError {
constructor(responseError, name) {
// Special case some names for more useful error messages
switch (responseError.status) {
case 401:
name = 'Unauthorized';
break;
case 403:
name = 'Forbidden';
break;
default:
name = name || 'HTTPFatalError';
/**
* Download remaining batches in a log file, splitting batches into sets
* to avoid enqueueing too many in one go.
*
* @param {string} log - log file name to maintain download state
* @param {string} db - nodejs-cloudant db
* @param {events.EventEmitter} ee - event emitter to emit received events on
* @param {time} startTime - start time for backup process
* @param {number} batchesPerDownloadSession - max batches to enqueue for
* download at a time. As batches contain many doc IDs, this helps avoid
* exhausting memory.
* @param {number} parallelism - number of concurrent downloads
* @returns function to call to download remaining batches with signature
* (err, {batches: batch, docs: doccount}) {@see spoolchanges}.
*/
function downloadRemainingBatches(log, db, ee, startTime, batchesPerDownloadSession, parallelism) {
let total = 0; // running total of documents downloaded so far
let noRemainingBatches = false;
// Generate a set of batches (up to batchesPerDownloadSession) to download from the
// log file and download them. Set noRemainingBatches to `true` for last batch.
function downloadSingleBatchSet(done) {
// Fetch the doc IDs for the batches in the current set to
// download them.
function batchSetComplete(err, data) {
if (!err) {
total = data.total;
}
done(err);
}
super(name, responseError.message);
function processRetrievedBatches(err, batches) {
if (!err) {
// process them in parallelised queue
processBatchSet(db, parallelism, log, batches, ee, startTime, total, batchSetComplete);
} else {
batchSetComplete(err);
}
}
readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, function(err, batchSetIds) {
if (err) {
ee.emit('error', err);
// Stop processing changes file for fatal errors
noRemainingBatches = true;
done();
} else {
if (batchSetIds.length === 0) {
noRemainingBatches = true;
return done();
}
logfilegetbatches(log, batchSetIds, processRetrievedBatches);
}
});
}
// Return true if all batches in log file have been downloaded
function isFinished(callback) { callback(null, noRemainingBatches); }
function onComplete() {
ee.emit('finished', { total: total });
}
async.doUntil(downloadSingleBatchSet, isFinished, onComplete);
}
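// Illustrative sketch (assumption, not in the source): the async.doUntil control
// flow used above, reduced to a self-contained shape with the same
// worker / test / completion roles.
// let setsRemaining = 3;
// async.doUntil(
//   function worker(done) { setsRemaining--; done(); },              // downloadSingleBatchSet analogue
//   function isDone(cb) { cb(null, setsRemaining === 0); },          // isFinished analogue
//   function complete() { console.log('all batch sets processed'); } // onComplete analogue
// );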
// Default function to return an error for HTTP status codes
// < 400 -> OK
// 4XX (except 429) -> Fatal
// 429 & >=500 -> Transient
function checkResponse(err) {
if (err) {
// Construct an HTTPError if there is request information on the error
// Codes < 400 are considered OK
if (err.status >= 400) {
return new HTTPError(err);
/**
* Return a set of uncompleted download batch IDs from the log file.
*
* @param {string} log - log file path
* @param {number} batchesPerDownloadSession - maximum IDs to return
 * @param {function} callback - signature (err, batchSetIds array)
*/
function readBatchSetIdsFromLogFile(log, batchesPerDownloadSession, callback) {
logfilesummary(log, function processSummary(err, summary) {
if (!err) {
if (!summary.changesComplete) {
callback(new error.BackupError('IncompleteChangesInLogFile',
'WARNING: Changes did not finish spooling'));
return;
}
if (Object.keys(summary.batches).length === 0) {
return callback(null, []);
}
// batch IDs are the property names of summary.batches
const batchSetIds = getPropertyNames(summary.batches, batchesPerDownloadSession);
callback(null, batchSetIds);
} else {
// Send it back again if there was no status code, e.g. a connection error
return augmentMessage(err);
callback(err);
}
}
});
}
function convertResponseError(responseError, errorFactory) {
if (!errorFactory) {
errorFactory = checkResponse;
/**
* Download a set of batches retrieved from a log file. When a download is
* complete, add a line to the logfile indicating such.
*
* @param {any} db - nodejs-cloudant database
* @param {any} parallelism - number of concurrent requests to make
* @param {any} log - log file to drive downloads from
* @param {any} batches - batches to download
 * @param {any} ee - event emitter for progress. This function emits
* received and error events.
* @param {any} start - time backup started, to report deltas
* @param {any} grandtotal - count of documents downloaded prior to this set
* of batches
* @param {any} callback - completion callback, (err, {total: number}).
*/
function processBatchSet(db, parallelism, log, batches, ee, start, grandtotal, callback) {
let hasErrored = false;
let total = grandtotal;
// queue to process the fetch requests in an orderly fashion using _bulk_get
const q = async.queue(function(payload, done) {
const output = [];
const thisBatch = payload.batch;
delete payload.batch;
delete payload.command;
function logCompletedBatch(batch) {
if (log) {
fs.appendFile(log, ':d batch' + thisBatch + '\n', done);
} else {
done();
}
}
// do the /db/_bulk_get request
db.service.postBulkGet({
db: db.db,
revs: true,
docs: payload.docs
}).then(response => {
// create an output array with the docs returned
response.result.results.forEach(function(d) {
if (d.docs) {
d.docs.forEach(function(doc) {
if (doc.ok) {
output.push(doc.ok);
}
});
}
});
total += output.length;
const t = (new Date().getTime() - start) / 1000;
ee.emit('received', {
batch: thisBatch,
data: output,
length: output.length,
time: t,
total: total
}, q, logCompletedBatch);
}).catch(err => {
if (!hasErrored) {
hasErrored = true;
err = error.convertResponseError(err);
// Kill the queue for fatal errors
q.kill();
ee.emit('error', err);
}
done();
});
}, parallelism);
for (const i in batches) {
q.push(batches[i]);
}
return errorFactory(responseError);
}
function augmentMessage(err) {
// For errors that don't have a status code, we are likely looking at a
// connection error.
// Try to augment the message with more detail (core puts the code in statusText)
if (err && err.statusText) {
err.message = `${err.message} ${err.statusText}`;
}
if (err && err.description) {
err.message = `${err.message} ${err.description}`;
}
return err;
q.drain(function() {
callback(null, { total: total });
});
}
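// Hedged sketch (not in the source): a `received` listener pairing with the emit
// above; it writes the batch and then invokes the supplied callback so the
// ':d batch<n>' completion line is appended to the log file.
// ee.on('received', function(block, queue, markBatchDone) {
//   outputStream.write(JSON.stringify(block.data) + '\n', () => markBatchDone(block.batch));
// });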
function wrapPossibleInvalidUrlError(err) {
if (err.code === 'ERR_INVALID_URL') {
// Wrap ERR_INVALID_URL in our own InvalidOption
return new BackupError('InvalidOption', err.message);
/**
* Returns first N properties on an object.
*
* @param {object} obj - object with properties
* @param {number} count - number of properties to return
*/
function getPropertyNames(obj, count) {
// decide which batch numbers to deal with
const batchestofetch = [];
let j = 0;
for (const i in obj) {
batchestofetch.push(parseInt(i));
j++;
if (j >= count) break;
}
return err;
return batchestofetch;
}
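// Equivalent sketch (behavioural assumption, not a drop-in change): the same
// first-N batch IDs expressed with Object.keys instead of a for...in loop.
function getPropertyNamesAlt(obj, count) {
  return Object.keys(obj).slice(0, count).map(Number);
}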
module.exports = {
BackupError,
HTTPError,
wrapPossibleInvalidUrlError,
convertResponseError,
terminationCallback: function terminationCallback(err, data) {
if (err) {
console.error(`ERROR: ${err.message}`);
process.exitCode = codes[err.name] || 1;
process.exit();
}
}
};
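// Hedged usage sketch (not in the source): convertResponseError with a custom
// factory, mirroring the validateBulkGetSupport pattern; the status value and
// error names below are illustrative assumptions.
// const mapped = convertResponseError({ status: 404, message: 'missing' }, (e) =>
//   (e.status === 404) ? new BackupError('DatabaseNotFound', 'Database missing') : new HTTPError(e));
// console.log(mapped.name); // 'DatabaseNotFound'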

@@ -15,52 +15,29 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global beforeEach afterEach */
/* global describe it */
'use strict';
const { CloudantV1 } = require('@ibm-cloud/cloudant');
const url = new URL((process.env.COUCH_BACKEND_URL) ? process.env.COUCH_BACKEND_URL : 'https://no-couch-backend-url-set.test');
const { BasicAuthenticator, NoAuthAuthenticator } = require('ibm-cloud-sdk-core');
const authenticator = (url.username) ? new BasicAuthenticator({ username: url.username, password: decodeURIComponent(url.password) }) : new NoAuthAuthenticator();
const serviceOpts = {
authenticator: authenticator
};
const cloudant = new CloudantV1(serviceOpts);
// Remove auth from URL before using for service
cloudant.setServiceUrl(new URL(url.pathname, url.origin).toString());
const uuid = require('uuid').v4;
const assert = require('assert');
const fs = require('fs');
const u = require('./citestutils.js');
// Mocha hooks that will be at the root context so they run for all tests
beforeEach('Create test database', async function() {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to create the DB
this.timeout(10 * 1000);
const unique = uuid();
this.fileName = `${unique}`;
this.dbName = 'couchbackup_test_' + unique;
return cloudant.putDatabase({ db: this.dbName });
}
describe('Write error tests', function() {
it('calls callback with error set when stream is not writeable', async function() {
u.setTimeout(this, 10);
const dirname = fs.mkdtempSync('test_backup_');
// make temp dir read only
fs.chmodSync(dirname, 0o444);
const filename = dirname + '/test.backup';
const backupStream = fs.createWriteStream(filename, { flags: 'w' });
const params = { useApi: true };
// try to do backup and check err was set in callback
return u.testBackup(params, 'animaldb', backupStream).then(() => {
assert.fail('Should throw an "EACCES" error');
}).catch((resultErr) => {
// cleanup temp dir
fs.rmdirSync(dirname);
// error should have been set
assert.ok(resultErr);
assert.strictEqual(resultErr.code, 'EACCES');
});
});
});
afterEach('Delete test database', async function() {
// Don't run hook for unit tests, just for CI
if (!this.currentTest.fullTitle().includes('#unit')) {
// Allow 10 seconds to delete the DB
this.timeout(10 * 1000);
deleteIfExists(this.fileName);
deleteIfExists(`${this.fileName}.log`);
return cloudant.deleteDatabase({ db: this.dbName });
}
});
function deleteIfExists(fileName) {
fs.unlink(fileName, function(err) {
if (err) {
if (err.code !== 'ENOENT') {
console.error(`${err.code} ${err.message}`);
}
}
});
}
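// Hedged modernization sketch (assumption, not in the source): the same cleanup
// with fs/promises so callers can await completion.
// const { unlink } = require('node:fs/promises');
// async function deleteIfExistsAsync(fileName) {
//   try {
//     await unlink(fileName);
//   } catch (err) {
//     if (err.code !== 'ENOENT') console.error(`${err.code} ${err.message}`);
//   }
// }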

@@ -1,114 +0,102 @@

// Copyright © 2017 IBM Corp. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global after before describe */
'use strict';
const url = require('url');
const toxy = require('toxy');
// Import the common hooks
require('../test/hooks.js');
const tpoisons = toxy.poisons;
const trules = toxy.rules;
function setupProxy(poison) {
const backendUrl = new url.URL(process.env.COUCH_BACKEND_URL);
const proxy = toxy({
auth: `${backendUrl.username}:${backendUrl.password}`,
changeOrigin: true
});
// Forward traffic to DB
proxy.forward(process.env.COUCH_BACKEND_URL);
switch (poison) {
case 'normal':
// No poisons to add
break;
case 'bandwidth-limit':
// https://github.com/h2non/toxy#bandwidth
// Note the implementation of bandwidth is simplistic and the threshold
// delay is applied to every write of the buffer, so use the smallest
// delay possible and adjust the rate using the bytes size instead.
proxy
.poison(tpoisons.bandwidth({ bytes: 512, threshold: 1 })); // 0.5 MB/s
break;
case 'latency':
// https://github.com/h2non/toxy#latency
proxy
.poison(tpoisons.latency({ max: 1500, min: 250 }))
.withRule(trules.probability(60));
break;
case 'slow-read':
// https://github.com/h2non/toxy#slow-read
// Note this only impacts read of data from requests so only for non-GET
// In practice this means that it impacts restore much more than backup
// since although backup POSTs to _bulk_get the content is much smaller
// than what is POSTed to _bulk_docs for a restore.
// Similarly to bandwidth-limit use a 1 ms threshold
proxy
.poison(tpoisons.slowRead({ chunk: 256, threshold: 1 }))
// Slow read for 10 % of the time e.g. 10 ms in every 100
.withRule(trules.timeThreshold({ duration: 10, period: 100 }));
break;
case 'rate-limit':
// https://github.com/h2non/toxy#rate-limit
// Simulate the Cloudant free plan with 20 lookups per second and 10 writes per second
proxy.post('/*/_bulk_get')
.poison(tpoisons.rateLimit({ limit: 20, threshold: 1000 }));
proxy.post('/*/_bulk_docs')
.poison(tpoisons.rateLimit({ limit: 10, threshold: 1000 }));
break;
default:
throw Error('Unknown toxy poison ' + poison);
}
// Catch remaining traffic
proxy.all('/*');
return proxy;
}
const poisons = [
'normal',
'bandwidth-limit',
'latency',
'slow-read',
'rate-limit'
];
poisons.forEach(function(poison) {
describe('unreliable network tests (using toxy poison ' + poison + ')', function() {
let proxy;
before('start toxy server', function() {
proxy = setupProxy(poison);
console.log('Using toxy poison ' + poison);
// For these tests COUCH_URL points to the toxy proxy on localhost whereas
// COUCH_BACKEND_URL is the real CouchDb instance.
const toxyUrl = new url.URL(process.env.COUCH_URL);
// Listen on the specified hostname only, so if using localhost we don't
// need external connections.
proxy.listen(toxyUrl.port, toxyUrl.hostname);
});
after('stop toxy server', function() {
proxy.close();
});
delete require.cache[require.resolve('../test/ci_e2e.js')];
require('../test/ci_e2e.js');
});
});
<testsuites name="test-iam">
<testsuite name="Basic backup and restore using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-10-23T14:55:30" time="4.804">
<testcase classname="test-iam.Basic backup and restore using API" name="should backup animaldb to a file correctly" time="1.121">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should restore animaldb to a database correctly" time="1.726">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API" name="should execute a shallow mode backup successfully" time="0.686">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using API Buffer size tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-10-23T14:55:35" time="10.306">
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with the same buffer size" time="2.598">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="3.441">
</testcase>
<testcase classname="test-iam.Basic backup and restore using API Buffer size tests using API" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="3.454">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-10-23T14:55:45" time="6.018">
<testcase classname="test-iam.Basic backup and restore using CLI" name="should backup animaldb to a file correctly" time="1.283">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should restore animaldb to a database correctly" time="2.692">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI" name="should execute a shallow mode backup successfully" time="1.247">
</testcase>
</testsuite>
<testsuite name="Basic backup and restore using CLI Buffer size tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-10-23T14:55:51" time="12.918">
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with the same buffer size" time="3.414">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &gt; restore buffer" time="4.315">
</testcase>
<testcase classname="test-iam.Basic backup and restore using CLI Buffer size tests using CLI" name="should backup/restore animaldb with backup buffer &lt; restore buffer" time="4.376">
</testcase>
</testsuite>
<testsuite name="Compression tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-10-23T14:56:04" time="5.607">
<testcase classname="test-iam.Compression tests using API" name="should backup animaldb to a compressed file" time="0.823">
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed file" time="1.464">
</testcase>
<testcase classname="test-iam.Compression tests using API" name="should backup and restore animaldb via a compressed stream" time="2.524">
</testcase>
</testsuite>
<testsuite name="Compression tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-10-23T14:56:09" time="7.468">
<testcase classname="test-iam.Compression tests using CLI" name="should backup animaldb to a compressed file" time="1.325">
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed file" time="2.514">
</testcase>
<testcase classname="test-iam.Compression tests using CLI" name="should backup and restore animaldb via a compressed stream" time="2.819">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using API" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-10-23T14:56:17" time="320.915">
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore animaldb" time="2.079">
</testcase>
<testcase classname="test-iam.End to end backup and restore using API" name="should backup and restore largedb1g #slow" time="318.053">
</testcase>
</testsuite>
<testsuite name="End to end backup and restore using CLI" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-10-23T15:01:38" time="506.907">
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore animaldb" time="2.672">
</testcase>
<testcase classname="test-iam.End to end backup and restore using CLI" name="should backup and restore largedb1g #slow" time="503.463">
</testcase>
</testsuite>
<testsuite name="Encryption tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-10-23T15:10:05" time="2.869">
<testcase classname="test-iam.Encryption tests" name="should backup and restore animaldb via an encrypted file" time="2.606">
</testcase>
</testsuite>
<testsuite name="Write error tests" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-10-23T15:10:08" time="0.278">
<testcase classname="test-iam.Write error tests" name="calls callback with error set when stream is not writeable" time="0.012">
</testcase>
</testsuite>
<testsuite name="Event tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-10-23T15:10:08" time="2.405">
<testcase classname="test-iam.Event tests" name="should get a finished event when using stdout" time="1.045">
</testcase>
<testcase classname="test-iam.Event tests" name="should get a finished event when using file output" time="0.828">
</testcase>
</testsuite>
<testsuite name="Resume tests using API" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-10-23T15:10:10" time="5.217">
<testcase classname="test-iam.Resume tests using API" name="should create a log file" time="0.828">
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore corrupted animaldb to a database correctly" time="1.824">
</testcase>
<testcase classname="test-iam.Resume tests using API" name="should restore resumed animaldb with blank line to a database correctly" time="1.75">
</testcase>
</testsuite>
<testsuite name="Resume tests using CLI" tests="3" errors="0" failures="0" skipped="0" timestamp="2023-10-23T15:10:16" time="6.972">
<testcase classname="test-iam.Resume tests using CLI" name="should create a log file" time="1.457">
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore corrupted animaldb to a database correctly" time="2.429">
</testcase>
<testcase classname="test-iam.Resume tests using CLI" name="should restore resumed animaldb with blank line to a database correctly" time="2.281">
</testcase>
</testsuite>
<testsuite name="Resume tests" tests="2" errors="0" failures="0" skipped="0" timestamp="2023-10-23T15:10:23" time="34.216">
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m" time="16.831">
</testcase>
<testcase classname="test-iam.Resume tests" name="should correctly backup and restore backup10m using --output" time="16.862">
</testcase>
</testsuite>
<testsuite name="Longer spool changes checks" tests="1" errors="0" failures="0" skipped="0" timestamp="2023-10-23T15:10:57" time="62.507">
<testcase classname="test-iam.Longer spool changes checks" name="#slow should keep collecting changes (25M)" time="62.24">
</testcase>
</testsuite>
</testsuites>

@@ -15,117 +15,178 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe it before after */
/* global describe it */
'use strict';
const assert = require('assert');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;
const nock = require('nock');
const request = require('../includes/request.js');
const changes = require('../includes/spoolchanges.js');
describe('#unit Configuration', function() {
let processEnvCopy;
const url = 'http://localhost:7777';
const dbName = 'fakenockdb';
const longTestTimeout = 3000;
before('Save env', function() {
// Copy env so we can reset it after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
});
const db = request.client(`${url}/${dbName}`, { parallelism: 1 });
after('Reset env', function() {
process.env = processEnvCopy;
const seqSuffix = Buffer.alloc(124, 'abc123').toString('base64');
function provideChanges(batchSize, totalChanges, fullResponse = false) {
let pending = totalChanges;
const sparseResultsArray = Array(batchSize).fill({
seq: null,
id: 'doc',
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
});
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(totalChanges / batchSize + (totalChanges % batchSize > 0 ? 1 : 0))
.reply(200, (uri, requestBody) => {
pending -= batchSize;
const lastSeq = (totalChanges - pending);
const seq = lastSeq - batchSize;
return {
results: fullResponse
? Array.from(Array(batchSize), (_, i) => {
return {
seq: `${seq + i}-${seqSuffix}`,
id: `doc${seq + i}`,
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
};
})
: sparseResultsArray,
pending: pending,
last_seq: `${lastSeq}-abc`
};
});
}
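// Hedged illustration (not in the source): shape of the first mocked _changes
// reply produced above for batchSize=2, totalChanges=10, fullResponse=true.
// {
//   results: [
//     { seq: `0-${seqSuffix}`, id: 'doc0', changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }] },
//     { seq: `1-${seqSuffix}`, id: 'doc1', changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }] }
//   ],
//   pending: 8,
//   last_seq: '2-abc'
// }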
it('respects the COUCH_URL env variable', function() {
process.env.COUCH_URL = 'http://user:pass@myurl.com';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.url, 'string');
assert.strictEqual(config.url, process.env.COUCH_URL);
});
describe('#unit Check spool changes', function() {
it('should terminate on request error', async function() {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.replyWithError({ code: 'ECONNRESET', message: 'socket hang up' });
it('respects the COUCH_DATABASE env variable', function() {
process.env.COUCH_DATABASE = 'mydb';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.db, 'string');
assert.strictEqual(config.db, process.env.COUCH_DATABASE);
});
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.strictEqual(err.name, 'SpoolChangesError');
assert.strictEqual(err.message, `Failed changes request - socket hang up: post ${url}/${dbName}/_changes`);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
}).timeout(longTestTimeout);
it('respects the COUCH_BUFFER_SIZE env variable', function() {
process.env.COUCH_BUFFER_SIZE = '1000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.bufferSize, 'number');
assert.strictEqual(config.bufferSize, 1000);
});
it('should terminate on bad HTTP status code response', async function() {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
});
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: post ${url}/${dbName}/_changes - Error: foo, Reason: bar`);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
}).timeout(longTestTimeout);
it('respects the COUCH_PARALLELISM env variable', function() {
process.env.COUCH_PARALLELISM = '20';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.parallelism, 'number');
assert.strictEqual(config.parallelism, 20);
});
it('should keep collecting changes', async function() {
// This test validates that spooling will correctly
// continue across multiple requests
// (4 batches of 100000 to be precise).
// This test might take up to 10 seconds
this.timeout(10 * 1000);
it('respects the COUCH_REQUEST_TIMEOUT env variable', function() {
process.env.COUCH_REQUEST_TIMEOUT = '10000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.requestTimeout, 'number');
assert.strictEqual(config.requestTimeout, 10000);
// Use full changes for this test
provideChanges(100000, 400000, true);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('respects the CLOUDANT_IAM_API_KEY env variable', function() {
const key = 'ABC123-ZYX987_cba789-xyz321';
process.env.CLOUDANT_IAM_API_KEY = key;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamApiKey, 'string');
assert.strictEqual(config.iamApiKey, key);
it('should keep collecting sparse changes', async function() {
// This test checks that making thousands of requests doesn't
// make anything bad happen.
// This test might take up to 25 seconds
this.timeout(25 * 1000);
// Use sparse changes for this test and a batch size of 1
provideChanges(1, 2500);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
});
it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function() {
const u = 'https://testhost.example:1234/identity/token';
process.env.CLOUDANT_IAM_TOKEN_URL = u;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamTokenUrl, 'string');
assert.strictEqual(config.iamTokenUrl, u);
describe('Longer spool changes checks', function() {
it('#slow should keep collecting changes (25M)', async function() {
// This test might take up to 5 minutes
this.timeout(5 * 60 * 1000);
// Note changes spooling uses a constant batch size, we are setting
// a test value here and setting the buffer to match
const batch = 100000;
// Use sparse changes for this test
provideChanges(batch, 25000000);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', batch, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('respects the COUCH_LOG env variable', function() {
process.env.COUCH_LOG = 'my.log';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.log, 'string');
assert.strictEqual(config.log, process.env.COUCH_LOG);
it('#slower should keep collecting changes (500M)', async function() {
// This test might take up to 90 minutes
this.timeout(90 * 60 * 1000);
// Note changes spooling uses a constant batch size, we are setting
// a test value here and setting the buffer to match
const batch = 1000000;
// Use full changes for this test to exercise load
provideChanges(batch, 500000000, true);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', batch, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
});
it('respects the COUCH_RESUME env variable', function() {
process.env.COUCH_RESUME = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.resume, 'boolean');
assert.strictEqual(config.resume, true);
});
it('respects the COUCH_OUTPUT env variable', function() {
process.env.COUCH_OUTPUT = 'myfile.txt';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.output, 'string');
assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
});
it('respects the COUCH_MODE env variable', function() {
process.env.COUCH_MODE = 'shallow';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.mode, 'string');
assert.strictEqual(config.mode, 'shallow');
});
it('respects the COUCH_QUIET env variable', function() {
process.env.COUCH_QUIET = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.quiet, 'boolean');
assert.strictEqual(config.quiet, true);
});
});
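// Hedged usage sketch (not in the source): applying environment variables to a
// plain config object at startup, as exercised by the tests above.
// process.env.COUCH_URL = 'http://localhost:5984';
// const config = {};
// applyEnvVars(config);
// console.log(config.url); // 'http://localhost:5984'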

@@ -15,316 +15,117 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

/* global describe afterEach before after it */
/* global describe it before after */
'use strict';
const assert = require('assert');
const parser = require('../includes/parser.js');
const applyEnvVars = require('../includes/config.js').applyEnvironmentVariables;
describe('#unit Default parameters', function() {
describe('#unit Configuration', function() {
let processEnvCopy;
let processArgvCopy;
before('Set process data for test', function() {
// Copy env and argv so we can reset them after the tests
before('Save env', function() {
// Copy env so we can reset it after the tests
processEnvCopy = JSON.parse(JSON.stringify(process.env));
processArgvCopy = JSON.parse(JSON.stringify(process.argv));
// setup environment variables
process.env.COUCH_URL = 'http://user:pass@myurl.com';
process.env.COUCH_DATABASE = 'mydb';
process.env.COUCH_BUFFER_SIZE = '1000';
process.env.COUCH_PARALLELISM = '20';
process.env.COUCH_REQUEST_TIMEOUT = '20000';
process.env.COUCH_LOG = 'my.log';
process.env.COUCH_RESUME = 'true';
process.env.COUCH_OUTPUT = 'myfile.txt';
process.env.COUCH_MODE = 'shallow';
process.env.CLOUDANT_IAM_API_KEY = 'ABC123-ZYX987_cba789-xyz321';
process.env.COUCH_QUIET = 'true';
});
after('Reset process data', function() {
after('Reset env', function() {
process.env = processEnvCopy;
process.argv = processArgvCopy;
});
afterEach(function() {
delete require.cache[require.resolve('commander')];
it('respects the COUCH_URL env variable', function() {
process.env.COUCH_URL = 'http://user:pass@myurl.com';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.url, 'string');
assert.strictEqual(config.url, process.env.COUCH_URL);
});
describe('Backup command-line', function() {
it('respects the COUCH_URL env variable if the --url backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
it('respects the COUCH_DATABASE env variable', function() {
process.env.COUCH_DATABASE = 'mydb';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.db, 'string');
assert.strictEqual(config.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_DATABASE env variable if the --db backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable', function() {
process.env.COUCH_BUFFER_SIZE = '1000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.bufferSize, 'number');
assert.strictEqual(config.bufferSize, 1000);
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
it('respects the COUCH_PARALLELISM env variable', function() {
process.env.COUCH_PARALLELISM = '20';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.parallelism, 'number');
assert.strictEqual(config.parallelism, 20);
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('respects the COUCH_REQUEST_TIMEOUT env variable', function() {
process.env.COUCH_REQUEST_TIMEOUT = '10000';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.requestTimeout, 'number');
assert.strictEqual(config.requestTimeout, 10000);
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
it('respects the CLOUDANT_IAM_API_KEY env variable', function() {
const key = 'ABC123-ZYX987_cba789-xyz321';
process.env.CLOUDANT_IAM_API_KEY = key;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamApiKey, 'string');
assert.strictEqual(config.iamApiKey, key);
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('respects the CLOUDANT_IAM_TOKEN_URL env variable', function() {
const u = 'https://testhost.example:1234/identity/token';
process.env.CLOUDANT_IAM_TOKEN_URL = u;
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.iamTokenUrl, 'string');
assert.strictEqual(config.iamTokenUrl, u);
});
it('respects the COUCH_LOG env variable if the --log backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, process.env.COUCH_LOG);
});
it('respects the COUCH_LOG env variable', function() {
process.env.COUCH_LOG = 'my.log';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.log, 'string');
assert.strictEqual(config.log, process.env.COUCH_LOG);
});
it('respects the COUCH_RESUME env variable if the --resume backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
it('respects the COUCH_RESUME env variable', function() {
process.env.COUCH_RESUME = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.resume, 'boolean');
assert.strictEqual(config.resume, true);
});
it('respects the COUCH_OUTPUT env variable if the --output backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, process.env.COUCH_OUTPUT);
});
it('respects the COUCH_OUTPUT env variable', function() {
process.env.COUCH_OUTPUT = 'myfile.txt';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.output, 'string');
assert.strictEqual(config.output, process.env.COUCH_OUTPUT);
});
it('respects the COUCH_MODE env variable if the --mode backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, process.env.COUCH_MODE);
});
it('respects the COUCH_QUIET env variable if the --quiet backup command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the backup --url command-line parameter', function() {
const url = 'http://user:pass@myurl2.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the backup --db command-line parameter', function() {
const db = 'mydb2';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the backup --buffer-size command-line parameter', function() {
const bufferSize = 500;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the backup --parallelism command-line parameter', function() {
const parallelism = 10;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the backup --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the backup --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the backup --log command-line parameter', function() {
const filename = 'my2.log';
process.argv = ['node', 'test', '--log', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.log, 'string');
assert.strictEqual(program.log, filename);
});
it('respects the backup --resume command-line parameter', function() {
process.argv = ['node', 'test', '--resume'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.resume, 'boolean');
assert.strictEqual(program.resume, true);
});
it('respects the backup --output command-line parameter', function() {
const filename = 'myfile2.txt';
process.argv = ['node', 'test', '--output', filename];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.output, 'string');
assert.strictEqual(program.output, filename);
});
it('respects the backup --mode full command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'full'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'full');
});
it('respects the backup --mode shallow command-line parameter', function() {
process.argv = ['node', 'test', '--mode', 'shallow'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.mode, 'string');
assert.strictEqual(program.mode, 'shallow');
});
it('respects the backup --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the COUCH_MODE env variable', function() {
process.env.COUCH_MODE = 'shallow';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.mode, 'string');
assert.strictEqual(config.mode, 'shallow');
});
describe('Restore command-line', function() {
it('respects the COUCH_URL env variable if the --url restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, process.env.COUCH_URL);
});
it('respects the COUCH_DATABASE env variable if the --db restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, process.env.COUCH_DATABASE);
});
it('respects the COUCH_BUFFER_SIZE env variable if the --buffer-size restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, parseInt(process.env.COUCH_BUFFER_SIZE, 10));
});
it('respects the COUCH_PARALLELISM env variable if the --parallelism restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parseInt(process.env.COUCH_PARALLELISM, 10));
});
it('respects the COUCH_REQUEST_TIMEOUT env variable if the --request-timeout restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, parseInt(process.env.COUCH_REQUEST_TIMEOUT, 10));
});
it('respects the CLOUDANT_IAM_API_KEY env variable if the --iam-api-key restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, process.env.CLOUDANT_IAM_API_KEY);
});
it('respects the COUCH_QUIET env variable if the --quiet restore command-line parameter is missing', function() {
process.argv = ['node', 'test'];
const program = parser.parseBackupArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the restore --url command-line parameter', function() {
const url = 'https://a:b@myurl3.com';
process.argv = ['node', 'test', '--url', url];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.url, 'string');
assert.strictEqual(program.url, url);
});
it('respects the restore --db command-line parameter', function() {
const db = 'mydb3';
process.argv = ['node', 'test', '--db', db];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.db, 'string');
assert.strictEqual(program.db, db);
});
it('respects the restore --buffer-size command-line parameter', function() {
const bufferSize = 250;
process.argv = ['node', 'test', '--buffer-size', bufferSize];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.bufferSize, 'number');
assert.strictEqual(program.bufferSize, bufferSize);
});
it('respects the restore --parallelism command-line parameter', function() {
const parallelism = 5;
process.argv = ['node', 'test', '--parallelism', parallelism];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.parallelism, 'number');
assert.strictEqual(program.parallelism, parallelism);
});
it('respects the restore --request-timeout command-line parameter', function() {
const requestTimeout = 10000;
process.argv = ['node', 'test', '--request-timeout', requestTimeout];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.requestTimeout, 'number');
assert.strictEqual(program.requestTimeout, requestTimeout);
});
it('respects the restore --iam-api-key command-line parameter', function() {
const key = '123abc-789zyx_CBA987-XYZ321';
process.argv = ['node', 'test', '--iam-api-key', key];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.iamApiKey, 'string');
assert.strictEqual(program.iamApiKey, key);
});
it('respects the restore --quiet command-line parameter', function() {
process.argv = ['node', 'test', '--quiet'];
const program = parser.parseRestoreArgs();
assert.strictEqual(typeof program.quiet, 'boolean');
assert.strictEqual(program.quiet, true);
});
it('respects the COUCH_QUIET env variable', function() {
process.env.COUCH_QUIET = 'true';
const config = {};
applyEnvVars(config);
assert.strictEqual(typeof config.quiet, 'boolean');
assert.strictEqual(config.quiet, true);
});
});

@@ -19,174 +19,170 @@ // Copyright © 2017, 2023 IBM Corp. All rights reserved.

const assert = require('assert');
const backup = require('../app.js').backup;
const fs = require('fs');
const nock = require('nock');
const request = require('../includes/request.js');
const changes = require('../includes/spoolchanges.js');
const util = require('util');
const backupPromise = util.promisify(backup);
const url = 'http://localhost:7777';
const dbName = 'fakenockdb';
const longTestTimeout = 3000;
const goodUrl = 'http://localhost:5984/db';
// The real validateArgs function of app.js isn't
// exported - so we call the exported backup method
// instead. We don't get as far as a real backup when
// testing error cases. For success cases we nock the
// goodUrl with a 404 so the backup terminates early with a DatabaseNotFound error.
const validateArgs = async function(url, opts, errorValidationForAssertRejects) {
const nullStream = fs.createWriteStream('/dev/null');
if (url === goodUrl) {
// Nock the goodUrl
nock(goodUrl).head('').reply(404, { error: 'not_found', reason: 'missing' });
}
return assert.rejects(backupPromise(url, nullStream, opts), errorValidationForAssertRejects);
};
const db = request.client(`${url}/${dbName}`, { parallelism: 1 });
const validateShallowModeArgs = async function(url, opts, msg) {
// We pass assertNoValidationError because for these shallow opts
// we are expecting only a stderr warning
return validateArgs(url, opts, assertNoValidationError()).then(() => {
// Assert the warning message was in stderr
assert(capturedStderr.indexOf(msg) > -1, 'Log warning message was not present');
});
};
const seqSuffix = Buffer.alloc(124, 'abc123').toString('base64');
function provideChanges(batchSize, totalChanges, fullResponse = false) {
let pending = totalChanges;
const sparseResultsArray = Array(batchSize).fill({
seq: null,
id: 'doc',
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
});
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(totalChanges / batchSize + (totalChanges % batchSize > 0 ? 1 : 0))
.reply(200, (uri, requestBody) => {
pending -= batchSize;
const lastSeq = (totalChanges - pending);
const seq = lastSeq - batchSize;
return {
results: fullResponse
? Array.from(Array(batchSize), (_, i) => {
return {
seq: `${seq + i}-${seqSuffix}`,
id: `doc${seq + i}`,
changes: [{ rev: '1-abcdef0123456789abcdef0123456789' }]
};
})
: sparseResultsArray,
pending: pending,
last_seq: `${lastSeq}-abc`
};
});
const stderrWriteFun = process.stderr.write;
let capturedStderr;
function captureStderr() {
process.stderr.write = function(string, encoding, fd) {
capturedStderr += string;
};
}
describe('#unit Check spool changes', function() {
it('should terminate on request error', async function() {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.replyWithError({ code: 'ECONNRESET', message: 'socket hang up' });
function releaseStderr() {
process.stderr.write = stderrWriteFun;
capturedStderr = null;
}
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.strictEqual(err.name, 'SpoolChangesError');
assert.strictEqual(err.message, `Failed changes request - socket hang up: post ${url}/${dbName}/_changes`);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
}).timeout(longTestTimeout);
// Return a validation object for use with assert.rejects
function assertErrorMessage(msg) {
return { name: 'InvalidOption', message: msg };
}
it('should terminate on bad HTTP status code response', async function() {
nock(url)
.post(`/${dbName}/_changes`)
.query(true)
.times(3)
.reply(500, function(uri, requestBody) {
this.req.response.statusMessage = 'Internal Server Error';
return { error: 'foo', reason: 'bar' };
});
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.strictEqual(err.name, 'HTTPFatalError');
assert.strictEqual(err.message, `500 Internal Server Error: post ${url}/${dbName}/_changes - Error: foo, Reason: bar`);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
}).timeout(longTestTimeout);
// For cases where validation should pass we reach a real backup that hits a 404
// mock, so DatabaseNotFound is the expected error in the assertNoValidationError case
function assertNoValidationError() { return { name: 'DatabaseNotFound' }; }
it('should keep collecting changes', async function() {
// This test validates that spooling will correctly
// continue across multiple requests
// (4 batches of 100000 to be precise).
// This test might take up to 10 seconds
this.timeout(10 * 1000);
// Use full changes for this test
provideChanges(100000, 400000, true);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
});
});
describe('#unit Validate arguments', function() {
it('returns error for invalid URL type', async function() {
return validateArgs(true, {}, assertErrorMessage('Invalid URL, must be type string'));
});
it('should keep collecting sparse changes', async function() {
// This test checks that making thousands of requests doesn't
// make anything bad happen.
// This test might take up to 25 seconds
this.timeout(25 * 1000);
// Use sparse changes for this test and a batch size of 1
provideChanges(1, 2500);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', 500, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
it('returns no error for valid URL type', async function() {
return validateArgs(goodUrl, {}, assertNoValidationError());
});
it('returns error for invalid (no host) URL', async function() {
return validateArgs('http://', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (no protocol) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid (wrong protocol) URL', async function() {
return validateArgs('ftp://invalid.example.com', {}, assertErrorMessage('Invalid URL protocol.'));
});
it('returns error for invalid (no path) URL', async function() {
return validateArgs('https://invalid.example.com', {}, assertErrorMessage('Invalid URL, missing path element (no database).'));
});
it('returns error for invalid (no protocol, no host) URL', async function() {
return validateArgs('invalid', {}, assertErrorMessage('Invalid URL'));
});
it('returns error for invalid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: '123' }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 0 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float buffer size', async function() {
return validateArgs(goodUrl, { bufferSize: 1.23 }, assertErrorMessage('Invalid buffer size option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid buffer size type', async function() {
return validateArgs(goodUrl, { bufferSize: 123 }, assertNoValidationError());
});
it('returns error for invalid log type', async function() {
return validateArgs(goodUrl, { log: true }, assertErrorMessage('Invalid log option, must be type string'));
});
it('returns no error for valid log type', async function() {
return validateArgs(goodUrl, { log: 'log.txt' }, assertNoValidationError());
});
it('returns error for invalid mode type', async function() {
return validateArgs(goodUrl, { mode: true }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns error for invalid mode string', async function() {
return validateArgs(goodUrl, { mode: 'foobar' }, assertErrorMessage('Invalid mode option, must be either "full" or "shallow"'));
});
it('returns no error for valid mode type', async function() {
return validateArgs(goodUrl, { mode: 'full' }, assertNoValidationError());
});
it('returns error for invalid output type', async function() {
return validateArgs(goodUrl, { output: true }, assertErrorMessage('Invalid output option, must be type string'));
});
it('returns no error for valid output type', async function() {
return validateArgs(goodUrl, { output: 'output.txt' }, assertNoValidationError());
});
it('returns error for invalid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: '123' }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 0 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float parallelism', async function() {
return validateArgs(goodUrl, { parallelism: 1.23 }, assertErrorMessage('Invalid parallelism option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid parallelism type', async function() {
return validateArgs(goodUrl, { parallelism: 123 }, assertNoValidationError());
});
it('returns error for invalid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: '123' }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for zero request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 0 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns error for float request timeout', async function() {
return validateArgs(goodUrl, { requestTimeout: 1.23 }, assertErrorMessage('Invalid request timeout option, must be a positive integer in the range (0, MAX_SAFE_INTEGER]'));
});
it('returns no error for valid request timeout type', async function() {
return validateArgs(goodUrl, { requestTimeout: 123 }, assertNoValidationError());
});
it('returns error for invalid resume type', async function() {
return validateArgs(goodUrl, { resume: 'true' }, assertErrorMessage('Invalid resume option, must be type boolean'));
});
it('returns no error for valid resume type', async function() {
return validateArgs(goodUrl, { resume: false }, assertNoValidationError());
});
it('returns error for invalid key type', async function() {
return validateArgs(goodUrl, { iamApiKey: true }, assertErrorMessage('Invalid iamApiKey option, must be type string'));
});
it('returns error for key and URL credentials supplied', async function() {
return validateArgs('https://a:b@example.com/db', { iamApiKey: 'abc123' }, assertErrorMessage('URL user information must not be supplied when using IAM API key.'));
});
it('warns for log arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test' },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
});
});
describe('Longer spool changes checks', function() {
it('#slow should keep collecting changes (25M)', async function() {
// This test might take up to 5 minutes
this.timeout(5 * 60 * 1000);
// Note changes spooling uses a constant batch size, we are setting
// a test value here and setting the buffer to match
const batch = 100000;
// Use sparse changes for this test
provideChanges(batch, 25000000);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', batch, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
it('warns for resume arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', log: 'test', resume: true },
'the options "log" and "resume" are invalid when using shallow mode.').finally(
() => {
releaseStderr();
});
});
});
it('#slower should keep collecting changes (500M)', async function() {
// This test might take up to 90 minutes
this.timeout(90 * 60 * 1000);
// Note changes spooling uses a constant batch size, we are setting
// a test value here and setting the buffer to match
const batch = 1000000;
// Use full changes for this test to exercise load
provideChanges(batch, 500000000, true);
return new Promise((resolve, reject) => {
changes(db, '/dev/null', batch, null, function(err) {
try {
assert.ok(!err);
assert.ok(nock.isDone());
resolve();
} catch (err) {
reject(err);
}
it('warns for parallelism arg in shallow mode', async function() {
captureStderr();
return validateShallowModeArgs(goodUrl, { mode: 'shallow', parallelism: 10 },
'the option "parallelism" has no effect when using shallow mode.').finally(
() => {
releaseStderr();
});
});
});
});
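// Hedged usage sketch (not in the source): the exported backup() rejects before
// any HTTP work when validation fails, mirroring the cases above.
// backupPromise('ftp://invalid.example.com/db', fs.createWriteStream('/dev/null'), {})
//   .catch((err) => console.error(err.name, err.message)); // InvalidOption Invalid URL protocol.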
{
"name": "@cloudant/couchbackup",
"version": "2.9.14-SNAPSHOT.154",
"version": "2.9.14-SNAPSHOT.155",
"description": "CouchBackup - command-line backup utility for Cloudant/CouchDB",

@@ -40,3 +40,3 @@ "homepage": "https://github.com/IBM/couchbackup",

"devDependencies": {
"eslint": "8.51.0",
"eslint": "8.52.0",
"eslint-config-semistandard": "17.0.0",

@@ -43,0 +43,0 @@ "eslint-config-standard": "17.1.0",

Sorry, the diff of this file is not supported yet
