@sap/hdi-deploy - npm package version comparison

Comparing version 3.7.0 to 3.11.3

lib/client-info.js


CHANGELOG.md

@@ -0,1 +1,84 @@

## 3.11.3
Features:
- update dependencies
Fixes:
- correctly handle SQL grantors
## 3.11.2
Features:
- update dependencies
## 3.11.1
Features:
- node 10 support
- updated dependencies
## 3.11.0
Features:
- added option `--liveness-ping` to periodically send a signal that notifies the user that the deployer is still working
- added option `--live-messages` to display the make messages while the make is still in progress
- added function `clean-env` to `library.js` to allow cleaning a passed environment of all deployer-related variables (see the sketch after this entry)
Fixes:
- `library.js` would sometimes return with `exitCode` null because of unexpected closing of the child process
- update dependencies
- add missing options to HDI_DEPLOY_OPTIONS
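
A sketch of how the 3.11.0 additions might be used from code. The `deploy(contentDir, env, callback)` signature and the hyphenated `'clean-env'` export key are assumptions based on this entry and the package README, not verified here:

// minimal sketch, assuming library.js exports deploy() and a 'clean-env' helper
const library = require('@sap/hdi-deploy/library');
// assumed: returns a copy of the given environment with all deployer-related
// variables (e.g. HDI_DEPLOY_OPTIONS) removed
const cleanedEnv = library['clean-env'](Object.assign({}, process.env));
library.deploy('db', cleanedEnv, function (response) {
  // response.exitCode should now be reliable, per the fix above
  console.log('deployer finished with exit code', response.exitCode);
});
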
## 3.10.0
Features:
- passwords can be split over multiple services
## 3.9.4
Fixes:
- update handlebars
## 3.9.3
Fixes:
- add full support for .hdbmigrationtable files by adding the --[no-]migrationtable-development-mode flag
## 3.9.2
Fixes:
- the private key used for mutual authentication was logged when tracing was enabled
- mutual auth was missing on some database connections
- some database connections were not closed correctly
## 3.9.1
Fixes:
- revert changes to hdi actions that could possibly cause a behavior change
## 3.9.0
Features:
- allow passing ssl connection parameters in service binding
- allow mutual auth via parameters in service binding
- set session variable APPLICATION on all HANA connections
- support path parameters for HDI
- allow development debug role similar to the default access role
- update @sap/hdi dependency
- better handling of invalid undeploy.json files
- improved timestamps in logging output
- check ownership of objects in the container via --treat-wrong-ownership-as-errors
- allow logging of additional application data
Fixes:
- issue with schema privileges and global roles in the same .hdbgrants file
## 3.8.2
Features:
- introduced .hdbrevokes as a counterpart to .hdbgrants (see the sketch at the end of this entry)
- automatically find target service: if only one HDI service is bound, TARGET_CONTAINER does not have to be set
- print timestamp of HDI messages
- print the status of the last build
- fallback to the .hdiconfig in src/ if it is missing in cfg/
- switch to @sap/hdi 2.1.2
- switch from hdb to @sap/hana-client
- check if the container supports locking and skip it if not
Fixes:
- support \n and \r\n as line endings in .hdiignore files
- correctly process grantor files in ZDM mode
- fix 'Maximum call stack size exceeded' in ZDM mode
- don't escape schema names in .hdbgrants
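
For illustration, a hypothetical .hdbrevokes file for the new counterpart mechanism; per this entry it mirrors the .hdbgrants structure (service and role names are made up, and the exact set of supported keys should be taken from the package README):

{
  "grantor-service-name": {
    "object_owner": {
      "roles": ["EXTERNAL_ROLE_FOR_OWNER"]
    },
    "application_user": {
      "roles": ["EXTERNAL_ROLE_FOR_APP_USER"]
    }
  }
}
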
## 3.7.0

@@ -2,0 +85,0 @@


deploy.js

@@ -89,2 +89,6 @@ 'use strict';

args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'detect_container_api_version') {
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'detect_hdi_version') {
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'lock_container') {

@@ -96,2 +100,4 @@ args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);

args.translateJSONEnvStringArrayOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'exclude_filter') {
args.translateJSONEnvStringArrayOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'deploy') {

@@ -105,2 +111,4 @@ args.translateJSONEnvStringArrayOptionToOption(logger, options, name, option, process.argv);

args.translateJSONEnvStringKeyValueObjectOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'path-parameter' || option === 'path_parameter') {
args.translateJSONEnvStringKeyValueObjectOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'treat_warnings_as_errors') {

@@ -126,2 +134,14 @@ args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);

args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if(option === 'treat_wrong_ownership_as_errors') {
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if(option === 'migrationtable_development_mode') {
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if(option === 'liveness_ping'){
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if(option === 'live_messages'){
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if(option === 'trace') {
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if(option === 'version') {
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else {
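
For reference, these branches translate entries of the JSON object in the HDI_DEPLOY_OPTIONS environment variable into command-line options. A hedged example that would exercise the newly added booleans (values illustrative):

// hypothetical HDI_DEPLOY_OPTIONS payload; the option names match the branches above
process.env.HDI_DEPLOY_OPTIONS = JSON.stringify({
  detect_container_api_version: true,
  detect_hdi_version: true,
  treat_wrong_ownership_as_errors: false,
  migrationtable_development_mode: false,
  liveness_ping: true,
  live_messages: true
});
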

@@ -171,2 +191,8 @@ logger.error('Unknown option ' + option + ' in HDI_DEPLOY_OPTIONS');

" by default, version detection is enabled",
" --[no-]detect-container-api-version",
" [don't] detect the container api version of the server",
" by default, container api version detection is enabled",
" --[no-]detect-hdi-version",
" [don't] detect the hdi version of the server",
" by default, hdi version detection is enabled",
"",

@@ -199,2 +225,4 @@ " --[no-]exit [don't] exit after deployment of artifacts",

" pass the given list of key-value parameters to the deployment",
" --path-parameter [<path>:<key>=<value> ..]",
" pass the given list of key-value path-parameters to the deployment",
"",

@@ -204,2 +232,14 @@ " --[no-]strip-cr-from-csv [don't] strip carriage return characters from CSV files",

"",
" --[no-]treat-wrong-ownership-as-errors [don't] treat wrong ownership of objects as errors",
" by default, wrong ownership will not result in errors",
"",
" --[no-]migrationtable-development-mode [don't] pass the development mode flag for migration tables to HDI, if the parameter is supported by the server",
" by default, will not pass the flag",
"",
" --[no-]liveness-ping [don't] send a sign of life from time to time",
" by default, a sign of life will be sent",
"",
" --[no-]live-messages [don't] display the make messages while the make is still in progress",
" by default, the messages will be displayed while the make is in progress",
"",
" --connection-timeout <ms>",

@@ -238,2 +278,10 @@ " number of milliseconds to wait for the database connection(s)",

opt.detectServerVersion = false;
}else if (arg === '--detect-container-api-version') {
opt.detectContainerAPIVersion = true;
} else if (arg === '--no-detect-container-api-version') {
opt.detectContainerAPIVersion = false;
}else if (arg === '--detect-hdi-version') {
opt.detectHDIVersion = true;
} else if (arg === '--no-detect-hdi-version') {
opt.detectHDIVersion = false;
} else if (arg === '--lock-container') {

@@ -259,2 +307,4 @@ opt.lockContainer = true;

i = args.translateStringKeyValueListOptionToObject(i, process.argv, opt.parameters);
} else if (arg === '--path-parameter') {
i = args.translateStringKeyValueListOptionToObject(i, process.argv, opt.path_parameters);
} else if (arg === '--treat-warnings-as-errors') {

@@ -310,2 +360,18 @@ opt.treatWarningsAsErrors = true;

opt.lockContainerTimeout = Number.parseInt(process.argv[i]);
} else if(arg === '--no-treat-wrong-ownership-as-errors'){
opt.treatWrongOwnershipAsErrors = false;
} else if(arg === '--treat-wrong-ownership-as-errors'){
opt.treatWrongOwnershipAsErrors = true;
} else if(arg === '--no-migrationtable-development-mode'){
opt.migrationTableDevMode = false;
} else if(arg === '--migrationtable-development-mode'){
opt.migrationTableDevMode = true;
} else if(arg === '--no-liveness-ping'){
opt.liveness_ping = false;
} else if(arg === '--liveness-ping'){
opt.liveness_ping = true;
} else if(arg === '--no-live-messages'){
opt.live_messages = false;
} else if(arg === '--live-messages'){
opt.live_messages = true;
} else {
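
The same switches can be passed on the command line; a sketch that drives the deployer as a child process (the entry-point path and the flag combination are illustrative):

const { spawn } = require('child_process');
// hypothetical invocation of the deploy.js entry point shown in this diff
spawn('node', [
  'node_modules/@sap/hdi-deploy/deploy.js',
  '--liveness-ping',
  '--live-messages',
  '--treat-wrong-ownership-as-errors',
  '--path-parameter', 'src/mytable.hdbtable:key=value' // format per the help text above
], { stdio: 'inherit' });
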

@@ -340,2 +406,3 @@ logger.error('Unknown argument: ' + arg);

logger.setSendMessagesToParentProcess(opt.sendMessagesToParentProcess); // only used for inter process communication when forked from server.js
logger.set_liveness_ping(opt.liveness_ping);

@@ -348,6 +415,42 @@ var async = require('async');

const version_container_api = require('./lib/version.container-api.js');
let container_api_version = version_container_api.getFallbackVersion();
function initializeContainerAPIVersion(cb) {
try {
if (opt.detectContainerAPIVersion) {
version_container_api.getVersion(services.getTargetCreds(), function(err, result) {
container_api_version = result;
cb();
});
} else {
cb();
}
} catch (error) {
cb(error.message);
}
}
const version_hdi = require('./lib/version.hdi-version.js');
let hdi_version = version_hdi.getFallbackVersion();
function initializeHDIVersion(cb) {
try {
if (opt.detectHDIVersion) {
version_hdi.getVersion(services.getTargetCreds(), function(err, result) {
hdi_version = result;
cb();
});
} else {
cb();
}
} catch (error) {
cb(error.message);
}
}
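
The two detected versions feed the feature map further down (see getFeatures in features.client.js later in this diff). A condensed sketch of the flow; the require path from deploy.js is an assumption:

// sketch only: variable names are taken from this diff, the wiring is assumed
const features = require('./lib/features.client.js')
  .getFeatures(serverVersion, container_api_version, hdi_version);
if (features['treat-wrong-ownership-as-errors'] <= 0) {
  // mirrors the deploy.js check that rejects the unsupported option
}
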
function handleShowInfoOptionAndExit(cb) {
try {
// fill info object
info = require('./lib/info.js').getInfoForComponents(showInfoComponents, serverVersion);
info = require('./lib/info.js').getInfoForComponents(showInfoComponents, serverVersion, container_api_version, hdi_version);

@@ -434,3 +537,3 @@ // show info and stop; if requested

var mode = process.env.HDI_DEPLOY_MODE ? ('' + process.env.HDI_DEPLOY_MODE).toLowerCase() : 'default';
logger.log(pjson.name + ', version ' + pjson.version + ' (mode ' + mode + '), server version ' + serverVersion.version + ' (' + serverVersion.versionSynthesized + '), node version ' + process.versions.node);
logger.log(pjson.name + ', version ' + pjson.version + ' (mode ' + mode + '), server version ' + serverVersion.version + ' (' + serverVersion.versionSynthesized + '), node version ' + process.versions.node +', HDI version ' + hdi_version.version + ', container API version ' + container_api_version.version);

@@ -442,2 +545,12 @@ // log that we couldn't get the version from the server (usually we don't have privileges for SYS.M_DATABASE)

// log that we couldn't get the container api version from the server (usually we don't have privileges for SYS.M_FEATURES)
if (container_api_version.error){
logger.log('Detection of container API version failed; root cause: ' + container_api_version.error);
}
// log that we couldn't get the hdi version from the server (usually we don't have privileges for SYS.M_FEATURES)
if (hdi_version.error){
logger.log('Detection of HDI version failed; root cause: ' + hdi_version.error);
}
// if a default-env.json was sourced, write the log message now

@@ -487,2 +600,8 @@ if (usedDefaultEnvFile) {

if (Object.keys(opt.path_parameters).length !== 0) {
if (info.client.features['path-parameter'] <= 0) {
cb('Option path-parameter is not supported by the server; based on detected server version ' + serverVersion.version);
}
}
if (opt.simulateMake) {

@@ -500,2 +619,8 @@ if (info.client.features['simulate-make'] <= 0) {

if (opt.treatWrongOwnershipAsErrors) {
if (info.client.features['treat-wrong-ownership-as-errors'] <= 0) {
cb('Option treat-wrong-ownership-as-errors is not supported by the server; based on HDI version ' + hdi_version.version);
}
}
if (opt.lockContainer) {

@@ -513,2 +638,18 @@ if (info.client.features['lock-container'] <= 0) {

// Don't error out if this is not supported, simply ignore it.
if(opt.migrationTableDevMode){
if(info.client.features['migrationtable-development-mode'] <= 0){
logger.log('The server does not support development_mode, --migrationtable-development-mode will be ignored.');
opt.migrationTableDevMode = false;
}
}
// Don't error out if this is not supported, simply ignore it.
if(opt.live_messages){
if(info.client.features['live-messages'] <= 0){
logger.log('The server does not support live updating of make messages. The messages will be displayed when the make is done.');
opt.live_messages = false;
}
}
cb();

@@ -612,2 +753,4 @@ }

initializeServerVersion,
initializeContainerAPIVersion,
initializeHDIVersion,
handleShowInfoOptionAndExit,

@@ -623,2 +766,3 @@ showVersionAndOtherInformation,

async.series(tasks, function(err) {
logger.stop_sending_liveness_ping();
if (err === exit_rc0_silent) {

@@ -625,0 +769,0 @@ signalProcessExit(0, false, true);


lib/content.js

@@ -5,24 +5,30 @@ 'use strict';

var fs = require('fs');
var util = require('util');
var Transform = require('stream').Transform;
const fs = require('./fileWorker');
const util = require('util');
const Transform = require('stream').Transform;
var fileWalker = require('./fileWalker.js');
var logger = require('./logger.js');
var template = require('./template.js');
var utils = require('./utils.js');
const fileWalker = require('./fileWalker.js');
const logger = require('./logger.js');
const template = require('./template.js');
const utils = require('./utils.js');
var paths = require('./paths.js');
const paths = require('./paths.js');
var dummyBuffer = new Buffer(0);
const dummyBuffer = Buffer.alloc(0);
const isGrantorFile = utils.isGrantorFile;
const isDeployableFile = utils.isDeployableFile;
const isRevokerFile = utils.isRevokerFile;
// remove '\r' from files
function Dos2Unix(options) {
if (!(this instanceof Dos2Unix)) {
return new Dos2Unix(options);
}
Transform.call(this, options);
/**
* remove '\r' from files
*
* @param {any} options Options for Dos2Unix
* @returns {Dos2Unix} Instance of Dos2Unix
*/
function Dos2Unix (options) {
if (!(this instanceof Dos2Unix)) {
return new Dos2Unix(options);
}
Transform.call(this, options);
}

@@ -32,277 +38,465 @@

Dos2Unix.prototype._transform = function(chunk, encoding, done) {
var p = 0;
Dos2Unix.prototype._transform = function (chunk, encoding, done) {
let p = 0;
//debug.log(chunk);
for (var i = 0; i < chunk.length; i = i + 1) {
if (chunk[i] === 13) { // '\r'
this.push(chunk.slice(p, i));
p = i + 1;
}
// debug.log(chunk);
for (let i = 0; i < chunk.length; i = i + 1) {
if (chunk[i] === 13) { // '\r'
this.push(chunk.slice(p, i));
p = i + 1;
}
if (p < chunk.length) {
this.push(chunk.slice(p, chunk.length));
}
done();
}
if (p < chunk.length) {
this.push(chunk.slice(p, chunk.length));
}
done();
};
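
A quick usage sketch of the transform above: piping a read stream through Dos2Unix yields the same bytes with every carriage return (0x0D) removed. The file name is illustrative:

const nodeFs = require('fs'); // plain node fs, just for this example
nodeFs.createReadStream('data.csv')
  .pipe(new Dos2Unix())
  .pipe(process.stdout); // CRs are stripped on the fly, chunk by chunk
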
// add a trailing '/' to the path
function deployDirPath(dir) {
if (dir[dir.length - 1] === '/') {
return dir;
}
/**
* add a trailing '/' to the path
*
* @param {String} dir directory.
* @returns {String} dir with / added.
*/
function deployDirPath (dir) {
if (dir[dir.length - 1] === '/') {
return dir;
}
return dir + '/';
return `${dir}/`;
}
// modify the path for certain files
function deployFilePath(file) {
// we keep configured config-templates files where they are, although we have modified them
// this way, we don't have to re-map error messages, path filters, etc.
/**
* modify the path for certain files
*
* @param {String} file Filename
* @returns {String} Modified path.
*/
function deployFilePath (file) {
/*
* we keep configured config-templates files where they are, although we have modified them
* this way, we don't have to re-map error messages, path filters, etc.
*/
// rename old .hdbsynonymtemplate to .hdbsynonymconfig
if (paths.extname(file) === '.hdbsynonymtemplate') {
file = utils.rename_synonymtemplate_to_config(file);
}
// rename old .hdbsynonymtemplate to .hdbsynonymconfig
if (paths.extname(file) === '.hdbsynonymtemplate') {
file = utils.rename_synonymtemplate_to_config(file);
}
return file;
return file;
}
function isTemplateFile(file) {
var fileExtension = paths.extname(file);
/**
* Check if the given file is a template file.
*
* @param {String} file Filename
* @returns {Boolean} True or False.
*/
function isTemplateFile (file) {
const fileExtension = paths.extname(file);
if (fileExtension === '.hdbsynonymtemplate') {
// it's an old-style synonym template file
return true;
}
if (fileExtension === '.hdbsynonymtemplate') {
// it's an old-style synonym template file
return true;
}
if (fileExtension.indexOf('config') === fileExtension.length - 6 && paths.isInCfgDirectory(file)) {
// it's a new-style config template file in cfg/
return true;
}
if (fileExtension.indexOf('config') === fileExtension.length - 6 && paths.isInCfgDirectory(file)) {
// it's a new-style config template file in cfg/
return true;
}
return false;
return false;
}
function deployFileContent(root, file, services, stripCRFromCSV) {
var dos2unix;
/**
* Get a ReadStream to the given file.
*
* @param {String} file Filename
* @param {any} services Services
* @param {Boolean} stripCRFromCSV True or false
* @returns {ReadStream|Dos2Unix} The ReadStream
*/
function deployFileContent (file, services, stripCRFromCSV) {
let dos2unix;
var p = [root, paths.clientPath(file)].join('/');
const p = paths.clientPath(file);
var fileExtension = paths.extname(file);
const fileExtension = paths.extname(file);
if (stripCRFromCSV) {
// replace dos /r/n with unix /n (for csv files)
if (fileExtension.toLowerCase() === '.csv') {
dos2unix = new Dos2Unix();
fs.createReadStream(p).pipe(dos2unix);
return dos2unix;
}
if (stripCRFromCSV) {
// replace dos /r/n with unix /n (for csv files)
if (fileExtension.toLowerCase() === '.csv') {
dos2unix = new Dos2Unix();
fs.createReadStream(p).pipe(dos2unix);
return dos2unix;
}
}
// process templates
if (isTemplateFile(file)) {
var result = template.convertTemplate(utils.readJSONFile(p), services);
if (result.converted) {
return new Buffer(JSON.stringify(result.content));
}
// process templates
if (isTemplateFile(file)) {
const result = template.convertTemplate(fs.readJSONFile(p), services);
if (result.converted) {
return Buffer.from(JSON.stringify(result.content));
}
}
return fs.createReadStream(p);
return fs.createReadStream(p);
}
// check and collect a single module
function collectSingleReusableModule(root, modulePath, scopePath, cb) {
// modulePath already has a trailing /
var node_modules = 'node_modules/';
if (fs.existsSync(paths.join(root, paths.clientPath(modulePath + 'src/.hdiconfig')))) {
if (fs.existsSync(paths.join(root, paths.clientPath(modulePath + node_modules)))) {
throw new Error('Nested node_modules found at ' + modulePath);
}
cb(modulePath, scopePath);
/**
* check and collect a single module
*
* @param {any} modulePath Path of the module
* @param {any} scopePath Path of the scope
* @param {SingleReusableModuleCallback} callbackfn Function taking two arguments, the module path and the scope path.
* @returns {undefined}
*/
function collectSingleReusableModule (modulePath, scopePath, callbackfn) {
// modulePath already has a trailing /
const node_modules = 'node_modules/';
if (fs.existsSync(paths.clientPath(`${modulePath}src/.hdiconfig`))) {
if (fs.existsSync(paths.clientPath(modulePath + node_modules))) {
throw new Error(`Nested node_modules found at ${modulePath}`);
}
callbackfn(modulePath, scopePath);
}
}
// find node_modules/<module> folders, where node_modules/<module>/src contains a .hdiconfig file
function collectReusableModules(root, cb) {
var rootPaths = [];
var node_modules = 'node_modules/';
if (fs.existsSync(paths.join(root, paths.clientPath(node_modules)))) {
var moduleDirs = fs.readdirSync(paths.join(root, paths.clientPath(node_modules)));
/**
* Callback that takes two arguments, the module path and the scope path.
* @callback SingleReusableModuleCallback
* @param {String} modulePath The module path.
* @param {String} scopePath The scope path.
*/
moduleDirs.forEach(function(dir) {
if (dir.startsWith('@')) {
// this is a scoped module folder at node_modules/@<scope>/; collect scoped modules
var scopePath = node_modules + dir + '/';
var scopedModuleDirs = fs.readdirSync(paths.join(root, paths.clientPath(scopePath)));
scopedModuleDirs.forEach(function(dir) {
// this is a scoped module at node_modules/@<scope>/<module>/
var modulePath = scopePath + dir + '/';
collectSingleReusableModule(root, modulePath, scopePath, cb);
});
} else {
// this is a non-scoped module at node_modules/<module>/
var modulePath = node_modules + dir + '/';
collectSingleReusableModule(root, modulePath, null, cb);
}
/**
* find node_modules/<module> folders, where node_modules/<module>/src contains a .hdiconfig file
*
* @param {SingleReusableModuleCallback} callbackfn Function taking two arguments, the module path and the scope path.
* @returns {Array} root paths.
*/
function collectReusableModules (callbackfn) {
const rootPaths = [];
const node_modules = 'node_modules/';
if (fs.existsSync(paths.clientPath(node_modules))) {
const moduleDirs = fs.readdirSync(paths.clientPath(node_modules));
moduleDirs.forEach(function (dir) {
if (dir.startsWith('@')) {
// this is a scoped module folder at node_modules/@<scope>/; collect scoped modules
const scopePath = `${node_modules + dir}/`;
const scopedModuleDirs = fs.readdirSync(paths.clientPath(scopePath));
scopedModuleDirs.forEach(function (scopedModuleDir) {
// this is a scoped module at node_modules/@<scope>/<module>/
const modulePath = `${scopePath + scopedModuleDir}/`;
collectSingleReusableModule(modulePath, scopePath, callbackfn);
});
}
return rootPaths;
} else {
// this is a non-scoped module at node_modules/<module>/
const modulePath = `${node_modules + dir}/`;
collectSingleReusableModule(modulePath, null, callbackfn);
}
});
}
return rootPaths;
}
module.exports = function(root, services, workingSet, deployDirs, pathFilter, stripCRFromCSV, excludeFilter) {
var
dirs = [],
files = [];
/**
* Handle collecting of files to deploy etc.
*
* @class Content
*/
class Content {
/**
* Creates an instance of Content.
* @param {String} root ROOT for the deployer.
* @param {Array} services Services
* @param {PathFilter} workingSet WorkingSet filter
* @param {Array} deployDirs The directories to check for the deployment
* @param {PathFilter} pathFilter PathFilter
* @param {Boolean} stripCRFromCSV Whether to strip CR or not
* @param {PathFilter} excludeFilter The exclude filter
*
* @memberOf Content
*/
constructor (root, services, workingSet, deployDirs, pathFilter, stripCRFromCSV, excludeFilter) {
let
dirs = [],
files = [];
//file walker functions
function enterDir(/*level, dir*/) {
return true;
const defaultAccessRoleFile = 'src/defaults/default_access_role.hdbrole';
const developmentDebugRoleFile = 'src/defaults/development_debug_role.hdbrole';
// file walker functions
/**
* Function for the fileWalker - action when entering directory.
*
* @returns {Boolean} true
*/
function enterDir (/* level, dir */) {
return true;
}
function leaveDir(level, dir, found) {
if (found) {
dir = paths.serverPath(dir);
dirs.push(dir);
}
/**
* Function for the fileWalker - action when leaving directory.
*
* @param {Number} level Depth
* @param {String} dir Directory
* @param {Boolean} found Found files (TODO)
* @returns {undefined}
*/
function leaveDir (level, dir, found) {
if (found) {
dir = paths.serverPath(dir);
dirs.push(dir);
}
}
function addFile(level, dir, file) {
// skip .gitignore files
if (paths.basename(file) === '.gitignore') {
return false;
}
/**
* Function for the fileWalker - action when adding a file.
*
* @param {Number} level Depth
* @param {String} dir Directory
* @param {String} file File
* @returns {Boolean} True or false.
*/
function addFile (level, dir, file) {
// skip .gitignore files
if (paths.basename(file) === '.gitignore') {
return false;
}
file = paths.serverPath(file);
file = paths.serverPath(file);
// apply the filter for paths
// we apply the path filter in the file walk to ensure that it also affects files which are handled by the deployer application itself, e.g. template files
// we also apply the exclude filter, to ensure that ignored files don't get picked up.
if (!pathFilter.matchesPath(file) || excludeFilter.matchesPath(file)) {
// directory is not in the filter, or file itself is not in the filter; skip the file
return false;
}
/*
* apply the filter for paths
* we apply the path filter in the file walk to ensure that it also affects files which are handled by the deployer application itself, e.g. template files
* we also apply the exclude filter, to ensure that ignored files don't get picked up.
*/
if (!pathFilter.matchesPath(file) || excludeFilter.matchesPath(file)) {
// directory is not in the filter, or file itself is not in the filter; skip the file
return false;
}
// file needs to be added
if(file.endsWith('.hdbsynonymtemplate')){
logger.warn(`File ${file} is using old-style .hdbsynonymtemplate. Please switch to .hdbsynonymconfig`);
} else if(file.endsWith('.hdbsynonymgrantor')){
logger.warn(`File ${file} is using old-style .hdbsynonymgrantor. Please switch to .hdbgrants`);
}
files.push(file);
return true;
// file needs to be added
if (file.endsWith('.hdbsynonymtemplate')) {
logger.warn(`File ${file} is using old-style .hdbsynonymtemplate. Please switch to .hdbsynonymconfig`);
} else if (file.endsWith('.hdbsynonymgrantor')) {
logger.warn(`File ${file} is using old-style .hdbsynonymgrantor. Please switch to .hdbgrants`);
}
files.push(file);
return true;
}
logger.logTimerInit('collect-files', "Collecting files...");
logger.logTimerInit('collect-files', 'Collecting files...');
// the collect directories are our deploy directories plus the src/ + cfg/ directories of modules
var collectDirs = deployDirs;
const collectDirs = deployDirs;
var reuseModulesCount = 0;
collectReusableModules(root, function(modulePath, scopePath) {
modulePath = paths.serverPath(modulePath);
// directories in collectDirs will be pushed to dirs automatically
collectDirs.push(modulePath + 'src/');
collectDirs.push(modulePath + 'cfg/');
// but, we need to push the root path of the module manually
dirs.push(modulePath);
// and, we also need to push the scope's path manually, if it's defined
if (scopePath) {
scopePath = paths.serverPath(scopePath);
if (dirs.indexOf(scopePath) === -1) {
dirs.push(scopePath);
}
let reuseModulesCount = 0;
collectReusableModules(function (modulePath, scopePath) {
modulePath = paths.serverPath(modulePath);
// directories in collectDirs will be pushed to dirs automatically
collectDirs.push(`${modulePath}src/`);
collectDirs.push(`${modulePath}cfg/`);
// but, we need to push the root path of the module manually
dirs.push(modulePath);
// and, we also need to push the scope's path manually, if it's defined
if (scopePath) {
scopePath = paths.serverPath(scopePath);
if (dirs.indexOf(scopePath) === -1) {
dirs.push(scopePath);
}
reuseModulesCount++;
}
reuseModulesCount++;
});
if (reuseModulesCount) {
// also push the lib/
dirs.push(paths.serverPath('lib/'));
// also push the lib/
dirs.push(paths.serverPath('lib/'));
}
// check if .hdiconfig exists in cfg
if (workingSet.matchesPath('src/.hdiconfig') && workingSet.matchesPath('cfg/.hdiconfig')) {
if (fs.existsSync('cfg') && fs.statSync('cfg').isDirectory() && !fs.existsSync('cfg/.hdiconfig') && fs.existsSync('src/.hdiconfig')) {
fs.copyFileSync('src/.hdiconfig', 'cfg/.hdiconfig');
logger.log('No .hdiconfig found in cfg/, using the one in src.');
}
}
// collect now
fileWalker.walk(root, collectDirs, enterDir, leaveDir, addFile);
var defaultAccessRoleFile = 'src/defaults/default_access_role.hdbrole';
this.root = root;
/*
* the directories to consider on the server-side are always our given deployDirs
* no matter which directories we've found locally, e.g. a cfg/ might not exist locally, but on the server
* with reusable modules, we also need 'lib/' here
*/
var result = {
root: root,
serverTopDirs: function() {
// the directories to consider on the server-side are always our given deployDirs
// no matter which directories we've found locally, e.g. a cfg/ might not exist locally, but on the server
// with reusable modules, we also need 'lib/' here
return [].concat(
deployDirs.map(function(dir) {
return [deployDirPath(dir)];
}),
[ ['lib/'] ]
);
},
deployFiles: function() {
return files.filter(isDeployableFile).map(function(file) {
return [deployFilePath(file)];
});
},
deployContent: function() {
return [].concat(
dirs.map(function(dir) {
return [deployDirPath(dir), dummyBuffer];
}),
files.filter(isDeployableFile).map(function(file) {
return [deployFilePath(file), deployFileContent(root, file, services, stripCRFromCSV)];
})
);
},
synonymGrantorFiles: function() {
return files.filter(function(file) {
if (!isGrantorFile(file)) {
return false;
}
if (!workingSet.matchesPath(file)) {
return false;
}
return true;
}).map(function(file) {
return file;
});
},
containsDefaultAccessRoleFile: function() {
if (!workingSet.matchesPath(defaultAccessRoleFile)) {
return false;
}
if (!pathFilter.matchesPath(defaultAccessRoleFile)) {
return false;
}
if (!fs.existsSync(paths.join(root, defaultAccessRoleFile))) {
return false;
}
return true;
},
checkDefaultAccessRoleFile: function() {
var roleJSON = utils.readJSONFile(paths.join(root, defaultAccessRoleFile));
/*
* The try catch blocks are only to simulate the previous behavior.
* Since before the rework these properties were functions, they only threw the exception when called, not on object creation.
*/
try {
this._serverTopDirs = [].concat(deployDirs.map((dir) => [deployDirPath(dir)]), [['lib/']]);
} catch (e) {
this._serverTopDirs = e;
}
if (!roleJSON.hasOwnProperty('role')) {
return 'Invalid default-access-role file "' + defaultAccessRoleFile + '": key "role" not found';
}
try {
this._deployFiles = files.filter(isDeployableFile).map((file) => [deployFilePath(file)]);
} catch (e) {
this._deployFiles = e;
}
if (!roleJSON.role.hasOwnProperty('name')) {
return 'Invalid default-access-role file "' + defaultAccessRoleFile + '": key "name" in object "role" not found';
}
try {
this._deployContent = [].concat(dirs.map((dir) => [deployDirPath(dir), dummyBuffer]), files.filter(isDeployableFile).map((file) => [deployFilePath(file), deployFileContent(file, services, stripCRFromCSV)]));
} catch (e) {
this._deployContent = e;
}
var name = roleJSON.role.name;
if (name !== 'default_access_role') {
return 'Invalid default-access-role file "' + defaultAccessRoleFile + '": file does not define the "default_access_role" role';
}
try {
this.synonymGrantorFiles = files.filter((file) => {
if (!isGrantorFile(file)) {
return false;
}
if (!workingSet.matchesPath(file)) {
return false;
}
return true;
});
} catch (e) {
this.synonymGrantorFiles = e;
}
return undefined;
try {
this.synonymRevokerFiles = files.filter((file) => {
if (!isRevokerFile(file)) {
return false;
}
if (!workingSet.matchesPath(file)) {
return false;
}
return true;
});
} catch (e) {
this.synonymRevokerFiles = e;
}
this.containsDefaultAccessRoleFile = function () {
if (!workingSet.matchesPath(defaultAccessRoleFile)) {
return false;
}
if (!pathFilter.matchesPath(defaultAccessRoleFile)) {
return false;
}
if (!fs.existsSync(defaultAccessRoleFile)) {
return false;
}
return true;
};
logger.logTimerDelta('collect-files', "Collecting files... ok");
this.containsDevelopmentDebugRoleFile = function () {
if (!workingSet.matchesPath(developmentDebugRoleFile)) {
return false;
}
if (!pathFilter.matchesPath(developmentDebugRoleFile)) {
return false;
}
if (!fs.existsSync(developmentDebugRoleFile)) {
return false;
}
return true;
};
logger.log(dirs.length + ' directories collected');
logger.log(files.length + ' files collected');
logger.log(reuseModulesCount + ' reusable modules collected');
this.checkDevelopmentDebugRoleFile = function () {
const roleJSON = fs.readJSONFile(developmentDebugRoleFile);
return result;
};
if (!roleJSON.hasOwnProperty('role')) {
return `Invalid development-debug-role file "${developmentDebugRoleFile}": key "role" not found`;
}
if (!roleJSON.role.hasOwnProperty('name')) {
return `Invalid development-debug-role file "${developmentDebugRoleFile}": key "name" in object "role" not found`;
}
const name = roleJSON.role.name;
if (name !== 'development_debug_role') {
return `Invalid development-debug-role file "${developmentDebugRoleFile}": file does not define the "development_debug_role" role`;
}
return undefined;
};
this.checkDefaultAccessRoleFile = function () {
const roleJSON = fs.readJSONFile(defaultAccessRoleFile);
if (!roleJSON.hasOwnProperty('role')) {
return `Invalid default-access-role file "${defaultAccessRoleFile}": key "role" not found`;
}
if (!roleJSON.role.hasOwnProperty('name')) {
return `Invalid default-access-role file "${defaultAccessRoleFile}": key "name" in object "role" not found`;
}
const name = roleJSON.role.name;
if (name !== 'default_access_role') {
return `Invalid default-access-role file "${defaultAccessRoleFile}": file does not define the "default_access_role" role`;
}
return undefined;
};
logger.logTimerDelta('collect-files', 'Collecting files... ok');
logger.log(`${dirs.length} directories collected`);
logger.log(`${files.length} files collected`);
logger.log(`${reuseModulesCount} reusable modules collected`);
}
get serverTopDirs () {
if (this._serverTopDirs instanceof Error) {
throw this._serverTopDirs;
}
return this._serverTopDirs;
}
get deployFiles () {
if (this._deployFiles instanceof Error) {
throw this._deployFiles;
}
return this._deployFiles;
}
get deployContent () {
if (this._deployContent instanceof Error) {
throw this._deployContent;
}
return this._deployContent;
}
get synonymGrantorFiles () {
if (this._synonymGrantorFiles instanceof Error) {
throw this._synonymGrantorFiles;
}
return this._synonymGrantorFiles;
}
set serverTopDirs (value) {
this._serverTopDirs = value;
}
set deployFiles (value) {
this._deployFiles = value;
}
set deployContent (value) {
this._deployContent = value;
}
set synonymGrantorFiles (value) {
this._synonymGrantorFiles = value;
}
}
module.exports = Content;
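
As wired up in the consumer shown next, the former factory call becomes a constructor call and the function-style accessors become properties (which may rethrow an error that was deferred at construction time):

const Content = require('./content.js');
// was: const cnt = content(root, ...); cnt.deployFiles();
const cnt = new Content(root, services, workingSet, deployDirs, pathFilter, stripCRFromCSV, excludeFilter);
logger.trace('deploy files: ', cnt.deployFiles); // property access, no call parentheses
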

@@ -7,3 +7,3 @@ 'use strict';

const content = require('./content.js');
const Content = require('./content.js');
const hdi = require('./hdi.js');

@@ -14,3 +14,7 @@ const logger = require('./logger.js');

const IgnoreFile = require('./IgnoreFile');
const status = require('./status');
const client_info = require('./client-info');
const check_ownership = require('./ownership_checker');
const fs = require('./fileWorker');

@@ -25,3 +29,6 @@ /**

*/
module.exports = function(options, services, cb) {
module.exports = function (options, services, cb) {
// Make sure that root is set for our fs wrapper.
fs.set_root(options.root);
const contentPathsFilter = new PathFilter();

@@ -46,3 +53,4 @@ if (options.includeFilter.valid) {

const cnt = content(options.root, services, options.workingSet, options.deployDirs, contentPathsFilter, options.stripCRFromCSV, options.excludeFilter);
const cnt = new Content(options.root, services, options.workingSet, options.deployDirs, contentPathsFilter, options.stripCRFromCSV, options.excludeFilter);
const tasks = [];

@@ -53,3 +61,3 @@

logger.log('Target service:', target_service.name);
} catch (e){
} catch (e) {
// Ignore errors.

@@ -60,5 +68,5 @@ }

logger.trace('top directories: ', cnt.serverTopDirs());
logger.trace('top directories: ', cnt.serverTopDirs);
logger.trace('deploy files: ', cnt.deployFiles());
logger.trace('deploy files: ', cnt.deployFiles);

@@ -68,4 +76,4 @@ const targetCreds = services.getTargetCreds();

const container = `${targetCreds.schema }#OO`;
const containerRole = `${targetCreds.schema }::access_role`;
const container = `${targetCreds.schema}#OO`;
const containerRole = `${targetCreds.schema}::access_role`;

@@ -80,7 +88,33 @@ // if we have a default_access_role file in the processing set, check its content

tasks.push(function(callback) {
// if we have a development_debug_role file in the processing set, check its content
if (cnt.containsDevelopmentDebugRoleFile()) {
const checkDevelopmentDebugRoleFileResult = cnt.checkDevelopmentDebugRoleFile();
if (checkDevelopmentDebugRoleFileResult !== undefined) {
return cb(checkDevelopmentDebugRoleFileResult);
}
}
tasks.push(function (callback) {
client_info.print_client_info(options, targetCreds, callback);
});
tasks.push(function (callback) {
status.get(options, targetCreds, callback);
});
tasks.push(function (callback) {
privileges.revoke(options, services, cnt, container, containerRole, callback);
});
tasks.push(function (callback) {
privileges.grant(options, services, cnt, container, containerRole, callback);
});
tasks.push(function(callback) {
if (options.treatWrongOwnershipAsErrors) {
tasks.push(function (callback) {
check_ownership(options, targetCreds, callback);
});
}
tasks.push(function (callback) {
hdi.deploy(options, targetCreds, cnt, callback);

@@ -87,0 +121,0 @@ });

@@ -5,33 +5,51 @@ 'use strict';

function getFeatures(serverVersion) {
var sinceServerVersion_1_0_120_0 = serverVersion.isGreaterThanOrEqualTo(1, 0, 120, 0) ? 1 : -1;
var sinceServerVersion_2_0_1_0 = serverVersion.isGreaterThanOrEqualTo(2, 0, 1, 0) ? 1 : -1;
var sinceServerVersion_2_0_10_0 = serverVersion.isGreaterThanOrEqualTo(2, 0, 10, 0) ? 1 : -1;
/**
* Get the features supported by the client according to the server version.
*
* @param {Version} server_version Server version
* @param {Container_API_Version} container_api_version Container API version
* @param {HDI_Version} hdi_version HDI version
* @returns {Object} feature map
*/
function getFeatures (server_version, container_api_version, hdi_version) {
const since_server_version_1_0_120_0 = server_version.isGreaterThanOrEqualTo(1, 0, 120, 0) ? 1 : -1;
const since_server_version_2_0_1_0 = server_version.isGreaterThanOrEqualTo(2, 0, 1, 0) ? 1 : -1;
const since_server_version_2_0_10_0 = server_version.isGreaterThanOrEqualTo(2, 0, 10, 0) ? 1 : -1;
return {
'info': 2,
'verbose': 1,
'structured-log': 1,
'lock-container': 1 * sinceServerVersion_2_0_1_0,
'default-access-role': 1,
'grants': 4,
'working-set': 1,
'include-filter': 1,
'deploy': 1,
'treat-unmodified-as-modified': 1,
'undeploy': 1,
'parameter': 1 * sinceServerVersion_1_0_120_0,
'treat-warnings-as-errors': 1 * sinceServerVersion_1_0_120_0,
'simulate-make': 1 * sinceServerVersion_1_0_120_0,
'service-replacements': 1,
'modules': 2,
'config-templates': 2,
'environment-options': 1,
'undeploy-whitelist': 1,
'zero-downtime-update': 1 * sinceServerVersion_2_0_10_0
};
const since_container_api_version_44 = container_api_version.isGreaterThanOrEqualTo(44) ? 1 : -1;
const since_hdi_version_1 = hdi_version.isGreaterThanOrEqualTo(1) ? 1 : -1;
const since_hdi_version_3 = hdi_version.isGreaterThanOrEqualTo(3);
const since_server_version_2_0_37_1_but_not_higher_sp = server_version.isGreaterThanOrEqualTo(2, 0, 37, 1) && !server_version.isGreaterThanOrEqualTo(2, 0, 40, 0);
const since_server_version_2_0_24_10_but_not_higher_sp = server_version.isGreaterThanOrEqualTo(2, 0, 24, 10) && !server_version.isGreaterThanOrEqualTo(2, 0, 30, 0);
const since_server_version_1_0_122_25_but_not_higher_sp = server_version.isGreaterThanOrEqualTo(1, 0, 122, 25) && !server_version.isGreaterThanOrEqualTo(2, 0, 0, 0);
return {
'info': 2,
'verbose': 1,
'structured-log': 1,
'lock-container': 1 * since_server_version_2_0_1_0,
'default-access-role': 1,
'grants': 4,
'working-set': 1,
'include-filter': 1,
'deploy': 1,
'treat-unmodified-as-modified': 1,
'undeploy': 1,
'parameter': 1 * since_server_version_1_0_120_0,
'path-parameter': 1 * since_server_version_1_0_120_0,
'treat-warnings-as-errors': 1 * since_server_version_1_0_120_0,
'simulate-make': 1 * since_server_version_1_0_120_0,
'service-replacements': 1,
'modules': 2,
'config-templates': 2,
'environment-options': 1,
'undeploy-whitelist': 1,
'zero-downtime-update': 1 * since_server_version_2_0_10_0,
'treat-wrong-ownership-as-errors': since_container_api_version_44,
'migrationtable-development-mode': since_hdi_version_1,
'live-messages': (since_hdi_version_3 || since_server_version_2_0_37_1_but_not_higher_sp || since_server_version_2_0_24_10_but_not_higher_sp || since_server_version_1_0_122_25_but_not_higher_sp) ? 1 : -1
};
}
module.exports = {
getFeatures: getFeatures
getFeatures: getFeatures
};
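
The map encodes availability as 1 (supported) or -1 (unsupported), so callers test with <= 0, as deploy.js does earlier in this diff. A minimal sketch:

const features = getFeatures(server_version, container_api_version, hdi_version);
if (features['live-messages'] <= 0) {
  // same fallback as in deploy.js: messages are only shown once the make is done
}
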

@@ -5,3 +5,3 @@ 'use strict';

var fs = require('fs');
var fs = require('./fileWorker');
var path = require('path');

@@ -11,3 +11,2 @@ var paths = require('./paths.js');

exports.walk = function(root, dirs, enterDir, leaveDir, addFile) {
function walkIntern(level, dir) {

@@ -22,7 +21,6 @@ var files, stats, found;

}
files = fs.readdirSync(dir);
files = fs.readdirSync(path.join(root, dir));
stats = files.map(function(name) {
return fs.statSync(path.join(root, dir, name));
return fs.statSync(path.join(dir, name));
});

@@ -58,5 +56,4 @@

}
var rootPlusDir = path.join(root, dir);
if (fs.existsSync(rootPlusDir)) {
if (fs.statSync(rootPlusDir).isDirectory()) {
if (fs.existsSync(dir)) {
if (fs.statSync(dir).isDirectory()) {
walkIntern(0, dir);

@@ -63,0 +60,0 @@ }

@@ -6,2 +6,3 @@ /* jslint indent: 4 */

const util = require('util');
const inspect = Symbol.for('nodejs.util.inspect.custom');
const regex_helper = require('./regex-helper');

@@ -20,3 +21,3 @@

*/
constructor() {
constructor () {
this.invalidate();

@@ -30,3 +31,4 @@ }

* @memberOf PathFilter
*/ invalidate() {
*/
invalidate () {
this.valid = false;

@@ -45,3 +47,3 @@ this.directories = {};

*/
inspect() {
inspect () {
if (this.regex.length > 0) {

@@ -52,26 +54,21 @@ return util.format('{ valid: %s, directories: %j, files: %j, regex: %j }', this.valid, this.directories, this.files, this.regex);

}
/*
* var dirs = '';
* Object.keys(this.directories).forEach(function(dir) {
* if (dirs.length === 0) {
* dirs += ' \'' + dir + '\': ' + this.directories[dir];
* } else {
* dirs += ', \'' + dir + '\': ' + this.directories[dir];
* }
* });
*
* var files = '';
* Object.keys(this.files).forEach(function(file) {
* if (files.length === 0) {
* files += ' \'' + file + '\': ' + this.files[file];
* } else {
* files += ', \'' + file + '\': ' + this.files[file];
* }
* });
*
* return util.format('{ valid: %s, directories: {%s}, files: {%s} }', this.valid, dirs, files);
*/
}
// Use Symbol-based implementation - old one is deprecated, but we have to keep it for node 6 support.
/**
* inspect will be called by util.inspect in node.js
* we want to avoid that the 'class name' is also printed and mucks up the assertions
* @returns {String} the inspect properties.
*
* @memberOf PathFilter
*/
[inspect] () {
if (this.regex.length > 0) {
return util.format('{ valid: %s, directories: %j, files: %j, regex: %j }', this.valid, this.directories, this.files, this.regex);
} else {
return util.format('{ valid: %s, directories: %j, files: %j }', this.valid, this.directories, this.files);
}
}
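
Background on the change above: util.inspect looks up the method registered under Symbol.for('nodejs.util.inspect.custom'), so the symbol-keyed accessor replaces the deprecated inspect() hook. A self-contained demonstration (class name is illustrative):

const util = require('util');
const custom = Symbol.for('nodejs.util.inspect.custom');
class Demo {
  [custom] () { return '{ valid: true }'; } // called instead of the default formatter
}
console.log(util.inspect(new Demo())); // prints: { valid: true }
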
/**
* Filter the given paths by the regexes in this filter.

@@ -84,3 +81,3 @@ *

*/
filter_by_regex(paths) {
filter_by_regex (paths) {
return paths.filter((path) => {

@@ -97,3 +94,2 @@ for (let i = 0; i < this.regex.length; i++) {

/**

@@ -106,3 +102,3 @@ * Add the given path and make the filter valid.

*/
addPath(path) {
addPath (path) {
this.valid = true;

@@ -124,3 +120,3 @@ if (path.indexOf('*') !== -1) {

* @memberOf PathFilter
*/ addPaths(paths) {
*/ addPaths (paths) {
if (Array.isArray(paths)) {

@@ -163,3 +159,3 @@ for (let i = 0; i < paths.length; ++i) {

* @memberOf PathFilter
*/ matchesPath(path) {
*/ matchesPath (path) {
if (!this.valid) {

@@ -201,3 +197,3 @@ // an invalid filter always contains everything

*/
forEachFile(cb) {
forEachFile (cb) {
if (!this.valid) {

@@ -221,3 +217,3 @@ return;

*/
size() {
size () {
return Object.keys(this.directories).length + Object.keys(this.files).length + this.regex.length;

@@ -224,0 +220,0 @@ }

'use strict';
/* jslint indent: 4 */
const logger = require('./logger.js');
const async = require('async');
const DeployTask = require('./tasks.js');
const {Container} = require('@sap/hdi');
const {enrich_credentials_with_session_variables} = require('./client-info');
const {prepareCredentials} = require('./utils');
var async = require('async');
var path = require('path');
var fs = require('fs');
var utils = require('./utils.js');
const deployId = process.env.DEPLOY_ID || 'Deployment ID: none';
var hdbAsync = require('./hdbAsync.js');
var hdiClient = require('./node-hdi/node-hdi.js');
var logger = require('./logger.js');
var messages_hdi = require('./messages.hdi.js');
var deployId = process.env.DEPLOY_ID || 'Deployment ID: none';
/**
* Filter out client only files in the deploy set and/or transform them accordingly.
*
* @param {Array} files List of files to deploy.
* @returns {Array} The filtered files.
*/
function handle_client_files(files) {
// Filter out grantor files and turn into a set to allow blindly adding config files without risking duplicates.
const cleaned = new Set();
files.forEach((file) => {
if (!utils.isDeployableFile(file)) {
// Do nothing.
logger.log('Filtered undeployable file', file);
} else if (utils.isSynonymTemplateFile(file)) {
// Ensure that for each template file the corresponding config will be deployed.
cleaned.add(utils.rename_synonymtemplate_to_config(file));
logger.log('Filtered .hdbsynonymtemplate file', file);
} else {
cleaned.add(file);
}
});
return Array.from(cleaned);
}
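
An illustrative call (file names made up): grantor files drop out of the deploy set, old-style synonym templates map to their config counterpart, and the Set removes duplicates:

handle_client_files([
  'src/a.hdbtable',
  'src/s.hdbsynonymtemplate', // replaced by src/s.hdbsynonymconfig
  'src/g.hdbgrants' // grantor file: not deployable, handled separately
]);
// expected result, assuming these utils classifications:
// ['src/a.hdbtable', 'src/s.hdbsynonymconfig']
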
// deploy hdi content
exports.deploy = function (options, creds, content, cb) {
var hdiCreds = {
user: creds.hdi_user ? creds.hdi_user : creds.user,
password: creds.hdi_user ? creds.hdi_password : creds.password,
};
const hdiCreds = enrich_credentials_with_session_variables(prepareCredentials(creds, options, logger));
const schema = `${creds.schema}#DI`;
const container = new Container(creds.schema, hdiCreds, schema);
if (Array.isArray(creds.db_hosts)) {
hdiCreds.hosts = creds.db_hosts;
} else {
hdiCreds.host = creds.host;
hdiCreds.port = creds.port;
}
const deployTask = new DeployTask(container, hdiCreds, content, options, logger, creds.schema);
if (creds.certificate) {
hdiCreds.ca = Array.isArray(creds.certificate) ? creds.certificate : [creds.certificate];
logger.trace('hdiCreds.ca set to', hdiCreds.ca);
}
const tasks = [
...deployTask.preprocessing(),
...deployTask.connect(),
...deployTask.lock(),
...deployTask.synchronize(),
...deployTask.make(),
...deployTask.deploy(),
...deployTask.unlock()
];
hdiCreds.initializationTimeout = options.connectionTimeout;
var printMessages = hdbAsync().printMessages;
function logfn(messages) {
printMessages(logger, messages);
}
//var client = new hdiClient(creds.schema, function(messages) { logfn(logger, messages); }, hdiCreds);
var client = new hdiClient(creds.schema, null, hdiCreds); // no logging inside node-hdi-client
var deployContent;
var tasks = [];
var serverTopDirs = content.serverTopDirs() || [];
var deployFiles = [];
var undeployFiles = [];
function filterUndeploy(undeployFiles) {
var filename = path.join(options.root, options.undeployFilename);
var filter = [];
if (fs.existsSync(filename)) {
logger.log('Undeploy whitelist file "undeploy.json" found; deleted files will be filtered by the whitelist');
filter = utils.readJSONFile(filename);
return undeployFiles.filter(function (file) {
return filter.indexOf(file) !== -1;
});
} else {
logger.log('Undeploy whitelist file "undeploy.json" not found; an empty whitelist is used and no files will be scheduled for undeploy');
return [];
}
}
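
As the code above shows, undeploy.json is read as a plain JSON array of server paths; only deleted files that appear in this whitelist remain scheduled for undeploy. A hypothetical example file:

[
  "src/tables/obsolete.hdbtable",
  "src/views/old_view.hdbview"
]
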
function prepareMake(result) {
if (options.treatUnmodifiedAsModified) {
// schedule all locally collected files for deploy; this maps to Added, Modified, or Unmodified
deployFiles = content.deployFiles().map(function(item) { return item[0]; })
.filter((item) => !options.excludeFilter.matchesPath(item));
} else {
// schedule the Added or Modified files for deploy
deployFiles = result.filter(function(item) { return item.STATUS === 'A' || item.STATUS === 'M'; })
.map(function(item) { return item.PATH; })
.filter((item) => !options.excludeFilter.matchesPath(item));
}
undeployFiles = result.filter(function(item) { return item.STATUS === 'D'; })
.map(function(item) { return item.PATH; })
.filter((item) => !options.excludeFilter.matchesPath(item));
logger.trace('status result:', result);
logger.trace('deploy files:', deployFiles);
logger.trace('undeploy files:', undeployFiles);
// filter the undeploy set based on the undeploy.json file
if (undeployFiles.length && !options.autoUndeploy) {
undeployFiles = filterUndeploy(undeployFiles);
logger.trace('filtered undeploy files:', undeployFiles);
}
// filter the undeploy set by the include-filter, because deleted files are not considered during the file walk
if (options.includeFilter.valid) {
undeployFiles = undeployFiles.filter(function (file) {
return options.includeFilter.matchesPath(file);
});
}
// filter current deployFiles and undeployFiltes via the working set
if (options.workingSet.valid) {
deployFiles = deployFiles.filter(function (file) {
return options.workingSet.matchesPath(file);
});
undeployFiles = undeployFiles.filter(function (file) {
return options.workingSet.matchesPath(file);
});
}
if (options.treatUnmodifiedAsModified) {
logger.log(deployFiles.length + " modified, unmodified, or added files are scheduled for deploy");
} else {
logger.log(deployFiles.length + " modified or added files are scheduled for deploy based on delta detection");
}
logger.log(undeployFiles.length + " deleted files are scheduled for undeploy based on delta detection (filtered by undeploy whitelist)");
let explicit_deploy_files = new Set();
let options_deploy_count = 0;
// add explicit deploy set, but filter it via the working set
// Since deploy files will be filtered in "Handle client files", already subtract files we know will be removed.
options.deploy.forEachFile(function(path) {
if (options.workingSet.matchesPath(path)) {
explicit_deploy_files.add(path);
if (utils.isGrantorFile(path)) {
options_deploy_count--;
}
}
});
//add files defined by file pattern
if(options.deploy){
options.deploy.filter_by_regex(content.deployFiles().map(item => item[0])).forEach(file => explicit_deploy_files.add(file));
}
explicit_deploy_files.forEach((file) => {
if(!options.excludeFilter.matchesPath(file)){
deployFiles.push(file);
options_deploy_count++;
}
});
let explicit_undeploy_files = new Set();
// add explicit undeploy set, but filter it via the working set
options.undeploy.forEachFile(function(path) {
if (options.workingSet.matchesPath(path)) {
explicit_undeploy_files.add(path);
}
});
let options_undeploy_count = 0;
explicit_undeploy_files.forEach((file) => {
if(!options.excludeFilter.matchesPath(file)){
undeployFiles.push(file);
options_undeploy_count++;
}
});
deployFiles = handle_client_files(deployFiles);
//undeployFiles = handle_client_files(undeployFiles);
logger.log(options_deploy_count + " files are scheduled for deploy based on explicit specification");
logger.log(options_undeploy_count + " files are scheduled for undeploy based on explicit specification");
return {
deployFiles: deployFiles,
undeployFiles: undeployFiles
};
}
logger.trace(serverTopDirs);
logger.logTimerInit('preprocessing-files', 'Preprocessing files...');
try {
deployContent = content.deployContent();
} catch (err) {
cb(err);
return;
}
logger.logTimerDelta('preprocessing-files', 'Preprocessing files... ok');
tasks.push(logger.logfnTimerInit('connect-container', 'Connecting to the container "%s"...', creds.schema));
tasks.push(function (callback) { client.connect(callback); });
tasks.push(logger.logfnTimerDelta('connect-container', 'Connecting to the container "%s"... ok', creds.schema));
if (options.lockContainer) {
tasks.push(logger.logfnTimerInit('locking-container', 'Locking the container "%s"...', creds.schema));
tasks.push(function (callback) { client.lock(options.lockContainerTimeout, {}, callback); });
tasks.push(logger.logfnTimerDelta('locking-container', 'Locking the container "%s"... ok', creds.schema));
}
tasks.push(logger.logfnTimerInit('synchronizing-files', 'Synchronizing files with the container "%s"...', creds.schema));
if (options.singleDeleteCallsForDirectories) {
serverTopDirs.forEach(function (dir) {
tasks.push(function (callback) {
async.waterfall([
function (innerCB) { client.delete(dir, { RECURSIVE: 'TRUE' }, innerCB); },
function (result, innerCB) {
if (result.RETURN_CODE >= 0) {
// don't log anything
return innerCB(null, result);
}
var messages = result.MESSAGES;
var expectedErrors = [messages_hdi.DELETE_PATHS_FAILED.code, messages_hdi.FOLDER_NOT_FOUND.code];
var foundErrorCodes = 0;
for (var i = 0; i < expectedErrors.length; ++i) {
for (var j = 0; j < messages.length; ++j) {
if (messages[j].MESSAGE_CODE === expectedErrors[i]) {
++foundErrorCodes;
break;
}
}
}
if (foundErrorCodes === expectedErrors.length) {
// don't log anything
return innerCB(null, result);
}
logfn(result.MESSAGES);
innerCB(new Error('HDI call failed'));
}
], callback);
});
});
async.series(tasks, function (err, results) {
if (err) {
// err.message.replace: delete line breaks
const message = err.message ? String(err.message) : `${err}`;
logger.error('Deployment to container %s failed - error: %s [%s].', creds.schema, message.replace(/(\r\n|\n|\r)/gm, ''), deployId);
} else {
tasks.push(function (callback) { client.delete(serverTopDirs.map(function (a) { return a[0]; }), { RECURSIVE: 'TRUE', IGNORE_NON_EXISTING_PATHS: 'TRUE' }, callback); });
logger.log('Deployment to container %s done [%s].', creds.schema, deployId);
}
tasks.push(function (callback) { client.write(deployContent, {}, callback); });
tasks.push(logger.logfnTimerDelta('synchronizing-files', 'Synchronizing files with the container "%s"... ok', creds.schema));
var deployParameters = {};
// copy parameters from options
Object.keys(options.parameters).forEach(function (key) {
deployParameters[key.toUpperCase()] = options.parameters[key];
// as last action, close the client
deployTask.disconnect();
cb(err, {
task: 'deploy',
results: results
});
// add explicit parameters
if (options.treatWarningsAsErrors) {
deployParameters.TREAT_WARNINGS_AS_ERRORS = 'TRUE';
}
if (options.simulateMake) {
deployParameters.SIMULATE_MAKE = 'TRUE';
}
tasks.push(function (callback) {
async.waterfall([
function (innerCB) { client.status(serverTopDirs.map(function (a) { return a[0]; }), {}, innerCB); },
function (result, innerCB) {
var fileLists = prepareMake(result.RESULTS);
deployFiles = fileLists.deployFiles;
undeployFiles = fileLists.undeployFiles;
return innerCB(null, deployFiles, undeployFiles);
},
function (deployFiles, undeployFiles, innerCB) {
logger.logTimerInit('deploying-files', 'Deploying to the container "%s"...', creds.schema);
client.make(deployFiles, undeployFiles, {}, deployParameters, innerCB);
},
function (result, innerCB) {
logfn(result.MESSAGES);
if (result.RETURN_CODE < 0) {
innerCB(new Error('HDI make failed'), result);
} else {
innerCB(null, result);
}
}
], callback);
});
tasks.push(logger.logfnTimerDelta('deploying-files', 'Deploying to the container "%s"... ok', creds.schema));
tasks.push(function (callback) {
var defaultAccessRoleFile = 'src/defaults/default_access_role.hdbrole';
var defaultAccessRoleName = 'default_access_role';
var containerAccessRoleName = creds.schema + "::access_role";
var defaultPermissionSet = [
['CREATE TEMPORARY TABLE', '', containerAccessRoleName],
['DELETE', '', containerAccessRoleName],
['EXECUTE', '', containerAccessRoleName],
['INSERT', '', containerAccessRoleName],
['SELECT', '', containerAccessRoleName],
['SELECT CDS METADATA', '', containerAccessRoleName],
['UPDATE', '', containerAccessRoleName]
];
if (undeployFiles.indexOf(defaultAccessRoleFile) !== -1 && deployFiles.indexOf(defaultAccessRoleFile) === -1) {
if (options.simulateMake) {
logger.log('Default-access-role file "src/defaults/default_access_role.hdbrole" undeployed, but simulate-make option was given; global role "%s" will not be adapted', containerAccessRoleName);
callback();
} else {
async.series([
logger.logfn('Default-access-role file "src/defaults/default_access_role.hdbrole" undeployed; global role "%s" will be adapted', containerAccessRoleName),
logger.logfnTimerInit('regrant-default-permissions', 'Regranting default permission set to global role "%s"...', containerAccessRoleName),
function (cb) { client.grantContainerSchemaPrivileges(defaultPermissionSet, {}, cb); },
logger.logfnTimerDelta('regrant-default-permissions', 'Regranting default permission set to global role "%s"... ok', containerAccessRoleName)
], callback);
}
} else {
if (content.containsDefaultAccessRoleFile()) {
if (options.simulateMake) {
logger.log('Default-access-role file "src/defaults/default_access_role.hdbrole" scheduled for deploy, but simulate-make option was given; global role "%s" will not be adapted', containerAccessRoleName);
callback();
} else {
async.series([
logger.logfn('Default-access-role file "src/defaults/default_access_role.hdbrole" scheduled for deploy; global role "%s" will be adapted', containerAccessRoleName),
logger.logfnTimerInit('grant-default-role', 'Granting container-local default access role "%s"."%s" to global role "%s"...', creds.schema, defaultAccessRoleName, containerAccessRoleName),
function (cb) { client.grantContainerSchemaRoles([[defaultAccessRoleName, '', containerAccessRoleName]], {}, cb); },
logger.logfnTimerDelta('grant-default-role', 'Granting container-local default access role "%s"."%s" to global role "%s"... ok', creds.schema, defaultAccessRoleName, containerAccessRoleName),
logger.logfnTimerInit('revoke-default-permissions', 'Revoking default permission set from global role "%s"...', containerAccessRoleName),
function (cb) { client.revokeContainerSchemaPrivileges(defaultPermissionSet, {}, cb); },
logger.logfnTimerDelta('revoke-default-permissions', 'Revoking default permission set from global role "%s"... ok', containerAccessRoleName)
], callback);
}
} else {
logger.log('No default-access-role handling needed; global role "%s" will not be adapted', containerAccessRoleName);
callback();
}
}
});
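For orientation, the artifact that drives this branch is src/defaults/default_access_role.hdbrole, and the role it defines must be named default_access_role. A minimal sketch of such a file (the privilege list is illustrative only, not prescribed by the deployer):
// src/defaults/default_access_role.hdbrole -- illustrative sketch
{
    "role": {
        "name": "default_access_role",
        "schema_privileges": [ { "privileges": [ "SELECT", "EXECUTE" ] } ]
    }
}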
if (options.lockContainer) {
tasks.push(logger.logfnTimerInit('unlocking-container', 'Unlocking the container "%s"...', creds.schema));
tasks.push(function (callback) { client.unlock(callback); });
tasks.push(logger.logfnTimerDelta('unlocking-container', 'Unlocking the container "%s"... ok', creds.schema));
}
// unleash the avalanche
async.series(tasks, function (err, results) {
if (err) {
// err.message.replace: delete line breaks
var message = err.message ? '' + err.message : '' + err;
logger.error('Deployment to container %s failed - error: %s [%s].', creds.schema, message.replace(/(\r\n|\n|\r)/gm, ''), deployId);
} else {
logger.log('Deployment to container %s done [%s].', creds.schema, deployId);
}
// as last action, close the client
deployTask.disconnect();
client.close();
cb(err, {
task: 'deploy',
results: results
});
});
});
};

@@ -23,3 +23,3 @@ 'use strict';

try {
this.lines = fs.readFileSync(file_path, 'utf8').split('\n')
this.lines = fs.readFileSync(file_path, 'utf8').split(/\r?\n/)
.filter((line) => {

@@ -26,0 +26,0 @@ if (line.startsWith('!')){

@@ -5,30 +5,41 @@ 'use strict';

function getInfoForComponents(components, serverVersion) {
var info = {};
/**
* Get info for the different components.
*
* @param {any} components Components to get info for ('client', 'server', or 'all').
* @param {Version} server_version Server version.
* @param {Client_API_Version} container_api_version Container API version.
* @param {HDI_Version} hdi_version HDI version.
* @returns {Object} Info object for the requested components.
*/
function getInfoForComponents (components, server_version, container_api_version, hdi_version) {
const info = {};
var all = (components.length === 0) || (components.indexOf('all') !== -1);
const all = (components.length === 0) || (components.indexOf('all') !== -1);
if (all || (components.indexOf('client') !== -1)) {
var packagejson = require('./../package.json');
var features_client = require('./features.client.js').getFeatures(serverVersion);
info.client = {
name: packagejson.name,
version: packagejson.version,
features: features_client
};
}
if (all || (components.indexOf('client') !== -1)) {
const packagejson = require('./../package.json');
const features_client = require('./features.client.js').getFeatures(server_version, container_api_version, hdi_version);
info.client = {
name: packagejson.name,
version: packagejson.version,
features: features_client
};
}
if (all || (components.indexOf('server') !== -1)) {
info.server = {
name: 'sap-hana-database',
version: serverVersion.version,
features: {}
};
}
if (all || (components.indexOf('server') !== -1)) {
info.server = {
name: 'sap-hana-database',
version: server_version.version,
'container-api-version': '' + container_api_version.version,
'hdi-version': '' + hdi_version.version,
features: {}
};
}
return info;
return info;
}
module.exports = {
getInfoForComponents: getInfoForComponents
getInfoForComponents: getInfoForComponents
};
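A usage sketch for the reworked signature (the module path and the shapes of the version objects are assumptions, based only on the properties read above):
// assumed: each version object exposes a .version property
const { getInfoForComponents } = require('./lib/info.js');
const info = getInfoForComponents(['server'],
    { version: '2.00.045.00.0' },  // server_version
    { version: 2 },                // container_api_version
    { version: '2.0.1' });         // hdi_version
// info.server -> { name: 'sap-hana-database', version: '2.00.045.00.0',
//                  'container-api-version': '2', 'hdi-version': '2.0.1', features: {} }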

@@ -9,27 +9,46 @@ 'use strict';

var trace = process.env.TRACE;
const Liveness_Ping = require('./liveness-ping');
function writeToStdOut() {
var str = util.format.apply(null, arguments) + "\n";
process.stdout.write(str);
// initialize with dummy object, will be replaced with instance of Liveness_Ping if --liveness-ping is set.
let liveness_ping = {
sent: () => {},
stop: () => {}
};
function writeToStdOut () {
liveness_ping.sent();
var str = util.format.apply(null, arguments) + '\n';
process.stdout.write(str);
}
function writeToStdErr() {
var str = util.format.apply(null, arguments) + "\n";
process.stderr.write(str);
function writeToStdErr () {
liveness_ping.sent();
var str = util.format.apply(null, arguments) + '\n';
process.stderr.write(str);
}
exports.set_liveness_ping = function (enable_liveness_ping) {
if (enable_liveness_ping) {
liveness_ping = new Liveness_Ping();
}
};
exports.stop_sending_liveness_ping = function () {
liveness_ping.stop();
};
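How the new exports are intended to be wired up, as a sketch (the require path is an assumption):
const logger = require('./lib/logger.js');
logger.set_liveness_ping(true);       // swap the no-op dummy for a real Liveness_Ping
logger.log('make still running...');  // every log line also signals liveness
logger.stop_sending_liveness_ping();  // stop the ping when the deployer is done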
exports.log = writeToStdOut;
exports.warn = function(){
var args = arguments;
args[0] ='WARNING: ' + args[0];
writeToStdOut.apply(null, args);
exports.warn = function () {
var args = arguments;
args[0] = 'WARNING: ' + args[0];
writeToStdOut.apply(null, args);
};
exports.logfn = function() {
var args = arguments;
return function(cb) {
writeToStdOut.apply(null, args);
cb();
};
exports.logfn = function () {
var args = arguments;
return function (cb) {
writeToStdOut.apply(null, args);
cb();
};
};

@@ -39,12 +58,20 @@

exports.setTrace = function(value) {
trace = value;
exports.setTrace = function (value) {
trace = value;
};
exports.trace = function() {
if (trace) {
var str = util.format.apply(null, arguments);
str = str.replace(/PASSWORD.*/ig, 'p[...]');
writeToStdOut(str);
}
const client_private_key_1_regexp = new RegExp('"client_authentication_private_key":.*', 'ig');
const client_private_key_2_regexp = new RegExp('client_authentication_private_key:.*', 'ig');
const client_private_key_3_regexp = new RegExp('key:.*', 'g');
const client_private_key_4_regexp = new RegExp('.*-----BEGIN PRIVATE KEY.*END PRIVATE KEY.*', 'g');
exports.trace = function () {
if (trace) {
var str = util.format.apply(null, arguments);
str = str.replace(/PASSWORD.*/ig, 'p[...]');
str = str.replace(client_private_key_1_regexp, '"client_authentication_private_key": [..]');
str = str.replace(client_private_key_2_regexp, 'client_authentication_private_key: [..]');
str = str.replace(client_private_key_3_regexp, 'key: [..]');
str = str.replace(client_private_key_4_regexp, '');
writeToStdOut(str);
}
};
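Illustrative effect of the added redaction rules (the inputs are invented):
logger.setTrace(true);
logger.trace('connecting with PASSWORD=secret');
// traced as: connecting with p[...]
logger.trace('"client_authentication_private_key": "-----BEGIN PRIVATE KEY-----..."');
// traced as: "client_authentication_private_key": [..]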

@@ -54,14 +81,14 @@

function timerInit(timer) {
timers[timer] = process.hrtime();
function timerInit (timer) {
timers[timer] = process.hrtime();
}
function timerDelta(timer) {
var then = timers[timer];
var diff = process.hrtime(then);
if (then || diff) {
return '(' + (diff[0]) + 's ' + (diff[1] / 1000000).toFixed(0) + 'ms)';
} else {
return '';
}
function timerDelta (timer) {
var then = timers[timer];
var diff = process.hrtime(then);
if (then || diff) {
return '(' + (diff[0]) + 's ' + (diff[1] / 1000000).toFixed(0) + 'ms)';
} else {
return '';
}
}

@@ -73,13 +100,13 @@

function logTimerInit(timer) {
var args = Array.prototype.slice.call(arguments, 1);
timerInit(timer);
writeToStdOut.apply(null, args);
function logTimerInit (timer) {
var args = Array.prototype.slice.call(arguments, 1);
timerInit(timer);
writeToStdOut.apply(null, args);
}
function logTimerDelta(timer) {
var args = Array.prototype.slice.call(arguments, 1);
var delta = timerDelta(timer);
args.push(delta);
writeToStdOut.apply(null, args);
function logTimerDelta (timer) {
var args = Array.prototype.slice.call(arguments, 1);
var delta = timerDelta(timer);
args.push(delta);
writeToStdOut.apply(null, args);
}

@@ -91,16 +118,16 @@

exports.logfnTimerInit = function() {
var args = arguments;
return function(cb) {
logTimerInit.apply(undefined, args);
cb();
};
exports.logfnTimerInit = function () {
var args = arguments;
return function (cb) {
logTimerInit.apply(undefined, args);
cb();
};
};
exports.logfnTimerDelta = function() {
var args = arguments;
return function(cb) {
logTimerDelta.apply(undefined, args);
cb();
};
exports.logfnTimerDelta = function () {
var args = arguments;
return function (cb) {
logTimerDelta.apply(undefined, args);
cb();
};
};

@@ -110,10 +137,10 @@

exports.setVerbose = function(value) {
verbose = value;
exports.setVerbose = function (value) {
verbose = value;
};
exports.logVerbose = function() {
if (verbose) {
writeToStdOut.apply(null, arguments);
}
exports.logVerbose = function () {
if (verbose) {
writeToStdOut.apply(null, arguments);
}
};

@@ -123,4 +150,4 @@

exports.setLogFile = function(filename) {
logFile = filename;
exports.setLogFile = function (filename) {
logFile = filename;
};

@@ -130,10 +157,10 @@

exports.nextMessageId = function() {
return ++logMessageId;
exports.nextMessageId = function () {
return ++logMessageId;
};
exports.logToFile = function(message) {
if (logFile) {
fs.appendFileSync(logFile, JSON.stringify(message) + '\n');
}
exports.logToFile = function (message) {
if (logFile) {
fs.appendFileSync(logFile, JSON.stringify(message) + '\n');
}
};

@@ -143,12 +170,12 @@

exports.setSendMessagesToParentProcess = function(value) {
sendMessagesToParentProcess = value;
exports.setSendMessagesToParentProcess = function (value) {
sendMessagesToParentProcess = value;
};
exports.logToParent = function(message) {
if (sendMessagesToParentProcess) {
if (process.send) {
process.send(message);
}
exports.logToParent = function (message) {
if (sendMessagesToParentProcess) {
if (process.send) {
process.send(message);
}
}
};

@@ -7,27 +7,34 @@ 'use strict';

module.exports = function() {
return {
root: process.env.ROOT || process.cwd(),
deployDirs: ['src/', 'cfg/'],
autoUndeploy: false,
exit: false,
detectServerVersion: true,
lockContainer: undefined,
lockContainerTimeout: 2 * 60 * 1000,
workingSet: new PathFilter(),
includeFilter: new PathFilter(),
excludeFilter: new PathFilter(),
deploy: new PathFilter(),
treatUnmodifiedAsModified: false,
undeploy: new PathFilter(),
parameters: {},
treatWarningsAsErrors: false,
simulateMake: false,
stripCRFromCSV: false,
singleDeleteCallsForDirectories: true,
undeployFilename: 'undeploy.json',
verbose: true,
logFile: undefined,
connectionTimeout: 10 * 1000
};
module.exports = function () {
return {
root: process.env.ROOT || process.cwd(),
deployDirs: ['src/', 'cfg/'],
autoUndeploy: false,
exit: false,
detectServerVersion: true,
lockContainer: undefined,
lockContainerTimeout: 2 * 60 * 1000,
workingSet: new PathFilter(),
includeFilter: new PathFilter(),
excludeFilter: new PathFilter(),
deploy: new PathFilter(),
treatUnmodifiedAsModified: false,
undeploy: new PathFilter(),
parameters: {},
path_parameters: {},
treatWarningsAsErrors: false,
simulateMake: false,
stripCRFromCSV: false,
singleDeleteCallsForDirectories: true,
undeployFilename: 'undeploy.json',
verbose: true,
logFile: undefined,
connectionTimeout: 10 * 1000,
treatWrongOwnershipAsErrors: false,
migrationTableDevMode: false,
detectContainerAPIVersion: true,
detectHDIVersion: true,
liveness_ping: true,
live_messages: true
};
};
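A sketch of consuming these defaults (the require path is an assumption):
const getOptions = require('./lib/options.js');
const options = getOptions();
options.simulateMake = true;           // dry run: simulate the make only
options.treatWarningsAsErrors = true;  // fail the deployment on make warnings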

@@ -7,391 +7,53 @@ 'use strict';

const hdb = require('../hdbAsync.js');
const logger = require('../logger.js');
const utils = require('../utils.js');
const paths = require('../paths.js');
const {
SQLGrantorStrategy,
HDIContainerGrantorStrategy,
ProcedureGrantorStrategy
} = require('./strategies');
const grantor = require('./grantor');
const revoker = require('./revoker');
function selectSchema() {
// return the first non-undefined argument
for (let i = 0; i < arguments.length; ++i) {
const schema = arguments[i];
if (schema !== undefined) {
return schema;
}
}
return '';
}
function check_array_is_array_and_not_empty(to_check, var_name){
if (!utils.isArray(to_check)){
throw new Error(`Expected ${var_name} to be of type Array. Found: ${Object.prototype.toString.call(to_check)}`);
}
return to_check.length > 0;
}
// grant target container oo and target container access role
exports.grant = function(options, services, content, container, grantee, cb) {
try {
const tasks = [];
logger.logTimerInit('grants-files', 'Processing grants files...');
const fileNames = content.synonymGrantorFiles;
logger.trace('grants files:', fileNames);
function grantPrivileges(client, grantor_type, privileges, grantee, grantor_schema, grantor_remote, grantor_procedure, grantor_procedure_schema, cb) {
try {
const tasks = [];
let grantorStrategy;
if (grantor_type === 'hdi') {
grantorStrategy = new HDIContainerGrantorStrategy(client, tasks, grantor_schema);
} else if (grantor_type === 'procedure') {
grantorStrategy = new ProcedureGrantorStrategy(client, tasks, grantor_procedure, grantor_procedure_schema);
} else {
grantorStrategy = new SQLGrantorStrategy(client, tasks);
}
grantorStrategy.initialize();
/*
* the rule for schema selection is:
* 1. obj.schema
* 2. obj.reference, only used for schema_privileges
* 3. grantor_schema
*/
if (privileges.roles) {
/*
* roles is supported for backwards compatibility
* string format: "roles": [ "X", "Y" ]
* object format: "roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
*/
const string_format_roles = [];
privileges.roles.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_roles.push(obj);
} else {
if (obj.names) {
if (check_array_is_array_and_not_empty(obj.names, 'names')){
grantorStrategy.grantGlobalRoles(obj.names, grantee, false);
}
}
if (obj.roles) {
if (check_array_is_array_and_not_empty(obj.roles, 'roles')){
grantorStrategy.grantGlobalRoles(obj.roles, grantee, false);
}
}
if (obj.roles_with_admin_option) {
if (check_array_is_array_and_not_empty(obj.roles_with_admin_option, 'roles_with_admin_option')){
grantorStrategy.grantGlobalRoles(obj.roles_with_admin_option, grantee, true);
}
}
}
});
if (string_format_roles.length > 0) {
grantorStrategy.grantGlobalRoles(string_format_roles, grantee, false);
}
}
fileNames.forEach(function(fileName) {
if (options.workingSet.matchesPath(fileName)) {
tasks.push(logger.logfn(` Processing "${ fileName }"...`));
tasks.push(function(callback) {
grantor(services, content.root, fileName, container, grantee, callback);
});
tasks.push(logger.logfn(` Processing "${ fileName }"... ok`));
}
});
if (privileges.global_roles) {
/*
* global_roles is supported for symmetry with hdbrole
* string format: "global_roles": [ "X", "Y" ]
* object format: "global_roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
*/
const string_format_global_roles = [];
privileges.global_roles.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_global_roles.push(obj);
} else {
if (obj.names) {
if (check_array_is_array_and_not_empty(obj.names, 'names')){
grantorStrategy.grantGlobalRoles(obj.names, grantee, false);
}
}
if (obj.roles) {
if (check_array_is_array_and_not_empty(obj.roles, 'roles')){
grantorStrategy.grantGlobalRoles(obj.roles, grantee, false);
}
}
if (obj.roles_with_admin_option) {
if (check_array_is_array_and_not_empty(obj.roles_with_admin_option, 'roles_with_admin_option')){
grantorStrategy.grantGlobalRoles(obj.roles_with_admin_option, grantee, true);
}
}
}
});
if (string_format_global_roles.length > 0) {
grantorStrategy.grantGlobalRoles(string_format_global_roles, grantee, false);
}
tasks.push(logger.logfnTimerDelta('grants-files', 'Processing grants files... ok'));
}
if (privileges.system_privileges) {
/*
* string format: "system_privileges": [ "X", "Y" ]
* object format: "system_privileges": [ { "privileges": [ "X", "Y" ], "privileges_with_admin_option": [ "A", "B" ] } ]
*/
const string_format_privileges = [];
privileges.system_privileges.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_privileges.push(obj);
} else {
if (obj.privileges) {
if (check_array_is_array_and_not_empty(obj.privileges, 'privileges')){
grantorStrategy.grantSystemPrivileges(obj.privileges, grantee, false);
}
}
if (obj.privileges_with_admin_option) {
if (check_array_is_array_and_not_empty(obj.privileges_with_admin_option, 'privileges_with_admin_option')){
grantorStrategy.grantSystemPrivileges(obj.privileges_with_admin_option, grantee, true);
}
}
}
});
if (string_format_privileges.length > 0) {
grantorStrategy.grantSystemPrivileges(string_format_privileges, grantee, false);
}
}
if (privileges.schema_privileges) {
privileges.schema_privileges.forEach(function(obj) {
if (obj.privileges) {
if (check_array_is_array_and_not_empty(obj.privileges, 'privileges')){
grantorStrategy.grantSchemaPrivileges(obj.privileges, selectSchema(obj.schema, obj.reference, grantor_schema), grantee, false);
}
}
if (obj.privileges_with_grant_option) {
if (check_array_is_array_and_not_empty(obj.privileges_with_grant_option, 'privileges_with_grant_option')){
grantorStrategy.grantSchemaPrivileges(obj.privileges_with_grant_option, selectSchema(obj.schema, obj.reference, grantor_schema), grantee, true);
}
}
});
}
if (privileges.object_privileges) {
privileges.object_privileges.forEach(function(obj) {
if (obj.privileges) {
if (check_array_is_array_and_not_empty(obj.privileges, 'privileges')){
grantorStrategy.grantSchemaObjectPrivileges(obj.privileges, selectSchema(obj.schema, grantor_schema), obj.name, grantee, false);
}
}
if (obj.privileges_with_grant_option) {
if (check_array_is_array_and_not_empty(obj.privileges_with_grant_option, 'privileges_with_grant_option')){
grantorStrategy.grantSchemaObjectPrivileges(obj.privileges_with_grant_option, selectSchema(obj.schema, grantor_schema), obj.name, grantee, true);
}
}
});
}
if (privileges.global_object_privileges) {
privileges.global_object_privileges.forEach(function(obj) {
let name = obj.name;
if (typeof name === 'undefined' && obj.type === 'REMOTE SOURCE') {
name = grantor_remote;
}
if (obj.privileges) {
if (check_array_is_array_and_not_empty(obj.privileges, 'privileges')){
grantorStrategy.grantGlobalObjectPrivileges(obj.privileges, name, obj.type, grantee, false);
}
}
if (obj.privileges_with_grant_option) {
if (check_array_is_array_and_not_empty(obj.privileges_with_grant_option, 'privileges_with_grant_option')){
grantorStrategy.grantGlobalObjectPrivileges(obj.privileges_with_grant_option, name, obj.type, grantee, true);
}
}
});
}
if (privileges.schema_roles) {
/*
* string format: "schema_roles": [ "X", "Y" ]
* object format: "schema_roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
*/
const string_format_schema_roles = [];
privileges.schema_roles.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_schema_roles.push(obj);
} else {
if (obj.names) {
if (check_array_is_array_and_not_empty(obj.names, 'names')){
grantorStrategy.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.names, grantee, false);
}
}
if (obj.roles) {
if (check_array_is_array_and_not_empty(obj.roles, 'roles')){
grantorStrategy.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.roles, grantee, false);
}
}
if (obj.roles_with_admin_option) {
if (check_array_is_array_and_not_empty(obj.roles_with_admin_option, 'roles_with_admin_option')){
grantorStrategy.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.roles_with_admin_option, grantee, true);
}
}
}
});
if (string_format_schema_roles.length > 0) {
grantorStrategy.grantSchemaRoles(grantor_schema, string_format_schema_roles, grantee, false);
}
}
if (privileges.container_roles) {
if (check_array_is_array_and_not_empty(privileges.container_roles, 'container_roles')){
grantorStrategy.grantSchemaRoles(grantor_schema, privileges.container_roles, grantee, false);
}
}
grantorStrategy.finalize();
async.series(tasks, cb);
} catch (err) {
cb(err);
}
}
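To make the accepted shapes concrete, a hypothetical .hdbgrants file combining the string and object formats handled above (the service and role names are invented):
{
    "grantor-service": {
        "object_owner": {
            "roles": [ "LEGACY_ROLE" ],
            "global_roles": [ { "roles": [ "MONITORING" ] } ],
            "schema_privileges": [ { "schema": "OTHER_SCHEMA", "privileges": [ "SELECT" ] } ]
        },
        "application_user": {
            "container_roles": [ "app_reader_role" ]
        }
    }
}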
function grantUsers(privileges, grantor, fileName, creds, targetCreds, container, grantee, cb) {
try {
const tasks = [];
let grantor_type;
if (creds.type !== undefined) {
// if the grantor object contains a type field, then use this for selecting the grantor's type
if (creds.type === 'hdi' || creds.type === 'sql' || creds.type === 'procedure' || creds.type === 'ignore') {
grantor_type = creds.type;
} else {
throw new Error("unknown grantor type, known grantor types are 'hdi', 'sql', 'procedure', 'ignore'");
}
} else {
// otherwise, fallback to old auto-sensing for sql and hdi types
grantor_type = 'sql';
if (creds.hdi_user) {
grantor_type = 'hdi';
}
}
let host = creds.host;
let port = creds.port;
let hosts = creds.db_hosts;
let certificate = creds.certificate;
if (!Array.isArray(hosts) && host === undefined && port === undefined && certificate === undefined) {
// host, port, certificate are optional in the service credentials, fallback to target credentials if undefined
host = targetCreds.host;
port = targetCreds.port;
hosts = targetCreds.db_hosts;
certificate = targetCreds.certificate;
}
let user = creds.user;
let password = creds.password;
if (grantor_type === 'hdi') {
user = creds.hdi_user;
password = creds.hdi_password;
}
logger.log(` Using service "${ grantor }" of type "${ grantor_type }"`);
if (grantor_type === 'ignore') {
cb(null);
return;
}
const client = hdb(host, port, user, password, certificate, hosts);
tasks.push(client.connect());
if (creds.schema && grantor_type !== 'procedure') {
tasks.push(client.setSchema(creds.schema));
}
if (privileges.object_owner) {
tasks.push(function(cb) {
grantPrivileges(client, grantor_type, privileges.object_owner, container, creds.schema, creds.remote, creds.procedure, creds.procedure_schema, cb);
});
}
if (privileges.application_user) {
tasks.push(function(cb) {
grantPrivileges(client, grantor_type, privileges.application_user, grantee, creds.schema, creds.remote, creds.procedure, creds.procedure_schema, cb);
});
}
tasks.push(client.disconnect());
async.series(tasks, function(err, results) {
client.end();
if (err) { // add information about grantor service & underlying user
err.message += `\ngrantor service: "${ grantor }", type: "${ grantor_type }", user: "${ user }"`;
if (grantor_type === 'hdi') {
err.message += ' (hdi_user)';
}
if (grantor_type === 'procedure') {
err.message += `, procedure: "${ creds.procedure }"`;
if (creds.procedure_schema) {
err.message += `, procedure_schema: "${ creds.procedure_schema }"`;
}
}
err.message += `\nfile name: ${ fileName}`;
}
cb(err, results);
});
} catch (err) {
cb(err);
}
}
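A sketch of a user-provided grantor service that would select the 'procedure' strategy in the type detection above (all names and values are invented):
{
    "name": "grantor-service",
    "label": "user-provided",
    "credentials": {
        "tags": [ "hana" ],
        "type": "procedure",
        "user": "GRANT_USER",
        "password": "<password>",
        "procedure": "GRANT_PRIVILEGES",
        "procedure_schema": "GRANT_SCHEMA"
    }
}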
function grantFile(services, root, fileName, container, grantee, cb) {
try {
const tasks = [];
fileName = paths.join(root, paths.clientPath(fileName));
const file = utils.readJSONFile(fileName);
const targetCreds = services.getTargetCreds();
Object.keys(file).forEach(function(grantor) {
let creds;
try {
creds = services.getCreds(grantor);
} catch (err) {
cb(err);
return;
}
logger.trace('grantor', file[grantor]);
tasks.push(function(cb) {
grantUsers(file[grantor], grantor, fileName, creds, targetCreds, container, grantee, cb);
});
});
async.series(tasks, cb);
} catch (err) {
cb(err);
}
}
async.series(tasks, function(err, results) {
cb(err, {
task: 'synonym',
results: results
});
});
} catch (err) {
return cb(err);
}
};
// grant target container oo and target container access role
exports.grant = function(options, services, content, container, grantee, cb) {
exports.revoke = function(options, services, content, container, grantee, cb) {
try {
let
tasks = [],
fileNames;
logger.logTimerInit('grants-files', 'Processing grants files...');
fileNames = content.synonymGrantorFiles();
logger.trace('grants files:', fileNames);
const tasks = [];
logger.logTimerInit('revoke-files', 'Processing revoke files...');
const fileNames = content.synonymRevokerFiles;
logger.trace('revoke files:', fileNames);
fileNames.forEach(function(fileName) {
if (options.workingSet.matchesPath(fileName)) {
tasks.push(logger.logfn(` Processing "${ fileName }"...`));
tasks.push(function(cb) {
grantFile(services, content.root, fileName, container, grantee, cb);
});
tasks.push(function(callback) {
revoker(services, content.root, fileName, container, grantee, callback);
});

@@ -402,3 +64,3 @@ tasks.push(logger.logfn(` Processing "${ fileName }"... ok`));

tasks.push(logger.logfnTimerDelta('grants-files', 'Processing grants files... ok'));
tasks.push(logger.logfnTimerDelta('revoke-files', 'Processing revoke files... ok'));

@@ -412,4 +74,4 @@ async.series(tasks, function(err, results) {

} catch (err) {
cb(err);
return cb(err);
}
};

@@ -13,3 +13,3 @@ 'use strict';

* Creates an instance of SQLGrantorStrategy.
* @param {hdbAsync} client SQL client.
* @param {hana-helper} client SQL client.
* @param {Array} tasks Tasks.

@@ -56,3 +56,3 @@ *

*/
grantSystemPrivileges(privileges, grantee, grantable) {
handleSystemPrivileges(privileges, grantee, grantable) {
this.tasks.push(this.client.grantSystemPrivileges(privileges, grantee, grantable));

@@ -72,3 +72,3 @@ }

*/
grantSchemaPrivileges(privileges, schema, grantee, grantable) {
handleSchemaPrivileges(privileges, schema, grantee, grantable) {
this.tasks.push(this.client.grantSchemaPrivileges(privileges, schema, grantee, grantable));

@@ -89,3 +89,3 @@ }

*/
grantGlobalObjectPrivileges(privileges, name, type, grantee, grantable) {
handleGlobalObjectPrivileges(privileges, name, type, grantee, grantable) {
this.tasks.push(this.client.grantGlobalObjectPrivileges(privileges, name, type, grantee, grantable));

@@ -106,3 +106,3 @@ }

*/
grantSchemaObjectPrivileges(privileges, schema, name, grantee, grantable) {
handleSchemaObjectPrivileges(privileges, schema, name, grantee, grantable) {
this.tasks.push(this.client.grantObjectPrivileges(privileges, schema, name, grantee, grantable));

@@ -121,3 +121,3 @@ }

*/
grantGlobalRoles(roles, grantee, grantable) {
handleGlobalRoles(roles, grantee, grantable) {
this.tasks.push(this.client.grantRoles(roles, grantee, grantable));

@@ -137,3 +137,3 @@ }

*/
grantSchemaRoles(schema, roles, grantee, grantable) {
handleSchemaRoles(schema, roles, grantee, grantable) {
this.tasks.push(this.client.grantSchemaRoles(schema, roles, grantee, grantable));

@@ -151,3 +151,3 @@ }

* Creates an instance of HDIContainerGrantorStrategy.
* @param {hdbAsync} client SQL client.
* @param {hana-helper} client SQL client.
* @param {Array} tasks Tasks.

@@ -208,3 +208,3 @@ * @param {String} grantor_schema Schema of the grantor container.

*/
grantSystemPrivileges() {
handleSystemPrivileges() {
throw new Error('system privileges are not supported in case of an HDI container service binding');

@@ -224,3 +224,3 @@ }

*/
grantSchemaPrivileges() {
handleSchemaPrivileges() {
throw new Error('schema privileges are not supported in case of an HDI container service binding');

@@ -241,3 +241,3 @@ }

*/
grantGlobalObjectPrivileges() {
handleGlobalObjectPrivileges() {
throw new Error('global object privileges are not supported in case of an HDI container service binding');

@@ -258,3 +258,3 @@ }

*/
grantSchemaObjectPrivileges() {
handleSchemaObjectPrivileges() {
throw new Error('object privileges are not supported in case of an HDI container service binding');

@@ -273,3 +273,3 @@ }

*/
grantGlobalRoles() {
handleGlobalRoles() {
throw new Error('global roles are not supported in case of an HDI container service binding');

@@ -289,3 +289,3 @@ }

*/
grantSchemaRoles(schema, roles, grantee, grantable) {
handleSchemaRoles(schema, roles, grantee, grantable) {
if (schema !== this.grantor_schema) {

@@ -312,3 +312,3 @@ throw new Error('schema is not supported for schema roles in case of an HDI container service binding');

* Creates an instance of ProcedureGrantorStrategy.
* @param {hdbAsync} client SQL client.
* @param {hana-helper} client SQL client.
* @param {Array} tasks Tasks.

@@ -366,3 +366,3 @@ * @param {String} grantor_procedure Procedure to use

*/
grantSystemPrivileges(privileges, grantee, grantable) {
handleSystemPrivileges(privileges, grantee, grantable) {
privileges.forEach((privilege) => {

@@ -393,3 +393,3 @@ this.grant_privileges.push([

*/
grantSchemaPrivileges(privileges, schema, grantee, grantable) {
handleSchemaPrivileges(privileges, schema, grantee, grantable) {
privileges.forEach((privilege) => {

@@ -421,3 +421,3 @@ this.grant_privileges.push([

*/
grantGlobalObjectPrivileges(privileges, name, type, grantee, grantable) {
handleGlobalObjectPrivileges(privileges, name, type, grantee, grantable) {
privileges.forEach((privilege) => {

@@ -449,3 +449,3 @@ this.grant_privileges.push([

*/
grantSchemaObjectPrivileges(privileges, schema, name, grantee, grantable) {
handleSchemaObjectPrivileges(privileges, schema, name, grantee, grantable) {
privileges.forEach((privilege) => {

@@ -475,3 +475,3 @@ this.grant_privileges.push([

*/
grantGlobalRoles(roles, grantee, grantable) {
handleGlobalRoles(roles, grantee, grantable) {
roles.forEach((role) => {

@@ -502,3 +502,3 @@ this.grant_privileges.push([

*/
grantSchemaRoles(schema, roles, grantee, grantable) {
handleSchemaRoles(schema, roles, grantee, grantable) {
roles.forEach((role) => {

@@ -519,6 +519,455 @@ this.grant_privileges.push([

/**
* Revoke privileges via SQL.
*
* @class SQLRevokerStrategy
*/
class SQLRevokerStrategy{
/**
* Creates an instance of SQLRevokerStrategy.
* @param {hana-helper} client SQL client.
* @param {Array} tasks Tasks.
*
* @memberOf SQLRevokerStrategy
*/
constructor(client, tasks){
this.client = client;
this.tasks = tasks;
}
/**
* Initialize the strategy.
*
* Does nothing.
*
* @returns {undefined}
* @memberOf SQLRevokerStrategy
*/
initialize() {
}
/**
* Finalize the strategy.
*
* Does nothing.
*
* @returns {undefined}
* @memberOf SQLRevokerStrategy
*/
finalize() {
}
/**
* Revoke system privileges from the revokee.
*
* @param {any} privileges Privileges to revoke.
* @param {any} revokee Whom to revoke from.
*
* @returns {undefined}
* @memberOf SQLRevokerStrategy
*/
handleSystemPrivileges(privileges, revokee) {
this.tasks.push(this.client.revokeSystemPrivileges(privileges, revokee));
}
/**
* Revoke schema privileges from the revokee.
*
* @param {any} privileges Privileges to revoke.
* @param {any} schema Schema to revoke for.
* @param {any} revokee Whom to revoke from.
*
* @returns {undefined}
* @memberOf SQLRevokerStrategy
*/
handleSchemaPrivileges(privileges, schema, revokee) {
this.tasks.push(this.client.revokeSchemaPrivileges(privileges, schema, revokee));
}
/**
* Revoke global object privileges from the revokee.
*
* @param {any} privileges Privileges to revoke.
* @param {any} name Name
* @param {any} type Type
* @param {any} revokee Whom to revoke from.
*
* @returns {undefined}
* @memberOf SQLRevokerStrategy
*/
handleGlobalObjectPrivileges(privileges, name, type, revokee) {
this.tasks.push(this.client.revokeGlobalObjectPrivileges(privileges, name, type, revokee));
}
/**
* Revoke schema object privileges from the revokee.
*
* @param {any} privileges Privileges to revoke.
* @param {any} schema Schema to revoke for
* @param {any} name Name
* @param {any} revokee Whom to revoke from.
*
* @returns {undefined}
* @memberOf SQLRevokerStrategy
*/
handleSchemaObjectPrivileges(privileges, schema, name, revokee) {
this.tasks.push(this.client.revokeObjectPrivileges(privileges, schema, name, revokee));
}
/**
* Revoke global roles from the revokee.
*
* @param {any} roles Roles to revoke.
* @param {any} revokee Whom to revoke from.
*
* @returns {undefined}
* @memberOf SQLRevokerStrategy
*/
handleGlobalRoles(roles, revokee) {
this.tasks.push(this.client.revokeRoles(roles, revokee));
}
/**
* Revoke schema roles from the revokee.
*
* @param {any} schema Schema to revoke for
* @param {any} roles Roles to revoke.
* @param {any} revokee Whom to revoke from.
*
* @returns {undefined}
* @memberOf SQLRevokerStrategy
*/
handleSchemaRoles(schema, roles, revokee) {
this.tasks.push(this.client.revokeSchemaRoles(schema, roles, revokee));
}
}
/**
* Revoke privileges via a HDI container.
*
* @class HDIContainerRevokerStrategy
*/
class HDIContainerRevokerStrategy{
/**
* Creates an instance of HDIContainerRevokerStrategy.
* @param {hana-helper} client SQL client.
* @param {Array} tasks Tasks.
* @param {String} grantor_schema Schema of the grantor container.
*
* @memberOf HDIContainerRevokerStrategy
*/
constructor(client, tasks, grantor_schema){
this.client = client;
this.tasks = tasks;
this.grantor_schema = grantor_schema;
this.container_roles = [];
}
/**
* Initialize the strategy.
*
* @returns {undefined}
* @memberOf HDIContainerRevokerStrategy
*/
initialize() {
this.container_roles = [];
}
/**
* Finalize the strategy.
*
* @returns {undefined}
* @memberOf HDIContainerRevokerStrategy
*/
finalize() {
if (this.container_roles.length > 0) {
// revoke all container roles which were collected into container_roles
const tempTablesForContainerRoles = [
['#CONTAINER_ROLES_PARAMETERS', hdiTables.parameters.type],
['#CONTAINER_ROLES', hdiTables.schemaRoles.type]
];
this.tasks.push(this.client.createTmpTables(tempTablesForContainerRoles));
this.tasks.push(this.client.bulkInsert('#CONTAINER_ROLES', hdiTables.schemaRoles.fields, this.container_roles));
this.tasks.push(this.client.hdiRevokeSchemaRoles(this.grantor_schema, '#CONTAINER_ROLES', '#CONTAINER_ROLES_PARAMETERS', this.client.hdiCheckResult('revoke container roles', true)));
this.tasks.push(this.client.dropTmpTables(tempTablesForContainerRoles));
}
}
/**
* Revoke system privileges from the revokee.
* @returns {undefined}
* @throws {Error} Throws because this is not supported.
* @memberOf HDIContainerRevokerStrategy
*/
handleSystemPrivileges() {
throw new Error('system privileges are not supported in case of an HDI container service binding');
}
/**
* Revoke schema privileges from the revokee.
* @returns {undefined}
* @throws {Error} Throws because this is not supported.
* @memberOf HDIContainerRevokerStrategy
*/
handleSchemaPrivileges() {
throw new Error('schema privileges are not supported in case of an HDI container service binding');
}
/**
* Revoke global object privileges from the revokee.
* @returns {undefined}
* @throws {Error} Throws because this is not supported.
* @memberOf HDIContainerRevokerStrategy
*/
handleGlobalObjectPrivileges() {
throw new Error('global object privileges are not supported in case of an HDI container service binding');
}
/**
* Revoke schema object privileges from the revokee.
* @returns {undefined}
* @throws {Error} Throws because this is not supported.
* @memberOf HDIContainerRevokerStrategy
*/
handleSchemaObjectPrivileges() {
throw new Error('object privileges are not supported in case of an HDI container service binding');
}
/**
* Revoke global roles from the revokee.
* @returns {undefined}
* @throws {Error} Throws because this is not supported.
* @memberOf HDIContainerRevokerStrategy
*/
handleGlobalRoles() {
throw new Error('global roles are not supported in case of an HDI container service binding');
}
/**
* Revoke schema roles from the revokee.
*
* @param {any} schema Schema to revoke for.
* @param {any} roles Roles to revoke.
* @param {any} revokee Whom to revoke from.
*
* @returns {undefined}
* @memberOf HDIContainerRevokerStrategy
*/
handleSchemaRoles(schema, roles, revokee) {
if (schema !== this.grantor_schema) {
throw new Error('schema is not supported for schema roles in case of an HDI container service binding');
}
const container_roles = this.container_roles;
roles.forEach(function(role) {
container_roles.push([role, revokee]);
});
}
}
/**
* Revoke privileges via a Procedure as a grantor.
*
* @class ProcedureRevokerStrategy
*/
class ProcedureRevokerStrategy{
/**
* Creates an instance of ProcedureRevokerStrategy.
* @param {hana-helper} client SQL client.
* @param {Array} tasks Tasks.
* @param {String} revoker_procedure Procedure to use
* @param {String} revoker_procedure_schema Schema to use
*
* @memberOf ProcedureRevokerStrategy
*/
constructor(client, tasks, revoker_procedure, revoker_procedure_schema){
this.client = client;
this.tasks = tasks;
this.revoker_procedure = revoker_procedure;
this.revoker_procedure_schema = revoker_procedure_schema;
this.revoke_privileges = [];
}
/**
* Initialize the strategy.
*
* @returns {undefined}
* @memberOf ProcedureRevokerStrategy
*/
initialize() {
this.revoke_privileges = []; // reset the collected revoke entries consumed by finalize()
}
/**
* Finalize the strategy.
*
* @returns {undefined}
* @memberOf ProcedureRevokerStrategy
*/
finalize() {
if (this.revoke_privileges.length > 0) {
this.tasks.push(this.client.execute('CREATE LOCAL TEMPORARY COLUMN TABLE #PRIVILEGES (PRIVILEGE_TYPE NVARCHAR(128), PRIVILEGE_NAME NVARCHAR(256), OBJECT_SCHEMA NVARCHAR(256), OBJECT_NAME NVARCHAR(256), OBJECT_TYPE NVARCHAR(128), REVOKEE_SCHEMA NVARCHAR(256), REVOKEE_NAME NVARCHAR(256))'));
this.tasks.push(this.client.bulkInsert('#PRIVILEGES', ['PRIVILEGE_TYPE', 'PRIVILEGE_NAME', 'OBJECT_SCHEMA', 'OBJECT_NAME', 'OBJECT_TYPE', 'REVOKEE_SCHEMA', 'REVOKEE_NAME'], this.revoke_privileges));
const schema_prefix = this.revoker_procedure_schema ? `${this.client.quotedSQLIdentifier(this.revoker_procedure_schema) }.` : '';
this.tasks.push(this.client.execute(`CALL ${ schema_prefix }${this.client.quotedSQLIdentifier(this.revoker_procedure) }(#PRIVILEGES)`));
this.tasks.push(this.client.execute('DROP TABLE #PRIVILEGES'));
}
}
/**
* Revoke system privileges from the revokee.
*
* @param {any} privileges Privileges to revoke.
* @param {any} revokee Who to revoke from.
*
* @returns {undefined}
* @memberOf ProcedureRevokerStrategy
*/
handleSystemPrivileges(privileges, revokee) {
privileges.forEach((privilege) => {
this.revoke_privileges.push([
'SYSTEM_PRIVILEGE',
privilege,
null,
null,
null,
null,
revokee
]);
});
}
/**
* Revoke schema privileges from the revokee.
*
* @param {any} privileges Privileges to revoke.
* @param {any} schema Schema to revoke from
* @param {any} revokee Who to revoke from.
*
* @returns {undefined}
* @memberOf ProcedureRevokerStrategy
*/
handleSchemaPrivileges(privileges, schema, revokee) {
privileges.forEach((privilege) => {
this.revoke_privileges.push([
'SCHEMA_PRIVILEGE',
privilege,
null,
schema,
null,
null,
revokee
]);
});
}
/**
* Revoke global object privileges from the revokee.
*
* @param {any} privileges Privileges to revoke.
* @param {any} name Name
* @param {any} type Type
* @param {any} revokee Who to revoke from.
*
* @returns {undefined}
* @memberOf ProcedureRevokerStrategy
*/
handleGlobalObjectPrivileges(privileges, name, type, revokee) {
privileges.forEach((privilege) => {
this.revoke_privileges.push([
'GLOBAL_OBJECT_PRIVILEGE',
privilege,
null,
name,
type,
null,
revokee
]);
});
}
/**
* Revoke schema object privileges from the revokee.
*
* @param {any} privileges Privileges to revoke.
* @param {any} schema Schema to revoke from
* @param {any} name Name
* @param {any} revokee Who to revoke from.
*
* @returns {undefined}
* @memberOf ProcedureRevokerStrategy
*/
handleSchemaObjectPrivileges(privileges, schema, name, revokee) {
privileges.forEach((privilege) => {
this.revoke_privileges.push([
'SCHEMA_OBJECT_PRIVILEGE',
privilege,
schema,
name,
null,
null,
revokee
]);
});
}
/**
* Revoke global roles from the revokee.
*
* @param {any} roles Roles to revoke.
* @param {any} revokee Who to revoke from.
*
* @returns {undefined}
* @memberOf ProcedureRevokerStrategy
*/
handleGlobalRoles(roles, revokee) {
roles.forEach((role) => {
this.revoke_privileges.push([
'GLOBAL_ROLE',
null,
null,
role,
null,
null,
revokee
]);
});
}
/**
* Revoke schema roles from the revokee.
*
* @param {any} schema Schema to revoke from
* @param {any} roles Roles to revoke.
* @param {any} revokee Who to revoke from.
*
* @returns {undefined}
* @memberOf ProcedureRevokerStrategy
*/
handleSchemaRoles(schema, roles, revokee) {
roles.forEach((role) => {
this.revoke_privileges.push([
'SCHEMA_ROLE',
null,
schema,
role,
null,
null,
revokee
]);
});
}
}
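A sketch of how a revoker strategy is driven (client stands for a hana-helper instance whose revoke* methods return async task functions, as the pushes above assume):
const async = require('async');
const tasks = [];
const strategy = new SQLRevokerStrategy(client, tasks);
strategy.initialize();
strategy.handleSchemaRoles('MY_SCHEMA', ['READER_ROLE'], 'SOME_USER');
strategy.finalize();
async.series(tasks, (err) => { /* all queued revokes ran in order */ });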
module.exports = {
SQLGrantorStrategy,
HDIContainerGrantorStrategy,
ProcedureGrantorStrategy
ProcedureGrantorStrategy,
SQLRevokerStrategy,
HDIContainerRevokerStrategy,
ProcedureRevokerStrategy
};

@@ -6,3 +6,4 @@ 'use strict';

const xsenv = require('@sap/xsenv');
const {isArray} = require('./utils');
const shared_password_service = require('./shared-password-service');
/**

@@ -14,12 +15,12 @@ * Returns the reason why the service could not be found by the ServiceAccesor.

*/
function why_is_service_not_found(service_name){
function why_is_service_not_found (service_name) {
const filtered_services = xsenv.filterCFServices((service) => service.name === service_name);
if (filtered_services.length === 0){
if (filtered_services.length === 0) {
// No service matches the name!
return 0;
} else if ((!filtered_services[0].tags || filtered_services[0].tags.indexOf('hana') === -1) && filtered_services[0].label !== 'user-provided'){
} else if ((!filtered_services[0].tags || filtered_services[0].tags.indexOf('hana') === -1) && filtered_services[0].label !== 'user-provided') {
// Service is missing tags or does not have hana tag, but is not user-provided
return 1;
} else if (filtered_services[0].label === 'user-provided' && (!filtered_services[0].credentials.tags || filtered_services[0].credentials.tags.indexOf('hana') === -1)){
// Service is user-provided but missing tags/ tag hana in credentials.
} else if (filtered_services[0].label === 'user-provided' && (!filtered_services[0].credentials.tags || filtered_services[0].credentials.tags.indexOf('hana') === -1 || filtered_services[0].credentials.tags.indexOf('password') === -1)) {
// Service is user-provided but missing tags/ tag hana/ tag password in credentials.
return 2;

@@ -37,3 +38,3 @@ }

*/
function get_service_not_found_error(service_name, replacement_service_name){
function get_service_not_found_error (service_name, replacement_service_name) {
let reason;

@@ -43,16 +44,43 @@ const error_message_suffix = {

1: "not found; the service is not tagged with the tag 'hana'",
2: "not found; the service is user-provided, but is missing the tag 'hana' in the credentials properties."
2: "not found; the service is user-provided, but is missing the tag 'hana' or the tag 'password' in the credentials properties."
};
if (replacement_service_name){
if (replacement_service_name) {
reason = why_is_service_not_found(replacement_service_name);
return new Error(`service ${ replacement_service_name } as replacement for service ${ service_name } ${error_message_suffix[reason]}`);
return new Error(`service ${replacement_service_name} as replacement for service ${service_name} ${error_message_suffix[reason]}`);
} else {
reason = why_is_service_not_found(service_name);
return new Error(`service ${ service_name } ${error_message_suffix[reason]}`);
return new Error(`service ${service_name} ${error_message_suffix[reason]}`);
}
}
/**
* Find a hdi-service in the given services and return it.
*
* @param {Array} services Array of services
* @returns {Object} The hdi-service.
* @throws {Error} Throws if no service found or more than one.
*/
function find_hdi_service_or_throw (services) {
/**
* Check if the given service is a hdi service.
*
* @param {Object} service Service to check.
* @returns {Boolean} True or false.
*/
function is_hdi (service) {
return (service.plan === 'hdi-shared' || service.credentials.hdi_user);
}
const hdi_services = services.filter(is_hdi);
if (hdi_services.length === 0) {
throw new Error('No HDI service found!');
} else if (hdi_services.length > 1) {
throw new Error('More than one HDI service found, but no service is defined as the deployment target via the environment variable "TARGET_CONTAINER"');
} else {
return hdi_services[0];
}
}
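Behavior sketch for the fallback (the service objects are simplified, invented examples):
const services = [
    { name: 'logs', plan: 'standard', credentials: {} },
    { name: 'db', plan: 'hdi-shared', credentials: { hdi_user: 'U', hdi_password: 'P' } }
];
find_hdi_service_or_throw(services); // returns the 'db' service: exactly one HDI candidate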
/**

@@ -63,8 +91,8 @@ * Class to handle working with services.

*/
function ServiceAccessor() {
this.services = xsenv.filterCFServices(function(service) {
if (service.tags && service.tags.indexOf('hana') !== -1) {
function ServiceAccessor () {
this.services = xsenv.filterCFServices(function (service) {
if (service.tags && (service.tags.indexOf('hana') !== -1)) {
return true;
}
if (service.label === 'user-provided' && service.credentials.tags && service.credentials.tags.indexOf('hana') !== -1) {
if (service.label === 'user-provided' && service.credentials.tags && (service.credentials.tags.indexOf('hana') !== -1 || service.credentials.tags.indexOf('password') !== -1)) {
return true;

@@ -77,3 +105,3 @@ }

this.serviceReplacements = {};
this.getServiceReplacements = function() {
this.getServiceReplacements = function () {
return this.serviceReplacements;

@@ -83,3 +111,3 @@ };

this.useServiceReplacements = false;
this.usingServiceReplacements = function() {
this.usingServiceReplacements = function () {
return this.useServiceReplacements;

@@ -93,3 +121,3 @@ };

} catch (error) {
throw new Error(`Failed to parse JSON object in SERVICE_REPLACEMENTS: ${ error}`);
throw new Error(`Failed to parse JSON object in SERVICE_REPLACEMENTS: ${error}`);
}

@@ -101,9 +129,9 @@ if (!Array.isArray(serviceReplacementsFromEnv)) {

if (!serviceReplacementsFromEnv[i].key) {
throw new Error(`Failed to parse JSON object in SERVICE_REPLACEMENTS: SERVICE_REPLACEMENTS does not define a key for element ${ i}`);
throw new Error(`Failed to parse JSON object in SERVICE_REPLACEMENTS: SERVICE_REPLACEMENTS does not define a key for element ${i}`);
}
if (!serviceReplacementsFromEnv[i].service) {
throw new Error(`Failed to parse JSON object in SERVICE_REPLACEMENTS: SERVICE_REPLACEMENTS does not define a service for element ${ i}`);
throw new Error(`Failed to parse JSON object in SERVICE_REPLACEMENTS: SERVICE_REPLACEMENTS does not define a service for element ${i}`);
}
if (this.serviceReplacements.hasOwnProperty(serviceReplacementsFromEnv[i].key)) {
throw new Error(`Failed to parse JSON object in SERVICE_REPLACEMENTS: SERVICE_REPLACEMENTS contains duplicate entries for the key ${ serviceReplacementsFromEnv[i].key}`);
throw new Error(`Failed to parse JSON object in SERVICE_REPLACEMENTS: SERVICE_REPLACEMENTS contains duplicate entries for the key ${serviceReplacementsFromEnv[i].key}`);
}

@@ -127,3 +155,3 @@ this.serviceReplacements[serviceReplacementsFromEnv[i].key] = serviceReplacementsFromEnv[i];

// returns the bound service with the given name, throws an error if not found
this.getServiceOrThrow = function(serviceName) {
this.getServiceOrThrow = function (serviceName) {
const service = this.getServiceInternal(serviceName);

@@ -137,20 +165,28 @@ if (service === null) {

// returns the bound service with the given name using the service replacements map, throws an error if not found
this.getServiceUsingServiceReplacements = function(serviceName) {
if (this.serviceReplacements.hasOwnProperty(serviceName)) {
// look up real service name via replacement map
const realServiceName = this.serviceReplacements[serviceName].service;
// get the real service
const service = this.getServiceInternal(realServiceName);
if (service === null) {
throw get_service_not_found_error(serviceName, realServiceName);
this.getServiceUsingServiceReplacements = function (serviceName) {
const service = (() => {
if (this.serviceReplacements.hasOwnProperty(serviceName)) {
// look up real service name via replacement map
const realServiceName = this.serviceReplacements[serviceName].service;
// get the real service
const service = this.getServiceInternal(realServiceName);
if (service === null) {
throw get_service_not_found_error(serviceName, realServiceName);
}
return service;
} else {
// service name is not mapped via replacement map, directly get the real service
return this.getServiceOrThrow(serviceName);
}
return service;
} else {
// service name is not mapped via replacement map, directly get the real service
return this.getServiceOrThrow(serviceName);
})();
if (service.credentials && (isArray(service.credentials.password) || isArray(service.credentials.hdi_password))) {
return shared_password_service.build(service, this);
}
return service;
};
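For illustration, a SERVICE_REPLACEMENTS value that the parsing above accepts (names invented; the variable must be set before the ServiceAccessor is constructed):
process.env.SERVICE_REPLACEMENTS = JSON.stringify([
    { key: 'logical-grantor', service: 'real-grantor-service' }
]);
// getServiceUsingServiceReplacements('logical-grantor') then resolves 'real-grantor-service'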
// returns the service which represents the target container
this.getTarget = function() {
this.getTarget = function () {
if (process.env.TARGET_CONTAINER) {

@@ -161,12 +197,12 @@ return this.getServiceOrThrow(process.env.TARGET_CONTAINER);

switch (this.services.length) {
case 0:
throw new Error('no service definition found; there must be at least one service definition for the deployment target');
case 1:
return this.services[0];
default:
throw new Error('more than one service definition found, but no service is defined as the deployment target via the environment variable "TARGET_CONTAINER"');
case 0:
throw new Error('no service definition found; there must be at least one service definition for the deployment target');
case 1:
return this.services[0];
default:
return find_hdi_service_or_throw(this.services);
}
};
this.getTargetCreds = function() {
this.getTargetCreds = function () {
const target = this.getTarget();

@@ -183,4 +219,4 @@ return target ? target.credentials : null;

module.exports = function() {
module.exports = function () {
return new ServiceAccessor();
};

@@ -34,8 +34,8 @@ 'use strict';

*/
function quote_dot(name){
function quote_dot (name) {
const parts = name.split('.');
if (parts.length > 2){
throw new Error(`There were multiple "." found in name "${ name }". There can only be at most one ".".`);
} else if (parts.length === 2){
name = `${parts[0] }"."${ parts[1]}`;
if (parts.length > 2) {
throw new Error(`There were multiple "." found in name "${name}". There can only be at most one ".".`);
} else if (parts.length === 2) {
name = `${parts[0]}"."${parts[1]}`;
}

@@ -45,4 +45,12 @@ return name;

exports.identifier = function (name) {
// escape " inside identifiers to ""
name = name.replace(/\"/g, '""');
/*
* Surround result with "..."
*/
return `"${name}"`;
};
exports.identifier = function(name) {
exports.dot_quoted_identifier = function (name) {
// escape " inside identifiers to ""

@@ -54,9 +62,9 @@ name = name.replace(/\"/g, '""');

*/
return `"${ quote_dot(name) }"`;
return `"${quote_dot(name)}"`;
};
exports.quote_dot_in_system_privilege_for_procedure = function(name){
exports.quote_dot_in_system_privilege_for_procedure = function (name) {
const parts = name.split('.');
if (parts.length === 2){
return `"${ quote_dot(name) }"`;
if (parts.length === 2) {
return `"${quote_dot(name)}"`;
} else {

@@ -73,3 +81,3 @@ return name;

*/
function isArray(variable){
function isArray (variable) {
return (variable instanceof Array || Object.prototype.toString.call(variable) === '[object Array]');

@@ -86,3 +94,3 @@ }

*/
function isGrantorFile(file) {
function isGrantorFile (file) {
const ext = paths.extname(file);

@@ -99,2 +107,19 @@ if (ext !== '') {

/**
* Checks if the given file is an .hdbrevokes file.
*
* @param {String} file File to check.
* @returns {boolean} True if it's a revoker file.
*/
function isRevokerFile (file) {
const ext = paths.extname(file);
if (ext !== '') {
return ext === '.hdbrevokes';
} else {
const base = paths.basename(file);
return base === '.hdbrevokes';
}
}
exports.isRevokerFile = isRevokerFile;
/**
* Check if the file is deployable.

@@ -104,6 +129,6 @@ * This is used to filter out non-deployable files.

* @param {String} file File to check.
* @returns {boolean} True if the file is deplayble.
* @returns {boolean} True if the file is deployable.
*/
function isDeployableFile(file) {
return !isGrantorFile(file);
function isDeployableFile (file) {
return !isGrantorFile(file) && !isRevokerFile(file);
}

@@ -119,3 +144,3 @@

*/
function isSynonymTemplateFile(file) {
function isSynonymTemplateFile (file) {
return paths.extname(file) === '.hdbsynonymtemplate';

@@ -132,3 +157,3 @@ }

*/
function isSynonymConfigFile(file) {
function isSynonymConfigFile (file) {
return paths.extname(file) === '.hdbsynonymconfig';

@@ -148,1 +173,78 @@ }

};
/**
* Obfuscate a string by replacing the middle part with [..].
*
* @param {any} string String to obfuscate
* @param {Number} obfuscation_factor Percentage of characters to leave intact at the beginning and end.
*
* @returns {String} Obfuscated string.
*/
function censor_string (string, obfuscation_factor) {
const length = string.length;
const leave_intact = Math.floor(length * obfuscation_factor / 2);
console.assert(leave_intact > 0);
const start = string.substr(0, leave_intact);
const end = string.substr(string.length - leave_intact, string.length);
const final = `${start}[..]${end}`;
console.assert(final !== string, 'Failed to censor the string!');
return final;
}
exports.censor_string = censor_string;
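Worked example: with length 20 and obfuscation_factor 0.2, leave_intact is floor(20 * 0.2 / 2) = 2, so two characters survive on each side:
censor_string('ABCDEFGHIJKLMNOPQRST', 0.2); // -> 'AB[..]ST'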
/**
* Create a container object with the supplied credentials.
*
* @param {any} creds Credentials for the container.
* @param {any} options Options, that can contain credentials as well.
* @returns {Object} An object containing the credentials for a HANA connection.
*/
function prepareCredentials (creds, options, logger) {
const hdiCreds = {
user: creds.hdi_user ? creds.hdi_user : creds.user,
password: creds.hdi_user ? creds.hdi_password : creds.password
};
if (creds.client_authentication_private_key) {
hdiCreds.key = creds.client_authentication_private_key;
logger.trace(`hdiCreds.key set to '${censor_string(hdiCreds.key, 0.05)}'`);
}
if (creds.client_authentication_certificate) {
hdiCreds.cert = creds.client_authentication_certificate;
logger.trace(`hdiCreds.cert set to '${hdiCreds.cert}'`);
}
if (Array.isArray(creds.db_hosts)) {
hdiCreds.hosts = creds.db_hosts;
} else {
hdiCreds.host = creds.host;
hdiCreds.port = creds.port;
}
if (creds.certificate) {
hdiCreds.ca = Array.isArray(creds.certificate) ? creds.certificate : [creds.certificate];
logger.trace('hdiCreds.ca set to', hdiCreds.ca);
}
if (creds.hostname_in_certificate) {
hdiCreds.sslHostNameInCertificate = creds.hostname_in_certificate;
logger.trace('hdiCreds.sslHostNameInCertificate set to', hdiCreds.sslHostNameInCertificate);
}
// boolean
if (creds.validate_certificate !== undefined && creds.validate_certificate !== null) {
hdiCreds.sslValidateCertificate = creds.validate_certificate;
logger.trace('hdiCreds.sslValidateCertificate set to', hdiCreds.sslValidateCertificate);
}
// boolean
if (creds.encrypt !== undefined && creds.encrypt !== null) {
hdiCreds.encrypt = creds.encrypt;
logger.trace('hdiCreds.encrypt set to', hdiCreds.encrypt);
}
hdiCreds.initializationTimeout = options.connectionTimeout;
return hdiCreds;
}
exports.prepareCredentials = prepareCredentials;
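Sketch of feeding a service binding through prepareCredentials (the field values are invented):
const hdiCreds = prepareCredentials(
    { hdi_user: 'U', hdi_password: 'P', host: 'hana.example.com', port: '30015', encrypt: true },
    { connectionTimeout: 10 * 1000 },
    logger
);
// -> { user: 'U', password: 'P', host: 'hana.example.com', port: '30015',
//      encrypt: true, initializationTimeout: 10000 }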

@@ -5,99 +5,134 @@ 'use strict';

var async = require('async');
var hdb = require('./hdbAsync.js');
const async = require('async');
const hana_helper = require('./hana-helper.js');
function Version() {
this.version = '';
this.major = -1;
this.minor = -1;
this.revision = -1;
this.patch = -1;
this.versionSynthesized = '';
this.error = undefined;
/**
* Class to handle the server version.
*
*/
function Version () {
this.version = '';
this.major = -1;
this.minor = -1;
this.revision = -1;
this.patch = -1;
this.build = -1;
this.versionSynthesized = '';
this.error = undefined;
this.calculateSynthesizedVersion = function() {
this.versionSynthesized = '' + this.major + '.' + this.minor + '.' + this.revision + '.' + this.patch;
};
this.calculateSynthesizedVersion = function () {
this.versionSynthesized = `${String(this.major)}.${this.minor}.${this.revision}.${this.patch}`;
};
this.setVersion = function(str) {
var components = str.split('.');
this.setVersion = function (str) {
const components = str.split('.');
if (components.length === 5) {
this.version = str;
this.major = parseInt(components[0]);
this.minor = parseInt(components[1]);
this.revision = parseInt(components[2]);
this.patch = parseInt(components[3]);
} else {
this.version = 'unknown';
this.major = 0;
this.minor = 0;
this.revision = 0;
this.patch = 0;
}
this.error = undefined;
this.calculateSynthesizedVersion();
};
if (components.length === 5) {
this.version = str;
this.major = parseInt(components[0]);
this.minor = parseInt(components[1]);
this.revision = parseInt(components[2]);
this.patch = parseInt(components[3]);
this.build = parseInt(components[4]);
} else {
this.version = 'unknown';
this.major = 0;
this.minor = 0;
this.revision = 0;
this.patch = 0;
this.build = 0;
}
this.error = undefined;
this.calculateSynthesizedVersion();
};
this.isGreaterThanOrEqualTo = function(major, minor, revision, patch) {
if (this.major < major) {
return false;
}
if (this.major > major) {
return true;
}
/**
* Check whether this version is greater than or equal to the given version.
*
* @param {any} major Major
* @param {any} minor Minor
* @param {any} revision Revision
* @param {any} patch Patch
* @param {any} build Build timestamp - optional.
* @returns {Boolean} True if greater than or equal
*/
this.isGreaterThanOrEqualTo = function (major, minor, revision, patch, build) {
if (this.major < major) {
return false;
}
if (this.major > major) {
return true;
}
if (this.minor < minor) {
return false;
}
if (this.minor > minor) {
return true;
}
if (this.minor < minor) {
return false;
}
if (this.minor > minor) {
return true;
}
if (this.revision < revision) {
return false;
}
if (this.revision > revision) {
return true;
}
if (this.revision < revision) {
return false;
}
if (this.revision > revision) {
return true;
}
if (this.patch < patch) {
return false;
}
if (this.patch < patch) {
return false;
}
return true;
};
if (build) {
if (this.build < build) {
return false;
}
}
return true;
};
this.setVersion('');
}
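Comparison sketch for the five-component version strings parsed above (values invented):
const v = new Version();
v.setVersion('2.00.045.00.1575639312');
v.isGreaterThanOrEqualTo(2, 0, 40, 0);             // true: revision 45 >= 40
v.isGreaterThanOrEqualTo(2, 0, 45, 0, 1575639313); // false: build is older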
function getFallbackVersion() {
return new Version();
function getFallbackVersion () {
return new Version();
}
function getVersion(credentials, cb) {
var client = hdb(credentials.host, credentials.port, credentials.hdi_user, credentials.hdi_password, credentials.certificate, credentials.db_hosts);
function getVersion (credentials, cb) {
const client = hana_helper(
credentials.host,
credentials.port,
credentials.hdi_user,
credentials.hdi_password,
credentials.certificate,
credentials.db_hosts,
credentials.hostname_in_certificate,
credentials.validate_certificate,
credentials.encrypt,
credentials.client_authentication_private_key,
credentials.client_authentication_certificate
);
var tasks = [
client.connect(),
client.execute('SELECT VERSION FROM SYS.M_DATABASE')
];
const tasks = [
client.connect(),
client.execute('SELECT VERSION FROM SYS.M_DATABASE')
];
async.series(tasks, function(err, result) {
client.end();
var version = getFallbackVersion();
async.series(tasks, function (err, result) {
client.end();
const version = getFallbackVersion();
if (!err) {
version.setVersion(result[1][0].VERSION);
} else {
version.error = err.message;
}
if (!err) {
version.setVersion(result[1][0].VERSION);
} else {
version.error = err.message;
}
cb(null, version);
});
cb(null, version);
});
}
module.exports = {
getFallbackVersion: getFallbackVersion,
getVersion: getVersion
getFallbackVersion: getFallbackVersion,
getVersion: getVersion
};
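For orientation, a minimal usage sketch of the module above; the require path and all credential values are hypothetical:

```js
// Hypothetical require path for the module above.
const serverVersion = require('./server-version.js');

serverVersion.getVersion({
  host: 'hana.example.com', // hypothetical
  port: 30015,              // hypothetical
  hdi_user: 'FOO_HDI_USER', // hypothetical
  hdi_password: 'secret'    // hypothetical
}, function (err, version) {
  // getVersion always calls back with err === null; connection failures
  // are reported via version.error on the fallback version 0.0.0.0.
  if (version.error) {
    console.error('could not determine server version:', version.error);
  }
  // Example: gate a feature on HANA 2 SPS02 (revision 030) or later.
  if (version.isGreaterThanOrEqualTo(2, 0, 30, 0)) {
    console.log('feature supported by', version.versionSynthesized);
  }
});
```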

@@ -231,3 +231,3 @@ 'use strict';

for (var i = 0; i < artifactPaths.length; i++) {
  const filePath = Array.isArray(artifactPaths[i]) ? artifactPaths[i][0] : artifactPaths[i];
  var fileArtifact = that.createArtifact(rootPath, filePath);
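The `Array.isArray` guard accepts both shapes of `artifactPaths` entries; a small sketch of the assumed shapes (illustrative values only):

```js
// Entries are assumed to be either plain path strings or arrays whose
// first element is the path (e.g. a hypothetical [path, metadata] pair).
const artifactPaths = [
  'src/Table.hdbtable',
  ['src/Synonym.hdbsynonym', { grantor: true }] // hypothetical metadata
];

for (const entry of artifactPaths) {
  const filePath = Array.isArray(entry) ? entry[0] : entry;
  console.log(filePath); // src/Table.hdbtable, src/Synonym.hdbsynonym
}
```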

@@ -234,0 +234,0 @@

'use strict';

const FileArtifact = require('./FileArtifact.js');
const fs = require('fs');
const ModelArtifactFactory = require('../model-artifacts/ModelArtifactsFactory.js');
const zdmUtils = require('../../zdmUtils.js');

function HDBCDS() {
  FileArtifact.prototype.constructor.apply(this, arguments);
  this._contexts = [];
  this._entities = [];
  this._views = [];
  this._derivedTypes = [];
  this._structuredTypes = [];
  this._dataControlLanguageArtifacts = [];
}

@@ -16,115 +23,161 @@

HDBCDS.prototype.accept = function () {
  FileArtifact.prototype.accept.apply(this, arguments);
};

function parseCDSNamespace(content) {
  const rx = /^namespace (.*);$/gmi;
  const arr = rx.exec(content);
  if (arr !== null) {
    return arr[1];
  }
  return '';
}

function createBaseTopLevelModelArtifact(that, content, modelArtifactFactory, fileArtifactFileNameWithoutSuffix, cb) {
  const topLevelArtifactRegexPattern = `([^\\s]+)\\s+(${ fileArtifactFileNameWithoutSuffix })`;
  const topLevelArtifactRegex = new RegExp(topLevelArtifactRegexPattern, 'g');
  const topLevelArtifactNameArray = zdmUtils.getMatches(content, topLevelArtifactRegex, 1);
  if (!Array.isArray(topLevelArtifactNameArray) || !topLevelArtifactNameArray.length) {
    return cb(new Error(`Error parsing artifact "${ that._fullFilePath }". Could not extract top level artifact.`));
  }
  const topLevelArtifactName = topLevelArtifactNameArray[0];
  const modelArtifact = modelArtifactFactory.createModelArtifact(topLevelArtifactName);
  return modelArtifact;
}

function isTopLevelArtifact(fileArtifactFileNameWithoutSuffix, modelArtifact) {
  return fileArtifactFileNameWithoutSuffix === modelArtifact._name;
}

function getTopLevelArtifact(that, content, modelArtifactFactory, modelArtifacts, cb) {
  const fileArtifactFileNameWithoutSuffix = that._fileNameWithoutSuffix;
  for (let i = 0; i < modelArtifacts.length; i++) {
    const modelArtifact = modelArtifacts[i];
    if (isTopLevelArtifact(fileArtifactFileNameWithoutSuffix, modelArtifact)) {
      return modelArtifact;
    }
  }
  const topLevelArtifact = createBaseTopLevelModelArtifact(that, content, modelArtifactFactory, fileArtifactFileNameWithoutSuffix, cb);
  return topLevelArtifact;
}

function getContexts(content) {
  const contexts = zdmUtils.getMatches(content, /(context\s+([^\s]+)\s*{)/gi, 2);
  return contexts;
}

function getEntities(content) {
  const entities = zdmUtils.getMatches(content, /(entity\s+([^\s]+)\s*{)/gi, 2);
  return entities;
}

function getViews(content) {
  const views = zdmUtils.getMatches(content, /(view\s+([^\s]+)\s+as\s+select\s+from\s+)/gi, 2);
  return views;
}

function getDerivedTypes(content) {
  const derivedTypes = zdmUtils.getMatches(content, /(type\s+([^\s]+)\s*[{:])/gi, 2);
  return derivedTypes;
}

function getStructuredTypes(content) {
  const structuredTypes = zdmUtils.getMatches(content, /(table\s+type\s+([^\s]+)\s*{)/gi, 2);
  return structuredTypes;
}

function getAccessPolicies(content) {
  const accessPolicies = zdmUtils.getMatches(content, /(AccessPolicy\s+([^\s]+)\s*{)/gi, 2);
  return accessPolicies;
}

function getDataDefinitionLanguageArtifacts(that, fileContent, modelArtifactFactory) {
  const contextNames = getContexts(fileContent);
  const entityNames = getEntities(fileContent);
  const viewNames = getViews(fileContent);
  const derivedTypeNames = getDerivedTypes(fileContent);
  const structuredTypeNames = getStructuredTypes(fileContent);

  that._contexts = modelArtifactFactory.createContexts(contextNames);
  that._entities = modelArtifactFactory.createEntities(entityNames);
  that._views = modelArtifactFactory.createViews(viewNames);
  that._derivedTypes = modelArtifactFactory.createDerivedTypes(derivedTypeNames);
  that._structuredTypes = modelArtifactFactory.createStructuredTypes(structuredTypeNames);

  const ddlModelArtifacts = that._contexts.concat(that._entities, that._views, that._derivedTypes, that._structuredTypes);
  return ddlModelArtifacts;
}

function getDataControlLanguageArtifacts(that, fileContent, modelArtifactFactory) {
  const accessPolicyNames = getAccessPolicies(fileContent);
  that._dataControlLanguageArtifacts = modelArtifactFactory.createDataControlLanguageArtifacts(accessPolicyNames);
  return that._dataControlLanguageArtifacts;
}

function handleTopLevelArtifact(that, topLevelArtifact) {
  if (topLevelArtifact._isContext) {
    topLevelArtifact.addEntities(that._entities);
    topLevelArtifact.addViews(that._views);
    topLevelArtifact.addDerivedTypes(that._derivedTypes);
    topLevelArtifact.addStructuredTypes(that._structuredTypes);
  }
}

function parseFile(that, cb) {
  if (that._isAccepted) {
    return;
  }
  const fileContent = fs.readFileSync(that._fullFilePath, 'utf-8');
  const namespace = parseCDSNamespace(fileContent);
  that.setNamespace(namespace);

  const modelArtifactFactory = new ModelArtifactFactory();
  const ddlModelArtifacts = getDataDefinitionLanguageArtifacts(that, fileContent, modelArtifactFactory);
  const dclModelArtifacts = getDataControlLanguageArtifacts(that, fileContent, modelArtifactFactory);
  const modelArtifacts = [].concat(ddlModelArtifacts, dclModelArtifacts);

  const topLevelArtifact = getTopLevelArtifact(that, fileContent, modelArtifactFactory, modelArtifacts, cb);
  handleTopLevelArtifact(that, topLevelArtifact);
  that.setTopLevelArtifact(topLevelArtifact);
  that._isAccepted = true;
}

HDBCDS.prototype.parseDataContainer = function (setFileArtifactOriginalContentForDataContainer, cb) {
  const that = this;
  parseFile(that, cb);

  /*
   * Adoption guidelines for default handling of CDS files:
   * 1. Extract CDS views into a separate CDS file. (Deployed only in the Access schema.)
   * 2. CDS types and CDS table types should not be used by both entities and views or procedures. Separate CDS types and CDS table types for the Data schema and the Access schema respectively. They can be defined in one file.
   * 2.1. Data schema - define CDS types and CDS table types which are used only by CDS entities. Do not make backward-incompatible changes to Data types in later versions. (Deployed in the Data and Access schemas.)
   * 2.2. Access schema - define CDS types and CDS table types which are used only by procedures, views, and/or table types, but not by entities. (Deployed in the Data and Access schemas.)
   * 3. Associations defined in CDS entities can be used only by CDS views, not by .hdbviews. In the Data schema, do not model associations to objects from the Access schema.
   * 4. CDS files containing DCL (Data Control Language) objects should be modeled only for the Access schema.
   */
  if (that.haveEntities() && that.haveViews()) {
    return cb(new Error(`File artifact "${ that._fullFilePath }" contains an entity and a view. Entities and views should be extracted into separate CDS files. CDS entities should be modeled in the 'data' folder. CDS views should be modeled in the 'access' folder.`));
  }
  if (!that.isModeledInDataFolder() && !that.haveEntities() && !that.haveStructuredTypes() && !that.haveDerivedTypes()) {
    that._logger.log(`File artifact "${ that._fullFilePath }" will be skipped for deployment in the data container, because it does not contain a CDS entity, type, or table type.`);
    return cb();
  }
  setFileArtifactOriginalContentForDataContainer(that);
  cb();
};

module.exports = HDBCDS;
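To make the regex-based extraction above concrete, here is a self-contained sketch with a local stand-in for zdmUtils.getMatches; its signature and behavior are assumptions derived from the call sites:

```js
// Stand-in for zdmUtils.getMatches(content, regex, groupIndex): assumed to
// return the given capture group for every match of a global regex.
function getMatches(content, regex, groupIndex) {
  const matches = [];
  let match;
  while ((match = regex.exec(content)) !== null) {
    matches.push(match[groupIndex]);
  }
  return matches;
}

const cds = [
  'namespace com.acme.model;',
  'context MyContext {',
  '  entity MyEntity { key id : Integer; };',
  '};'
].join('\n');

console.log(getMatches(cds, /(context\s+([^\s]+)\s*{)/gi, 2)); // [ 'MyContext' ]
console.log(getMatches(cds, /(entity\s+([^\s]+)\s*{)/gi, 2));  // [ 'MyEntity' ]
console.log(/^namespace (.*);$/gmi.exec(cds)[1]);              // com.acme.model
```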

@@ -6,3 +6,3 @@ 'use strict';

var PathFilter = require('../../filters/PathFilter');
var Content = require('../../content.js');
var FileArtifactsFactory = require('./file-artifacts/FileArtifactsFactory.js');

@@ -16,4 +16,6 @@ var DataArtifactParserVisitor = require('./artifact-visitors/DataArtifactParserVisitor.js');

var handlebars = require('handlebars');
var fileWorker = require('../../fileWorker');
function Generator(options, services, zdmAction, logger) {
fileWorker.set_root(options.root);
this._options = options;

@@ -38,4 +40,6 @@ this._services = services;

var cnt = new Content(options.root, services, options.workingSet, options.deployDirs, contentPathsFilter, options.stripCRFromCSV, options.excludeFilter);
const artifactPaths = [...cnt.deployFiles, ...cnt.synonymGrantorFiles];
return artifactPaths;
};

@@ -42,0 +46,0 @@

'use strict';
var ModelArtifact = require('./ModelArtifact.js');
var Entity = require('./Entity.js');

@@ -8,2 +9,3 @@ var View = require('./View.js');

var Context = require('./Context.js');
var DataControlLanguageModelArtifact = require('./DataControlLanguageModelArtifact');

@@ -67,2 +69,14 @@ function ModelArtifactsFactory() {}

ModelArtifactsFactory.prototype.createDataControlLanguageArtifact = function(fullname) {
return new DataControlLanguageModelArtifact(fullname);
};
ModelArtifactsFactory.prototype.createDataControlLanguageArtifacts = function(dataControlLanguageArtifactNames) {
return createModelArtifacts(dataControlLanguageArtifactNames, this.createDataControlLanguageArtifact);
};
ModelArtifactsFactory.prototype.createModelArtifact = function(fullname) {
return new ModelArtifact(fullname);
};
module.exports = ModelArtifactsFactory;
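A brief sketch of how the factory's new DCL support is meant to be used, based on the call sites in HDBCDS.js above (the artifact names are illustrative):

```js
const ModelArtifactsFactory = require('./ModelArtifactsFactory.js');

const factory = new ModelArtifactsFactory();

// One model artifact per AccessPolicy name found by getAccessPolicies():
const dclArtifacts = factory.createDataControlLanguageArtifacts(['MyAccessPolicy']);

// Generic fallback used by createBaseTopLevelModelArtifact() when no parsed
// artifact matches the file name:
const topLevel = factory.createModelArtifact('com.acme.model::MyArtifact');
```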

@@ -1,2 +0,2 @@

/* eslint no-console: 0 */
'use strict';

@@ -11,2 +11,27 @@

/**
* Delete all deployer options from the given environment
*
* @param {Object} env Environment variables like process.env
* @returns {Object} Environment variables without deployer specific variables.
*/
function clean_env (env) {
const env_copy = JSON.parse(JSON.stringify(env));
delete env_copy.APPLICATION_ID;
delete env_copy.APPLICATION_VERSION_INFO;
delete env_copy.EXIT;
delete env_copy.HDI_DEPLOY_OPTIONS;
delete env_copy.HDI_DEPLOY_MODE;
delete env_copy.DEPLOY_ID;
delete env_copy.TRACE;
delete env_copy.ROOT;
delete env_copy.SERVICE_REPLACEMENTS;
delete env_copy.TARGET_CONTAINER;
// ZDM
delete env_copy.HDI_DEPLOY_ZDM_ACTION;
return env_copy;
}
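A quick sketch of the intended use of clean_env: derive a deployer-free copy of the current environment before configuring a nested deployment (the service name is hypothetical):

```js
// clean_env does not mutate process.env; it works on a deep copy.
const env = clean_env(process.env);

// env no longer carries HDI_DEPLOY_OPTIONS, TARGET_CONTAINER, etc.,
// so a nested deployer run starts from a clean slate:
env.TARGET_CONTAINER = 'my-other-hdi-container'; // hypothetical
```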
/**
* Run a deployment for the given folder and services.

@@ -21,3 +46,3 @@ *

*/
function deploy (contentDir, deployerEnv, callback, io) {
// Check user input.

@@ -67,8 +92,12 @@ if (!fs.existsSync(contentDir)) {

deployer.on('close', function (code, signal) {
  if (code === null) {
    response.signal = '@sap/hdi-deploy child process was closed by signal: ' + signal;
    response.exitCode = -1;
  } else {
    response.exitCode = code;
  }
  return callback(null, response);
});
} catch (e) {
// In case of an exception, call the callback with the exception and any responses received so far.

@@ -80,3 +109,4 @@ return callback(e, response);

module.exports = {
deploy: deploy,
clean_env: clean_env
};
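Putting the pieces together, a hedged sketch of driving the deployer through library.js, using clean_env and the response fields (messages, exitCode, and the new signal) set above; the path and service name are illustrative:

```js
const lib = require('@sap/hdi-deploy/library.js');

const env = lib.clean_env(process.env);
env.TARGET_CONTAINER = 'my-hdi-container'; // hypothetical service name

lib.deploy('/path/to/db-module', env, function (err, response) {
  if (err) {
    // Exception while spawning/running the deployer.
    return console.error('deployer failed:', err);
  }
  if (response.signal) {
    console.error(response.signal); // child process was killed by a signal
  }
  console.log('exit code:', response.exitCode);
  console.log('messages:', response.messages);
}, {
  // Optional io callbacks for streaming the child's stdout/stderr.
  stdoutCB: function (data) { process.stdout.write(data); },
  stderrCB: function (data) { process.stderr.write(data); }
});
```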
{
  "name": "@sap/hdi-deploy",
  "version": "3.11.3",
  "lockfileVersion": 1,
  "requires": true,
  "dependencies": {
    "@sap/hana-client": {
      "version": "2.4.144",
      "requires": {
        "debug": "3.1.0"
      },
      "dependencies": {
        "debug": {

@@ -26,114 +19,62 @@ "version": "3.1.0",

        },
        "ms": {
          "version": "2.0.0"
        }
      }
    },
    "@sap/hdi": {
      "version": "2.2.3",
      "requires": {
        "@sap/hana-client": "2.4.144",
        "async": "3.1.0"
      }
    },
    "@sap/xsenv": {
      "version": "1.2.9",
      "requires": {
        "debug": "3.1.0",
        "verror": "1.10.0"
      },
      "dependencies": {
        "debug": {
          "version": "3.1.0",
          "requires": {
            "ms": "2.0.0"
          }
        },
        "ms": {
          "version": "2.0.0"
        }
      }
    },
    "assert-plus": {
      "version": "1.0.0"
    },
    "async": {
      "version": "3.1.0"
    },
    "commander": {
      "version": "2.20.0"
    },
    "core-util-is": {
      "version": "1.0.2"
    },
    "extsprintf": {
      "version": "1.4.0"
    },
    "handlebars": {
      "version": "4.1.2",
      "requires": {
        "neo-async": "^2.6.0",
        "optimist": "^0.6.1",
        "source-map": "^0.6.1",
        "uglify-js": "^3.1.4"
      }
    },
    "minimist": {
      "version": "0.0.10"
    },
    "neo-async": {
      "version": "2.6.1"
    },
    "optimist": {

@@ -146,58 +87,25 @@ "version": "0.6.1",

    },
    "source-map": {
      "version": "0.6.1"
    },
    "uglify-js": {
      "version": "3.6.0",
      "optional": true,
      "requires": {
        "commander": "~2.20.0",
        "source-map": "~0.6.1"
      }
    },
    "verror": {
      "version": "1.10.0",
      "requires": {
        "assert-plus": "^1.0.0",
        "core-util-is": "1.0.2",
        "extsprintf": "^1.2.0"
      }
    },
    "wordwrap": {
      "version": "0.0.3"
    }
  }
}

@@ -1,1 +0,1 @@

{"dependencies":{"@sap/xsenv":"1.2.9","async":"2.6.0","handlebars":"4.0.10","hdb":"0.15.4"},"description":"HDI content deployment","devDependencies":{"command-line-args":"^4.0.7","command-line-usage":"^4.0.2","eslint":"^4.18.2","filter-node-package":"2.0.0","istanbul":"0.4.5","jshint":"2.9.4","mocha":"3.1.2","node-style":"^2.0.1","rewire":"2.5.2","shelljs":"0.6.0","should":"11.1.1","sinon":"1.17.6","uuid":"3.1.0"},"engines":{"node":">=6.9.1"},"main":"deploy.js","maintainers":[{"name":"https-support.sap.com","email":"do-not-reply@sap.com"}],"name":"@sap/hdi-deploy","optionalDependencies":{},"readme":"@sap/hdi-deploy\n===============\n\n`@sap/hdi-deploy` is the [Node.js](https://nodejs.org)-based deployment module for SAP HANA DI (HDI)-based persistence models, HDI Deployer for short. The HDI Deployer can be used in XS Advanced (XSA) and in SAP Cloud Platform (SAP CP)/Cloud Foundry (CF), and it is also used by the SAP Web IDE for interactive development scenarios.\n\nFor more information about HANA DI, please check the [SAP HANA Developer Guide](https://help.sap.com/viewer/4505d0bdaf4948449b7f7379d24d0f0d/2.0.02/en-US/eaa4e37394ea4efba8148d595d025261.html).\n\nThe HDI Deployer is packaged into a database module, a `db` module, as part of a Multi-Target Application (MTA) and is used to deploy HDI design-time artifacts of the `db` module to the respective HDI container. When an MTA is deployed via the Deploy Service, the `db` module is pushed first so that it can \"prepare\" the SAP HANA persistence; by the time defined services are started, the HDI container is ready for use.\n\nThe HDI Deployer can also be used without the Deploy Service and MTAs, and also for interactive scenarios or automation scripts.\n\nFor an MTA with different modules, e.g. a `db` module, a Node.js module, etc., this looks as follows:\n\n```\n +-----------------+ +-----------------+ +-----------------+\n | db module | | Node.js module | | ... module |\n | w/ HDI Deployer | | | | |\n +-----------------+ +-----------------+ +-----------------+\n\n | | |\n | | |\n \\/ deploy persistence \\/ read/write/extend persistence \n | | |\n | | |\n\n +---------------------------------------------------------------+\n | HDI container |\n | |\n +---------------------------------------------------------------+\n```\n\nIn a HANA-Service-Broker-based HDI setup, each module of the MTA is equipped with it's own technical database user for accessing the runtime schema of the HDI container.\n\nThe following diagram illustrates the different users who are involved in this setup with regard to privileges: the application users user1 and user2 who are bound to one of the modules each, and the HDI container's object owner (the #OO user) who is the owner of the objects in the database persistence of the MTA which are managed by HDI:\n\n```\n +-----------------+ +-----------------+ +-----------------+\n | db module | | Node.js module | | ... module |\n | w/ HDI Deployer | | | | |\n +-----------------+ +-----------------+ +-----------------+\n\n | | o\n | --------------------------------- X application user user1\n | | o\n | | ----- X application user user2\n | |\n\n +---------------------------------------------------------------+\n | HDI container |\n | db object 1 db object 2 |\n +-------------------------------------\\-------------/-----------+\n \\ /\n o \\ /\n X object owner (#OO user)\n```\n\nThe HDI Deployer is packaged into the `db` module of the MTA. 
So, in order to use a new HDI Deployer, you need to reference a new version of the HDI Deployer in the `db` module's `package.json` file.\n\nThe HDI Deployer supports HANA 1 SPS11/SPS12 and HANA 2 SPS00/SPS01/SPS02. The HDI Deployer assumes that for newer versions of HANA, a corresponding version of the HANA Service Broker is used to create the CF/XSA service bindings.\n\nNote: The HDI Deployer assumes ownership of the `src/`, `cfg/`, and `lib/` folders in the bound target HDI container. Binding more than 1 instance of the HDI Deployer to the same HDI container as the target container, e.g. the `db` modules of 2 MTAs or 2 applications are bound to the same HDI container as the target container, is not supported and results in undefined behavior.\n\n## README.md\n\n**Installation**:\n- [Integration into a Database Module](#integration-into-a-database-module)\n- [Database Connection Details](#database-connection-details)\n\n**The Database Module**:\n- [A Database Module's File System Structure](#a-database-modules-file-system-structure)\n- [Delta Deployment and Undeploy Whitelist](#delta-deployment-and-undeploy-whitelist)\n- [The default_access_role Role](#the-default_access_role-role)\n- [Reusable Database Modules](#reusable-database-modules)\n- [Configuration File Templating](#configuration-file-templating)\n- [Permissions to Container-External Objects](#permissions-to-container-external-objects)\n\n**Configuration and Reconfiguration**:\n- [Environment Variables for Applications](#environment-variables-for-applications)\n- [Environment Variables for Infrastructure / Development Tools](#environment-variables-for-infrastructure--development-tools)\n- [Options for Interactive Scenarios](#options-for-interactive-scenarios)\n- [Ignore List](#ignore-list)\n- [Supported Features](#supported-features)\n\n**Dynamic Deployment**:\n - [Deployment via hdi-dynamic-deploy](#deployment-via-hdi-dynamic-deploy)\n - [Using hdi-deploy as a Node.js library](#using-hdi-deploy-as-a-nodejs-library)\n\n\n## Integration into a Database Module\n\nUsually, `@sap/hdi-deploy` gets installed via a `package.json`-based dependency inside your application's `db` module:\n\n`db/package.json`:\n\n```\n{\n \"name\": \"deploy\",\n \"dependencies\": {\n \"@sap/hdi-deploy\": \"3.7.0\"\n },\n \"scripts\": {\n \"start\": \"node node_modules/@sap/hdi-deploy/\"\n }\n}\n```\n## Database Connection Details\n\nConnection details for the database, e.g. host, port, credentials, and certificates, are looked up by the HDI Deployer from the standard CF/XSA [`VCAP_SERVICES`](http://docs.cloudfoundry.org/devguide/deploy-apps/environment-variable.html#VCAP-SERVICES) environment variable which contains the bound services.\n\nFor local testing, the HDI Deployer supports default configurations via the following configuration files:\n\n- `default-services.json`: a JSON file which contains a set of service bindings\n- `default-env.json`: a JSON file which contains a set of environment variables and their values\n\n## A Database Module's File System Structure\n\nThe HDI Deployer expects the following file system structure for the HDI content in your `db` module:\n\n- `src/`: folder which contains your HDI source artifacts\n- `cfg/`: optional folder with HDI configuration artifacts\n- `package.json`: this file is used by npm (the Node.js package manager) to bootstrap and start the application\n\nOther files in the root directory will be ignored by `@sap/hdi-deploy`.\n\nPlease note that the `cfg/` folder also might need a `.hdiconfig` file, e.g. 
in case `.hdbsynonymconfig` files are placed there.\n\nIn combination with resuable database modules, the HDI Deployer will also consider database modules which are located in the `node_modules/` folder and which will be mapped to a corresponding sub-folder hierarchy in the container's `lib/` folder.\n\n## Delta Deployment and Undeploy Whitelist\n\nThe HDI Deployer implements a delta-based deployment strategy:\n\nOn startup, the HDI Deployer recursively scans the local `src/` and `cfg/` folders, processes config templates, looks at the HDI container at the server-side and calculates the set of added, modified, and deleted files based on the difference between the local file system state and the deployed file system state of the server-side HDI container.\n\nIn normal operation, the HDI Deployer will schedule only the set of added and modified files for deployment. The set of deleted files is not scheduled for undeployment.\n\nIn order to undeploy deleted files, an application needs to include an undeploy whitelist via an `undeploy.json` file in the root directory of the `db` module (right beside the `src/` and `cfg/` folders). The undeploy whitelist `undeploy.json` file is a JSON document with a top-level array of file names:\n\n`undeploy.json`:\n\n [\n \"src/Table.hdbcds\",\n \"src/Procedure.hdbprocedure\"\n ]\n\nThe file must list all artifacts which should be undeployed. The file path of the artifacts must be relative to the root directory of the `db` module, must use the HDI file path delimiter '/', and must be based on the HDI server-side folder structure. In case of resuable database modules, the server-side top-level folder `lib/` needs to be used instead of the local folder `node_modules/`.\n\nFor interactive scenarios, it's possible to pass the `auto-undeploy` option to the HDI Deployer, e.g.\n\n node deploy --auto-undeploy\n\nIn this case, the HDI Deployer will ignore the undeploy whitelist `undeploy.json` file and will schedule all deleted files in the `src/` and `cfg/` folders for undeployment.\n\n## The default_access_role Role\n\nWhen an HDI container service instance is created by the HANA Service Broker, e.g. service instance `foo` with schema name `FOO`, the broker creates an HDI container `FOO` (consisting of the runtime schema `FOO`, the HDI metadata and API schema `FOO#DI`, and the object owner `FOO#OO`) and a global access role `FOO::access_role` for the runtime schema. This access role is equipped with a default permission set for the runtime schema which consists of `SELECT`, `INSERT`, `UPDATE`, `DELETE`, `EXECUTE`, `CREATE TEMPORARY TABLE`, and `SELECT CDS METADATA` on the runtime schema `FOO`.\n\nEvery time the service instance is bound to an application, the broker creates 2 new users which are specific to this binding. The first user is the application user who is named `user` in the instance's credentials. This user is used by the application to access the HDI container's runtime schema `FOO`. This user is equipped with the service instance's global access role `FOO::access_role`. The second user is the HDI API user who is named `hdi_user` in the credentials. This user is equipped with privileges for the container's APIs in the `FOO#DI` schema.\n\nThe following diagram illustrates the binding-specific application users and the role of the global access role (the HDI API users and the bindings for the HDI Deployer are not shown for simplicity):\n\n```\n +-----------------+ +-----------------+ +-----------------+\n | db module | | Node.js module | | ... 
module |\n | w/ HDI Deployer | | | | |\n +-----------------+ +-----------------+ +-----------------+\n\n | | o\n | --------------------------------- X application user user1\n | | o \\\n | | ----- X application user user2 \\\n | | \\ \\\n \\ \\\n +---------------------------------------------------------------+ role FOO::access_role\n | HDI container FOO | /\n | | SELECT/INSERT/... on schema FOO\n +---------------------------------------------------------------+\n```\n\nExemplary service binding:\n\n {\n \"hana\" : [ {\n \"name\" : \"foo\",\n \"label\" : \"hana\",\n \"tags\" : [ \"hana\", \"database\", \"relational\" ],\n \"plan\" : \"hdi-shared\",\n \"credentials\" : {\n \"schema\" : \"FOO\",\n \"driver\" : \"com.sap.db.jdbc.Driver\",\n \"port\" : \"30115\",\n \"host\" : \"srv1234567.host.name\",\n \"db_hosts\" : [ {\n \"port\" : 30115,\n \"host\" : \"srv1234567.host.name\"\n } ],\n \"user\" : \"SBSS_34599959672902195741875760873853766555404727822156060056836149475\",\n \"password\" : \"<password>\",\n \"hdi_user\" : \"SBSS_64592794580116217572062412157356606994624009694957290675610125954\",\n \"hdi_password\" : \"<password>\",\n \"url\" : \"jdbc:sap://srv1234567.host.name:30115/?currentschema=FOO\"\n }\n } ]\n }\n\nIn order to assign roles from the HDI content to the application binding users (the `user` users), the HDI Deployer implements an automatic assignment of the `default_access_role` role if it is present in the deployed content:\n\nIf a role definition file exists at the path `src/defaults/default_access_role.hdbrole`, and this file defines a role named `default_access_role`, and this file is included in the deployment (e.g. not excluded via `include-filter`), then the HDI Deployer grants the deployed `default_access_role` role to the service instance's global access role (e.g. `FOO::access_role`). In addition, the HDI Deployer revokes all default permissions (e.g. `SELECT`, `INSERT`, `UPDATE`, `DELETE`, `EXECUTE`, `CREATE TEMPORARY TABLE`, and `SELECT CDS METADATA` on the runtime schema `FOO`) from the global access role. If the `default_access_role` is undeployed, the default permission set for the runtime schema will be restored.\n\nNote: If you use a `.hdinamespace` file in `src/` which defines a real namespace prefix for subfolders, then you need to put a `.hdinamespace` file with the empty namespace `\"name\" : \"\"` at `src/defaults/` to ensure that the role can be named `default_access_role`.\n\nThe following diagram illustrates the binding-specific application users, the role of the global access role, and the container-specific default access role:\n\n```\n +-----------------+ +-----------------+ +-----------------+\n | db module | | Node.js module | | ... 
module |\n | w/ HDI Deployer | | | | |\n +-----------------+ +-----------------+ +-----------------+\n\n | | o\n | --------------------------------- X application user user1\n | | o \\\n | | ----- X application user user2 \\\n | | \\ \\\n \\ \\\n +---------------------------------------------------------------+ role FOO::access_role\n | HDI container FOO | /\n | role default_access_role ----------------------------+\n | / \\ |\n | role role1 role role2 |\n | / / \\ |\n | structured privileges DCL role 1 / 2 | \n +---------------------------------------------------------------+\n```\n\nNote: The `default_access_role` is assumed to be an \"umbrella\" role which aggregates other roles.\n\nA role with the default permission set which is granted by the HANA Service Broker on container creation looks as follows:\n\n`default_permissions_role.hdbrole`:\n\n```\n{\n \"role\":{\n \"name\":\"default_permissions_role\",\n \"schema_privileges\":[\n {\n \"privileges\":[\n \"SELECT\",\n \"INSERT\",\n \"UPDATE\",\n \"DELETE\",\n \"EXECUTE\",\n \"CREATE TEMPORARY TABLE\",\n \"SELECT CDS METADATA\"\n ]\n }\n ]\n }\n}\n```\n\n## Reusable Database Modules\n\nIn order to allow that an application uses (parts of) the database persistence of a reusable component inside its own persistence model, the HDI Deployer allows to link/include the design-time files of reusable components in a consuming application in an automated way. This mechanism is based on the Node.js package management mechanism for defining, publishing, and consuming reusable database modules which also supports versioning based on the semantic versioning concepts (cf. http://semver.org).\n\nA reusable database module is considered to have the same `src/` and `cfg/` folder structure as a normal database module. The `src/.hdiconfig` file is mandatory and used by the module mechanism as an indicator that the `src/` and `cfg/` folders belong to a consumable, reusable database module. 
In addition, the reusable database module needs to have a `package.json` file which defines the module's name, the module's version, etc.\n\nA complete reusable database module looks as follows:\n\n```\n/\n+-- src/\n| +-- .hdiconfig\n| +-- <source files ...>\n+-- cfg/\n| +-- <optional configuration files ...>\n+-- package.json\n```\n\nThe `package.json` file contains the module’s name, description, version, repository URL, and the set of files which belong to the module:\n\n`package.json`:\n\n```\n{\n \"name\": \"module1\",\n \"description\": \"A set of reusable database objects\",\n \"version\": \"1.3.1\",\n \"repository\": {\n \"url\": \"git@your.gitserver:modules/module1.git\"\n },\n \"files\": [\n \"src\",\n \"cfg\",\n \"package.json\"\n ]\n}\n```\n\nThe reusable database module should be published to a Node.js package management compliant object repository.\n\nConsumption of a reusable database module is done by adding a dependency in the consuming module's `package.json` file, right beside the dependency to `@sap/hdi-deploy`:\n\n```\n{\n \"name\": \"deploy\",\n \"dependencies\": {\n \"@sap/hdi-deploy\": \"3.7.0\",\n \"module1\": \"1.3.1\",\n \"module2\": \"1.7.0\"\n },\n \"scripts\": {\n \"start\": \"node node_modules/@sap/hdi-deploy/\"\n }\n}\n```\n\nHere, the consuming module requires `module1` in version `1.3.1` and `module2` in version `1.7.0`.\n\nWhen running `npm install` to download and install the dependencies which are listed in the dependencies section of the `package.json` file, `npm` will also download the reusable database modules and places them into the `node_modules/` folder of the consuming module. For each module a separate subfolder is created with the name of the module.\n\nWhen the HDI Deployer is triggered to do the actual deployment of the (consuming) database module, it scans the `node_modules/` folder and virtually integrates the `src/` and `cfg/` folders of found reusable database modules into the (consuming) database module’s `lib/` folder. Reusable database modules are identified by the mandatory `src/.hdiconfig` file.\n\nOn successful deployment, the HDI container will contain the consumed modules below the root-level `lib/` folder, e.g.\n\n```\n/\n+-- src/\n+-- cfg/\n+-- lib/\n| +-- module1/\n| | +-- src/\n| | +-- cfg/\n| +-- module2/\n| +-- src/\n| +-- cfg/\n```\n\nFor the time being, it’s not allowed to recursively include reusable database modules.\n\nThe `cfg/` folders of reusable database modules are also subject to configuration file templating.\n\n## Configuration File Templating\n\nThe HDI Deployer implements a templating mechanism for HDI configuration files, e.g. configuration files for synonyms, projection views, etc., based on services which are bound to the `db` module application. By means of this templating mechanism, it is possible to configure synonyms, projection views, etc. to point to the right database schema without knowing the schema name at development time.\n\nOn startup, the HDI Deployer recursively scans the local `cfg/` folder and picks up all files with a `.*config` suffix, e.g. all `.hdbsynonymconfig`, `.hdbvirtualtableconfig`, etc. files. 
For all collected files which contain `.configure` markers in their content, it applies the configuration templating and creates transient configuration files which are then deployed to the HDI container.\n\nFor a synonym configuration file `cfg/LOCAL_TARGET.hdbsynonymconfig`\n\n {\n \"LOCAL_TARGET\" : {\n \"target\" : {\n \"schema.configure\" : \"logical-external-service/schema\",\n \"database.configure\" : \"logical-external-service/database\",\n \"object\" : \"TARGET\"\n }\n }\n }\n\nthe section\n\n \"schema.configure\" : \"logical-external-service/schema\",\n \"database.configure\" : \"logical-external-service/database\",\n \"object\" : \"TARGET\"\n\nwill be transformed by the templating mechanism into\n\n \"schema\" : \"THE_SCHEMA\",\n \"database\" : \"THE_DATABASE\",\n \"object\" : \"TARGET\"\n\nwhere `THE_SCHEMA` and `THE_DATABASE` are the values for the `schema` and `database` fields of the bound service `logical-external-service`, which are denoted by the path expressions`logical-external-service/schema` and `logical-external-service/database`.\n\nIf a field in the service is missing, it will not be configured and will be removed instead, e.g. `database` might be optional.\n\nThe names of the services are subject to the service replacements mechanism, which can be used to map a real service, e.g. `real-external-service`, to a logical service name which is used in the configuration files, e.g. `logical-external-service`.\n\nIt's not always applicable to use `schema.configure`, `database.configure`, etc. in the configuration template files. Therefore, the HDI Deployer provides a generic way of copying a set of properties from the bound service, e.g. schema, database, remote source, etc. if they are present, although the template file doesn't mention them.\n\nFor the configuration file `cfg/LOCAL_TARGET.hdbsynonymconfig` this could looks as follows:\n\n {\n \"LOCAL_TARGET\" : {\n \"target\" : {\n \"*.configure\" : \"logical-external-service\",\n \"object\" : \"TARGET\"\n }\n }\n }\n\nWhen the HDI Deployer encounters a `*.configure` entry, it simply copies all well-known fields which are present in the bound service into the configuration file. The well-known fields are currently `remote`, `database`, and `schema`.\n\nThe HDI Deployer also supports old-style `.hdbsynonymtemplate` template files: If a `.hdbsynonymtemplate` file is found in the `cfg/` or `src/` folder, then it is processed as a configuration template file and a transient file with the suffix `.hdbsynonymconfig` is created. A field `grantor` is replaced with the `schema` value from the referenced service; so, a `grantor` field is equivalent to a `\"schema.configure\" : \"the-service/schema\"` entry in a configuration template file.\n\n## Permissions to Container-External Objects\n\nAn HDI container is by default equipped with nearly zero database privileges, e.g. the object owner (`#OO` user) is mainly equipped with the `CREATE ANY` privilege on the container's runtime schema (e.g. schema `FOO` for an HDI contaner `FOO`). Since HANA 2 SPS00, the object owner is equipped with an additional restricted set of privileges for system views in the database's `SYS` schema, e.g. `SYS.VIEWS` or `SYS.TABLES`. These system views apply an additional row-level filter based on the object owner's other privileges, e.g. the object owner can only see metadata in `SYS.VIEWS` for views he has privileges on. 
So, without additional privileges, the object owner can only see metadata for the objects in his container schema.\n\nIn order to access database objects inside other database schemata or other HDI containers, and in order to deploy synonyms into the HDI container which point to these container-external objects, at least the object owner needs additional privileges, e.g. for an object `object` in schema `X` `SELECT` privileges on `X.object`:\n\n```\n +---------------------------------------------------------------+ +------------------------+\n | HDI container FOO | | other schema X |\n | synonym ------------------------> object |\n +---------------------------------------------------/-----------+ +-------------\\----------+\n / \\\n o / \\\n X object owner FOO#OO -------------------- SELECT on X.object\n```\n\nPlease also refer to the official [Using Synonyms to Access External Schemas and Objects in XS Advanced](https://help.sap.com/viewer/4505d0bdaf4948449b7f7379d24d0f0d/2.0.02/en-US/bdc9f7ae66134c279a5f3683bba9b361.html) guide.\n\n#### .hdbgrants Files\n\nIn order to automatically assign privileges to the object owner and/or the application binding users, the HDI Deployer provides `.hdbgrants` files with a syntax similar to `.hdbrole` files:\n\nAn `.hdbgrants` file has the following structure:\n\n`granting-service.hdbgrants`:\n\n```\n{\n \"granting-service\": {\n \"object_owner\": {\n <privileges>\n },\n \"application_user\": {\n <privileges>\n }\n }\n}\n```\n\nThe top-level keys define the names of the bound services which \"grant\" the privileges, these are the \"grantors\", e.g. `granting-service` in the example. The next level defines to which users the privileges will be granted, these are the \"grantees\": `object_owner` is used for the HDI container's object owner, and `application_user` marks the application users which are bound to the application modules, e.g. the Node.js module. The third level defines the set of privileges in a `.hdbrole`-like structure.\n\nOn startup, the HDI Deployer looks for `.hdbgrants` files and processes them as follows: For each grantor in the file, the HDI Deployer looks up a bound service with the name (subject to service replacements), connects to the database with the service's credentials, and grants the specified privileges to the grantees. If the `schema` field is omitted for a privilege, then the grantor's `schema` property is used. 
If the `name` field in a `global_object_privileges` element of type `REMOTE SOURCE` is omitted, then the grantor's `remote` property is used.\n\nFor backwards compatibility, also the suffix `.hdbsynonymgrantor` is supported.\n\nExample of a `cfg/external-access.hdbgrants` file with some privileges for the object owner:\n\n```\n{\n \"external-access\": {\n \"object_owner\": {\n \"system_privileges\" : [\n {\n \"privileges\" : [ \"SYSTEM_PRIVILEGE_1\" ],\n \"privileges_with_admin_option\" : [ \"SYSTEM_PRIVILEGE_2\", \"SYSTEM_PRIVILEGE_3\" ]\n }\n ],\n \"global_roles\" : [\n {\n \"roles\" : [ \"GLOBAL_ROLE_1\", \"GLOBAL_ROLE_2\" ],\n \"roles_with_admin_option\" : [ \"GLOBAL_ROLE_3\", \"GLOBAL_ROLE_4\" ]\n }\n ],\n \"schema_privileges\" : [\n {\n \"privileges\" : [ \"INSERT\", \"UPDATE\" ],\n \"privileges_with_grant_option\" : [ \"SELECT\" ]\n }\n ],\n \"schema_roles\" : [\n {\n \"roles\" : [ \"SCHEMA_ROLE_1\", \"SCHEMA_ROLE_2\" ],\n \"roles_with_admin_option\" : [ \"SCHEMA_ROLE_3\", \"SCHEMA_ROLE_4\" ]\n }\n ],\n \"object_privileges\" : [\n {\n \"name\": \"AN_OBJECT\",\n \"privileges\": [ \"INSERT\", \"UPDATE\" ],\n \"privileges_with_grant_option\" : [ \"SELECT\" ]\n }\n ],\n \"global_object_privileges\" : [\n {\n \"name\" : \"A_REMOTE_SOURCE\",\n \"type\" : \"REMOTE SOURCE\",\n \"privileges\" : [ \"CREATE VIRTUAL TABLE\" ],\n \"privileges_with_grant_option\" : [ \"CREATE VIRTUAL PROCEDURE\" ]\n }\n ]\n }\n }\n}\n```\n\nThe following elements and keys are supported for backwards compatibility or for compatibility with `.hdbrole`:\n\n- `container_roles`: grant roles from an HDI container; superseded by `schema_roles` which works for normal schemas and HDI containers\n```\n \"container_roles\" : [ \"SCHEMA_ROLE_1\", \"SCHEMA_ROLE_2\" ]\n```\n- `roles`: grant global roles; superseded by `global_roles`:\n```\n \"roles\" : [ \"GLOBAL_ROLE_1\", \"GLOBAL_ROLE_2\" ]\n```\n- string-array-style roles and `names` key (maps to the non-grant/admin-option variant):\n```\n \"global_roles\" : [\n \"GLOBAL_ROLE_1\",\n {\n \"roles\" : [ \"GLOBAL_ROLE_1\", \"GLOBAL_ROLE_2\" ]\n },\n {\n \"names\" : [ \"GLOBAL_ROLE_1\", \"GLOBAL_ROLE_2\" ]\n },\n {\n \"roles_with_admin_option\" : [ \"GLOBAL_ROLE_3\", \"GLOBAL_ROLE_4\" ]\n },\n \"GLOBAL_ROLE_2\"\n ]\n```\n\nIf any non-container privileges are used, then the object owner (`#OO` user) will need to be given these privileges WITH GRANT option by a user-defined granting-service. Otherwise it won't be able to grant these privileges to e.g. a role in the container.\n\n#### Creating a Granting Service\n\nThe HDI Deployer supports the following types of granting-services:\n\n- `hdi`: an HDI container with access to the container's GRANT APIs\n- `sql`: a technical database user with GRANT privileges for the required object privileges, roles, system privileges, etc.\n- `procedure`: a technical database user with EXECUTE privileges on a stored procedure which has GRANT privileges for the required object privileges, roles, system privileges, etc.\n- `ignore`: grants were already given at the database-level and the HDI Deployer will ignore the content of the `.hdbgrants` file.\n\nFor the HDI container case, the corresponding service can simply be bound to the db module application. 
The HDI Deployer recognizes the bound service by its `hdi_user` value in the credentials section and calls the container's API procedures to grant the privileges from the `.hdbgrants` file.\n\nIn case a technical database user is used, a 'user-defined service' must be created for this purpose in the same space as the container. The service needs to be set up with the permissions of a specified database user to connect to the database and to grant the privileges specified in the `.hdbgrants` files during application deployment.\n\nSuch a user-provided service can be created as follows:\n\n- Open a command shell and log on to XSA:\n`xs login`\n- Change to the target space where you want to create the user-defined service:\n`xs target -s <SPACE>`\n- Create the user-defined service (e.g. `grantor-service`):\n`xs cups grantor-service -p '{ \"host\": \"host.acme.com\", \"port\": \"30015\", \"certificate\": \"<myCertificate>\", \"user\": \"TARGET_USER\", \"password\": \"Grant_123\", \"schema\": \"TARGET_SCHEMA\", \"tags\": [ \"hana\" ] }'`\n - `\"host\"/\"port\"`: Required for the connection to the database: port is the SQL port of the index server.\n - `\"certificate\"`: If the database is configured to only accept secure connections, then the granting-service requires an SSL certificate that must be included in the user-provided service, for example, using the \"certificate\":\"<myCertificate>\" parameter.\n - `\"user\"/\"password\"`: Connection details for a database user that has grant permissions for the objects in the schema.\n - `\"schema\"`: The database schema that contains the objects to which access is to be granted.\n - `\"type\"`: The type of the grantor mechanism; valid values are `\"hdi\"`, `\"sql\"`, or `\"procedure\"`. If the type is specified, then the type is auto-sensed (see details below).\n- Use the command `xs services` to display a list of services available in the current space; the 'grantor-service' service should be visible.\n\nFor Clound Foundry, use the corresponding `cf` commands.\n\nNote: Starting with version 3.0.0 of the HDI Deployer, the `\"host\"`, `\"port\"`, and `\"certificate\"` parameters are no longer required since they can be obtained from the target container binding. In this case, you must only specify the `\"user\"`, `\"password\"`, and `\"schema\"` when creating the user-provided service, e.g. `xs cups grantor-service -p '{ \"user\": \"TARGET_USER\", \"password\": \"Grant_123\", \"schema\": \"TARGET_SCHEMA\", \"tags\": [ \"hana\" ] }'`.\n\nIf the `\"type\"` is not specified, then the type is selected based on the following rule: if the field `hdi_user` is present, then the type is auto-sensed as `hdi`; otherwise, the type is set to `sql`.\n\nIf the technical database user does not have GRANT privileges by its own, but only EXECUTE privileges on a stored procedure which can grant the privileges, then the following settings are required:\n\n- At the datababase, a GRANT procedure must exist (or be visible) in the schema which is used in the user-provided service; an example is shown below.\n- The technical database user must have EXECUTE privileges on the GRANT procedure.\n- The name of the GRANT procedure must be specified in the user-provided service in the `\"procedure\"` field, e.g. `\"procedure\": \"GRANT\"`.\n- The scheme name of the GRANT procedure can be specified in the user-provided service in the `\"procedure_schema\"` field, e.g. 
`\"procedure_schema\": \"A_SCHEMA\"`.\n- The user-provided service must contain a `\"type\"` field with the value `\"procedure\"`.\n\nFor the different types of privileges, the following fields are passed to the GRANT procedure:\n\n| PRIVILEGE_TYPE | PRIVILEGE_NAME | OBJECT_SCHEMA | OBJECT_NAME | OBJECT_TYPE | GRANTEE_SCHEMA | GRANTEE_NAME | GRANTABLE |\n| --- | --- | --- | --- | --- | --- | --- | --- |\n| SCHEMA_OBJECT_PRIVILEGE | privilege | schema | object | NULL | NULL | grantee | TRUE/FALSE |\n| GLOBAL_OBJECT_PRIVILEGE | privilege | NULL | object | type | NULL | grantee | TRUE/FALSE |\n| SCHEMA_ROLE | NULL | schema | role | NULL | NULL | grantee | TRUE/FALSE |\n| GLOBAL_ROLE | NULL | NULL | role | NULL | NULL | grantee | TRUE/FALSE |\n| SCHEMA_PRIVILEGE | privilege | NULL | schema | NULL | NULL | grantee | TRUE/FALSE |\n| SYSTEM_PRIVILEGE | privilege | NULL | NULL | NULL | NULL | grantee | TRUE/FALSE |\n\nNote: This procedure does not work for HANA1 SPS11, since `REPLACE_REGEXPR` is not supported. Please use the sample procedure provided with older releases of the deployer.\nThe old sample procedure does not correctly handle component names of system privileges in .hdbgrants files.\n\nExample of a GRANT procedure:\n\n```\nCREATE PROCEDURE GRANT(\n IN PRIVILEGES TABLE (\n PRIVILEGE_TYPE NVARCHAR(128), -- 'SCHEMA_OBJECT_PRIVILEGE'\n -- 'GLOBAL_OBJECT_PRIVILEGE'\n -- 'SCHEMA_ROLE'\n -- 'GLOBAL_ROLE'\n -- 'SCHEMA_PRIVILEGE'\n -- 'SYSTEM_PRIVILEGE'\n PRIVILEGE_NAME NVARCHAR(256), -- cf. SYS.PRIVILEGES\n OBJECT_SCHEMA NVARCHAR(256), -- NULL or schema\n OBJECT_NAME NVARCHAR(256),\n OBJECT_TYPE NVARCHAR(128), -- NULL or 'REMOTE SOURCE'\n GRANTEE_SCHEMA NVARCHAR(256), -- NULL or schema\n GRANTEE_NAME NVARCHAR(256),\n GRANTABLE NVARCHAR(5) -- 'TRUE' or 'FALSE'\n )\n)\nLANGUAGE SQLSCRIPT\nSQL SECURITY DEFINER\nAS\nBEGIN\n DECLARE ERROR CONDITION FOR SQL_ERROR_CODE 10000;\n DECLARE CURSOR PRIVILEGES_CURSOR FOR SELECT * FROM :PRIVILEGES;\n\n -- TODO: add checks for valid grantees, e.g. check with _SYS_DI#<group>.M_CONTAINER_SCHEMAS\n -- or with SYS.USERS and creator and grantee like '%#OO'\n -- TODO: keep only functionality that should be allowed, e.g. 
only allow to grant schema-local
                              -- roles, but no object privileges, etc.

  FOR PRIVILEGE AS PRIVILEGES_CURSOR
  DO
    DECLARE TO_GRANTEE_CLAUSE NVARCHAR(512);
    DECLARE GRANTABLE_CLAUSE NVARCHAR(512) = '';

    IF PRIVILEGE.GRANTEE_SCHEMA IS NULL THEN
      TO_GRANTEE_CLAUSE = ' TO "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.GRANTEE_NAME) || '"';
    ELSE
      TO_GRANTEE_CLAUSE = ' TO "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.GRANTEE_SCHEMA)
                       || '"."' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.GRANTEE_NAME) || '"';
    END IF;

    IF PRIVILEGE.GRANTABLE = 'TRUE' THEN
      IF PRIVILEGE.PRIVILEGE_TYPE = 'SYSTEM_PRIVILEGE' OR
         PRIVILEGE.PRIVILEGE_TYPE = 'GLOBAL_ROLE' OR
         PRIVILEGE.PRIVILEGE_TYPE = 'SCHEMA_ROLE' THEN
        GRANTABLE_CLAUSE = ' WITH ADMIN OPTION';
      ELSE
        GRANTABLE_CLAUSE = ' WITH GRANT OPTION';
      END IF;
    ELSEIF PRIVILEGE.GRANTABLE != 'FALSE' THEN
      SIGNAL ERROR SET MESSAGE_TEXT = 'unsupported value for GRANTABLE: '
                                   || PRIVILEGE.GRANTABLE;
    END IF;

    IF PRIVILEGE.PRIVILEGE_TYPE = 'SCHEMA_OBJECT_PRIVILEGE' THEN
      EXEC 'GRANT "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.PRIVILEGE_NAME) || '"'
        || ' ON "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_SCHEMA)
        || '"."' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '" '
        || TO_GRANTEE_CLAUSE
        || GRANTABLE_CLAUSE;
    ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'GLOBAL_OBJECT_PRIVILEGE' THEN
      IF PRIVILEGE.OBJECT_TYPE = 'REMOTE SOURCE' THEN
        EXEC 'GRANT "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.PRIVILEGE_NAME) || '"'
          || ' ON ' || PRIVILEGE.OBJECT_TYPE || ' "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '" '
          || TO_GRANTEE_CLAUSE
          || GRANTABLE_CLAUSE;
      ELSE
        SIGNAL ERROR SET MESSAGE_TEXT = 'unsupported value for OBJECT_TYPE for GLOBAL_OBJECT_PRIVILEGE: '
                                     || PRIVILEGE.OBJECT_TYPE;
      END IF;
    ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'SCHEMA_ROLE' THEN
      EXEC 'GRANT "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_SCHEMA)
        || '"."' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '" '
        || TO_GRANTEE_CLAUSE
        || GRANTABLE_CLAUSE;
    ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'GLOBAL_ROLE' THEN
      EXEC 'GRANT "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '" '
        || TO_GRANTEE_CLAUSE
        || GRANTABLE_CLAUSE;
    ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'SCHEMA_PRIVILEGE' THEN
      EXEC 'GRANT "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.PRIVILEGE_NAME) || '"'
        || ' ON SCHEMA "' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '" '
        || TO_GRANTEE_CLAUSE
        || GRANTABLE_CLAUSE;
    ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'SYSTEM_PRIVILEGE' THEN
      EXEC 'GRANT "' || REPLACE_REGEXPR('\.' IN ESCAPE_DOUBLE_QUOTES(PRIVILEGE.PRIVILEGE_NAME) WITH '"."') || '"'
        || TO_GRANTEE_CLAUSE
        || GRANTABLE_CLAUSE;
    ELSE
      SIGNAL ERROR SET MESSAGE_TEXT = 'unsupported value for PRIVILEGE_TYPE: '
                                   || PRIVILEGE.PRIVILEGE_TYPE;
    END IF;
  END FOR;
END;
```

#### Defining the Granting Service in the mta[d].yaml

If the container needs a granting-service, then besides the service itself, the Application Development Descriptor mta.yaml needs to be adjusted so that the deployer can find the service. The mta.yaml must be modified as follows:

1. The container of the `db` module needs to get a `TARGET_CONTAINER` property to mark the service that corresponds to the container.
2. A new entry in `requires` is added for the granting-service.
3. A new entry in `resources` is added for the granting-service.

Example `mta.yaml`:

```
schema-version: '2.0'
ID: granting-service-example
version: 0.0.1

modules:
  - name: db
    type: hdb
    path: db
    requires:
      - name: hdi-container
        properties:                                   # 1.
          TARGET_CONTAINER: ~{hdi-container-service}  # 1.

      - name: granting-service                        # 2.

resources:
  - name: hdi-container
    type: com.sap.xs.hdi-container
    properties:
      hdi-container-service: ${service-name}

  - name: granting-service                            # 3.
    type: org.cloudfoundry.existing-service           # 3.
```

## Environment Variables for Applications

`@sap/hdi-deploy` supports (re-)configuration via the following environment variables which are exposed to applications, e.g. via the CF/XSA `manifest.yml` or the MTA descriptor `mta.yaml`:

- `TARGET_CONTAINER`: (optional) service name that specifies the HDI target container (needed if more than one service is bound to the HDI Deployer)
- `SERVICE_REPLACEMENTS`: (optional) JSON-structured list of service replacements, e.g. `[ { "key": "logical-service-name-1", "service": "real-service-name-1"}, { "key": "logical-service-name-2", "service": "real-service-name-2"} ]`, where the logical service names refer to the names in the HDI content and the real service names refer to the services which are bound to the HDI Deployer via `VCAP_SERVICES`; if the HDI content references a service name which is not listed in the replacements, then this name is used as a real service name

The structure of the `SERVICE_REPLACEMENTS` environment variable is based on the MTA specification in order to enable MTA group assignments.

Example `manifest.yml`:

    applications:
    - name: app-db
      path: db
      services:
       - app-database
       - real-grantor-service
       - real-external-service
      env:
        TARGET_CONTAINER: app-database
        SERVICE_REPLACEMENTS: >
          [
            {
              "key" : "logical-grantor-service",
              "service" : "real-grantor-service"
            },
            {
              "key" : "logical-external-service",
              "service" : "real-external-service"
            }
          ]

## Environment Variables for Infrastructure / Development Tools

`@sap/hdi-deploy` supports (re-)configuration via the following environment variables for infrastructure / development tools like the Deploy Service or internal build tools of the WEB IDE:

- `DEPLOY_ID`: (optional) if set, the given id will be written to the final application log entry (a custom id, to support processes in parsing log output)
- `HDI_DEPLOY_OPTIONS`: (optional) JSON-structured set of options for the HDI Deployer, e.g. `{ "auto_undeploy" : true, "exit" : true, "root" : "/volumes/A/workspaces/B/db/", "include_filter" : [ "src/", "cfg/" ] }`

## Ignore List

The HDI Deployer supports ignoring certain files via an `.hdiignore` file. The file has to be placed at the root of the project folder, just like the `undeploy.json`.
The file has a structure similar to a `.gitignore` file: simply lines of text specifying the paths to exclude. Both "real" paths and path patterns are supported.

```
src/table_1.hdbtable
src/*_2.hdbtable
```

The file works just like the `--exclude-filter` option, and both can be used at the same time.

## Options for Interactive Scenarios

`@sap/hdi-deploy` supports the following options for interactive deployment scenarios, e.g. for orchestration via the WEB IDE or for CI scripts:

- `--[no-]verbose`: [don't] print detailed log messages to the console
- `--structured-log <file>`: write log messages as JSON objects into the given file; messages are appended if the file already exists
- `--[no-]exit`: [don't] exit after deployment of artifacts
- `--[no-]lock-container`: [don't] acquire the container lock while working with the container
- `--root <path>`: use the given root path for artifacts
- `--working-set [<path> ..]`: define the given paths (directories and files) as the working set; a non-default working set applies additional restrictions, e.g. other options might be disallowed
- `--include-filter [<path> ..]`: only include the given paths (directories and files) during delta detection
- `--deploy [<file> ..]`: explicitly schedule the given files for deploy; extends the `include-filter` for collecting local files. Instead of a real path, a path pattern like `src/**/*.hdbtable` can be used as well.
- `--[no-]treat-unmodified-as-modified`: [don't] treat unmodified files during delta detection as modified files
- `--undeploy [<file> ..]`: explicitly schedule the given files for undeploy
- `--parameter [<key>=<value> ..]`: pass the given list of key-value parameters to the deployment
- `--[no-]auto-undeploy`: [don't] undeploy artifacts automatically based on delta detection and ignore the `undeploy.json` file
- `--[no-]treat-warnings-as-errors`: [don't] treat warnings as errors
- `--[no-]simulate-make`: [don't] simulate the make and skip post-make activities; pre-make activities still take effect, e.g. grants
- `--connection-timeout <ms>`: number of milliseconds to wait for the database connection(s)
- `--lock-container-timeout <ms>`: number of milliseconds to wait for the container lock
- `--exclude-filter [<path> ..]`: exclude the given paths during file walk, delta detection, and when explicitly scheduled via `--(un)deploy`

See `--help` for details and defaults.

Options can also be passed to `@sap/hdi-deploy` via the `HDI_DEPLOY_OPTIONS` environment variable.

## Supported Features

`@sap/hdi-deploy` exposes its set of features via the `info` option, which can be passed as a command-line option (`--info`) or via `HDI_DEPLOY_OPTIONS`, e.g.

    node deploy --info [<component> [<component> [...]]]

where a list of components can be specified.

The `info` option allows passing multiple components. The `info` request for these components is optional, e.g. if the HDI Deployer doesn't support a component, it will not throw an error, but simply not return information for that component. The special component `all` returns the information for all known components; `all` is the default if no component is specified. For certain future components, e.g. `server`, the HDI Deployer might need to connect to the HDI container in the database and retrieve feature information from there.

Examples:

```
node deploy --info all
node deploy --info client server
```

The result of an `info` call is a JSON document where the top-level objects correspond to the requested components. Each component should at least report its name, its version, and the set of supported features with name and version number (version numbers are simple numbers; no dots, no double-dots).

If a version number is negative, then the feature is supported by the client, but not supported by the server.

For a `--info client` call, the document looks as follows:

```
{
  "client": {
    "name": "@sap/hdi-deploy",
    "version": "3.7.0",
    "features": {
      "info": 2,
      "verbose": 1,
      "structured-log": 1,
      "lock-container": 1,
      "default-access-role": 1,
      "grants": 4,
      "working-set": 1,
      "include-filter": 1,
      "deploy": 1,
      "treat-unmodified-as-modified": 1,
      "undeploy": 1,
      "parameter": 1,
      "treat-warnings-as-errors": 1,
      "simulate-make": 1,
      "service-replacements": 1,
      "modules": 2,
      "config-templates": 2,
      "environment-options": 1,
      "undeploy-whitelist": 1
    }
  }
}
```

For the `server` component, the document would also contain the following data:

```
{
  ...
  "server": {
    "name": "sap-hana-database",
    "version": "1.00.120.04.0000000000",
    "features": {}
  }
}
```

## Deployment via hdi-dynamic-deploy

The standard XSA/CF way for deploying HDI content at runtime is to make use of `@sap/hdi-dynamic-deploy` instead of `@sap/hdi-deploy` directly. The `@sap/hdi-dynamic-deploy` app is an HTTP server that calls `@sap/hdi-deploy` when it receives a corresponding HTTP POST request. See the `@sap/hdi-dynamic-deploy` module for more information.

## Using hdi-deploy as a Node.js library

Since version 3.3.0 of `@sap/hdi-deploy` it is also possible to use it as a Node.js library. By requiring the `library.js` file from the project root, it is possible to start the deployer app from within another Node.js app. The module exports the function

```
function deploy(contentDir, deployerEnv, callback, io)
```

with the following parameters:

- `contentDir`: string containing a path pointing to the root of the db module to be deployed
- `deployerEnv`: JavaScript object containing the OS environment for the call to the deployer (e.g. containing `VCAP_SERVICES`)
- `callback`: a callback for the result of the call to the deployer, accepting a response parameter of the form:

```
{
  messages: [<list of result messages from the di server>],
  exitCode: <exit code of the call to the deployer app>
}
```

- `io` (optional): JavaScript object containing two callback functions `io.stdoutCB` and `io.stderrCB` of the form `function(data)` for streaming stdout and stderr of the call to the deployer; defaults to piping stdout and stderr of the deployer to stdout and stderr of the calling Node.js app

The remaining fields of the 3.7.0 `package.json`:

```
"readmeFilename": "README.md",
"repository": {},
"scripts": {
  "eslint": "eslint *.js lib/",
  "lint": "jshint *.js lib/ test/",
  "prepare-release": "clean-packages && npm prune --production && ls -al",
  "start": "node deploy.js",
  "test": "test/test-quick.sh",
  "test-all": "test/test-all.sh",
  "test-all-filtered": "test/test-all-filtered.sh",
  "test-all-filtered.win": "test\\test-all-filtered.bat",
  "test-all.js": "node test-all-filtered.js",
  "test-all.win": "test\\test-all.bat",
  "test.js": "node test-quick.js",
  "test.win": "test\\test-quick.bat"
},
"version": "3.7.0",
"license": "SEE LICENSE IN developer-license-3.1.txt"
```
{"bundleDependencies":false,"dependencies":{"@sap/hana-client":"2.4.144","@sap/hdi":"2.2.3","@sap/xsenv":"1.2.9","async":"3.1.0","handlebars":"4.1.2"},"deprecated":false,"description":"HDI content deployment","devDependencies":{"command-line-args":"^4.0.7","command-line-usage":"^4.0.2","eslint":"^4.18.2","filter-node-package":"2.0.0","istanbul":"0.4.5","jshint":"2.9.4","mocha":"3.1.2","node-style":"^2.0.1","rewire":"2.5.2","semver":"^6.1.1","shelljs":"0.6.0","should":"11.1.1","sinon":"1.17.6","uuid":"3.1.0"},"engines":{"node":">=6.9.1 <=10.16.x"},"main":"deploy.js","name":"@sap/hdi-deploy","repository":{},"scripts":{"eslint":"eslint *.js lib/","lint":"jshint *.js lib/ test/","prepare-release":"clean-packages && npm prune --production && ls -al","start":"node deploy.js","test":"test/test-quick.sh","test-all":"test/test-all.sh","test-all-filtered":"test/test-all-filtered.sh","test-all-filtered.win":"test\\test-all-filtered.bat","test-all.js":"node test-all-filtered.js","test-all.win":"test\\test-all.bat","test.js":"node test-quick.js","test.win":"test\\test-quick.bat","wrap":"rm -rf node_modules && rm npm-shrinkwrap.json && npm install && npm dedupe && npm shrinkwrap && clean-shrinkwrap"},"version":"3.11.3","license":"SEE LICENSE IN developer-license-3.1.txt"}
@sap/hdi-deploy
===============
`@sap/hdi-deploy` is the [Node.js](https://nodejs.org)-based deployment module for SAP HANA DI (HDI)-based persistence models, HDI Deployer for short. The HDI Deployer can be used in XS Advanced (XSA) and in SAP Cloud Platform (SAP CP)/Cloud Foundry (CF), and it is also used by the SAP Web IDE for interactive development scenarios.
`@sap/hdi-deploy` is a [Node.js](https://nodejs.org)-based deployment module for SAP HANA DI (HDI)-based persistence models, HDI Deployer for short. The HDI Deployer can be used in XS Advanced (XSA) and in SAP Cloud Platform (SAP CP)/Cloud Foundry (CF), and it is also used by the SAP Web IDE for interactive development scenarios. It can also be used in scenarios without XSA (or SAP CP), e.g. for deploying HDI persistence models into a HANA database where no XSA is installed.
For more information about HANA DI, please check the [SAP HANA Developer Guide](https://help.sap.com/viewer/4505d0bdaf4948449b7f7379d24d0f0d/2.0.02/en-US/eaa4e37394ea4efba8148d595d025261.html).
For more information about HANA DI, please check the [SAP HANA Developer Guide](https://help.sap.com/viewer/4505d0bdaf4948449b7f7379d24d0f0d/2.0.03/en-US/eaa4e37394ea4efba8148d595d025261.html) and the [SAP HANA Administration Guide](https://help.sap.com/viewer/6b94445c94ae495c83a19646e7c3fd56/2.0.03/en-US/3ef0ee9da11440e4b01708455b8497a9.html).
The HDI Deployer is packaged into a database module, a `db` module, as part of a Multi-Target Application (MTA) and is used to deploy HDI design-time artifacts of the `db` module to the respective HDI container. When an MTA is deployed via the Deploy Service, the `db` module is pushed first so that it can "prepare" the SAP HANA persistence; by the time defined services are started, the HDI container is ready for use.
Usually, the HDI Deployer is packaged into a database module, a `db` module, as part of a Multi-Target Application (MTA) and is used to deploy HDI design-time artifacts of the `db` module to the respective HDI container. When an MTA is deployed via the Deploy Service, the `db` module is pushed first so that it can "prepare" the SAP HANA persistence; by the time defined services are started, the HDI container is ready for use.
The HDI Deployer can also be used without the Deploy Service and MTAs, and also for interactive scenarios or automation scripts.
The HDI Deployer can also be used without the Deploy Service and MTAs, without XSA, and also for interactive scenarios or automation scripts.

@@ -59,3 +59,3 @@ For an MTA with different modules, e.g. a `db` module, a Node.js module, etc., this looks as follows:

The HDI Deployer supports HANA 1 SPS11/SPS12 and HANA 2 SPS00/SPS01/SPS02. The HDI Deployer assumes that for newer versions of HANA, a corresponding version of the HANA Service Broker is used to create the CF/XSA service bindings.
The HDI Deployer supports HANA 1 SPS11/SPS12 and HANA 2. The HDI Deployer assumes that for newer versions of HANA, a corresponding version of the HANA Service Broker is used to create the CF/XSA service bindings.

@@ -69,2 +69,4 @@ Note: The HDI Deployer assumes ownership of the `src/`, `cfg/`, and `lib/` folders in the bound target HDI container. Binding more than 1 instance of the HDI Deployer to the same HDI container as the target container, e.g. the `db` modules of 2 MTAs or 2 applications are bound to the same HDI container as the target container, is not supported and results in undefined behavior.

- [Database Connection Details](#database-connection-details)
- [Deployment via Push and Tasks](#deployment-via-push-and-tasks)
- [Deployment via Local Run](#deployment-via-local-run)

@@ -75,2 +77,3 @@ **The Database Module**:

- [The default_access_role Role](#the-default_access_role-role)
- [The development_debug_role Role](#the-development_debug_role-role)
- [Reusable Database Modules](#reusable-database-modules)

@@ -89,2 +92,4 @@ - [Configuration File Templating](#configuration-file-templating)

- [Deployment via hdi-dynamic-deploy](#deployment-via-hdi-dynamic-deploy)
**Library Usage**:
- [Using hdi-deploy as a Node.js library](#using-hdi-deploy-as-a-nodejs-library)

@@ -103,3 +108,3 @@

"dependencies": {
"@sap/hdi-deploy": "3.7.0"
"@sap/hdi-deploy": "3.11.3"
},

@@ -113,9 +118,201 @@ "scripts": {

Connection details for the database, e.g. host, port, credentials, and certificates, are looked up by the HDI Deployer from the standard CF/XSA [`VCAP_SERVICES`](http://docs.cloudfoundry.org/devguide/deploy-apps/environment-variable.html#VCAP-SERVICES) environment variable which contains the bound services.
Connection details for the database, e.g. host, port, credentials, certificates, `hostname_in_certificate`, `encrypt`, and `validate_certificate`, are looked up by the HDI Deployer from the standard CF/XSA `VCAP_SERVICES` environment variable which contains the bound services.
In order to use mutual authentication, `client_authentication_private_key` and `client_authentication_certificate` can be supplied via the service binding.
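For illustration, a binding's `credentials` section with mutual authentication enabled might look as follows; the values are placeholders, and only the two `client_authentication_*` keys are specific to mutual authentication:
``` JSON
{
  "credentials" : {
    "host" : "srv1234567.host.name",
    "port" : "30115",
    "hdi_user" : "FOO_645927945801_DT",
    "hdi_password" : "<password>",
    "encrypt" : true,
    "validate_certificate" : true,
    "hostname_in_certificate" : "srv1234567.host.name",
    "certificate" : "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n",
    "client_authentication_private_key" : "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
    "client_authentication_certificate" : "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
  }
}
```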
For local testing, the HDI Deployer supports default configurations via the following configuration files:
- `default-env.json`: a JSON file which contains a set of environment variables and their values
- `default-services.json`: a JSON file which contains a set of service bindings
- `default-env.json`: a JSON file which contains a set of environment variables and their values
Details of a bound service from a HANA-Service-Broker-based service binding in CF/XSA usually look as follows:
``` JSON
{
"name" : "foo",
"label" : "hana",
"tags" : [ "hana", "database", "relational" ],
"plan" : "hdi-shared",
"credentials" : {
"schema" : "FOO",
"user" : "FOO_345999596729_RT",
"password" : "<password>",
"hdi_user" : "FOO_645927945801_DT",
"hdi_password" : "<password>",
"host" : "srv1234567.host.name",
"port" : "30115",
"db_hosts" : [ {
"port" : 30115,
"host" : "srv1234567.host.name"
} ],
"url" : "jdbc:sap://srv1234567.host.name:30115/?currentschema=FOO",
"driver" : "com.sap.db.jdbc.Driver"
}
}
```
Here, the credentials section contains all the data which is needed by the HDI Deployer for connecting to the database. The HDI Deployer uses the `hdi_user`/`hdi_password` credentials from a direct service binding. The `hdi_user` should be minimal, i.e. only have the privileges required to fulfill the deployment. In most cases, access to a container FOO's API in the FOO#DI schema is sufficient.
### Splitting passwords across services
The `password` property and the `hdi_password` property can also be specified as a combination of passwords from other bound services. Consider the following service binding:
``` JSON
{
"hana" : [],
"user-provided" : [
{
"name" : "split_password_service",
"label" : "user-provided",
"tags" : [],
"credentials" : {
"user" : "user",
"schema": "schema",
"password": ["password_and_hdi_password_service", "password_only_service"],
"hdi_password": ["password_and_hdi_password_service", "hdi_password_only_service"],
"tags" : [ "hana" ]
}
},
{
"name" : "password_and_hdi_password_service",
"label" : "user-provided",
"tags" : [],
"credentials" : {
"password" : "PASSWORD",
"hdi_password": "HDI_PASSWORD",
"tags" : [ "password" ]
}
},
{
"name" : "hdi_password_only_service",
"label" : "user-provided",
"tags" : [],
"credentials" : {
"hdi_password": "123",
"tags" : [ "password" ]
}
},
{
"name" : "password_only_service",
"label" : "user-provided",
"tags" : [],
"credentials" : {
"password" : "456",
"tags" : [ "password" ]
}
} ]
}
```
When the service `split_password_service` is used, the services specified in `password` and/or `hdi_password` are checked and their `password`/`hdi_password` values are concatenated. The services are accessed in the order in which they are defined. The resulting `split_password_service` would have the `password` "PASSWORD456" and the `hdi_password` "HDI_PASSWORD123".
Between 0 and n services can be specified; specifying 0 services results in an empty `password`/`hdi_password` (`''`).
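A minimal sketch of this resolution logic (not the deployer's actual implementation), assuming services are matched by their `name` property:
```
'use strict';

// Resolve a split password: `ref` is either a plain string or a list of
// service names whose password fields are concatenated in the order given.
function resolvePassword(services, ref, field) {
  if (!Array.isArray(ref)) {
    return ref; // plain password, nothing to resolve
  }
  return ref.map(function (serviceName) {
    var service = services.find(function (s) { return s.name === serviceName; });
    // services that don't provide the field contribute nothing
    return (service && service.credentials[field]) || '';
  }).join(''); // 0 referenced services yield ''
}

var userProvided = JSON.parse(process.env.VCAP_SERVICES)['user-provided'];
var binding = userProvided.find(function (s) { return s.name === 'split_password_service'; });
var password = resolvePassword(userProvided, binding.credentials.password, 'password');
var hdiPassword = resolvePassword(userProvided, binding.credentials.hdi_password, 'hdi_password');
// with the example binding above: password === 'PASSWORD456', hdiPassword === 'HDI_PASSWORD123'
```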
### VCAP_SERVICES
Connection details for the database are stored in the following format in the [`VCAP_SERVICES`](http://docs.cloudfoundry.org/devguide/deploy-apps/environment-variable.html#VCAP-SERVICES) environment variable:
`VCAP_SERVICES`:
```
{
"hana" : [
<hana-service-binding-1>,
<hana-service-binding-2>,
...
<hana-service-binding-n>
],
"user-provided" : [
<user-provided-service-binding-1>,
<user-provided-service-binding-2>,
...
<user-provided-service-binding-m>
]
}
```
### default-env.json
A `default-env.json` file can contain a set of environment variables and their values. The HDI Deployer will pick up these settings on startup:
`default-env.json:`
```
{
"TARGET_CONTAINER" : "<name-of-the-service-instance-to-use-as-deployment-target>",
"VCAP_SERVICES" : {
"hana" : [
<hana-service-binding-1>,
<hana-service-binding-2>,
...
<hana-service-binding-n>
],
"user-provided" : [
<user-provided-service-binding-1>,
<user-provided-service-binding-2>,
...
<user-provided-service-binding-m>
]
}
}
```
`default-env.json` example file with a target container binding and a user-provided service:
```
{
"TARGET_CONTAINER" : "target-service",
"VCAP_SERVICES" : {
"hana" : [ {
"name" : "target-service",
"label" : "hana",
"tags" : [ "hana", "database", "relational" ],
"plan" : "hdi-shared",
"credentials" : {
"schema" : "SCHEMA",
"hdi_user" : "USER_DT",
"hdi_password" : "PASSWORD_DT",
"certificate" : "-----BEGIN CERTIFICATE-----\nABCD...1234\n-----END CERTIFICATE-----\n",
"host" : "host",
"port" : "30015"
}
} ],
"user-provided" : [ {
"name" : "GRANTING_SERVICE",
"label" : "user-provided",
"tags" : [ ],
"credentials" : {
"schema" : "SYS",
"user" : "GRANT_USER",
"password" : "PASSWORD",
"procedure_schema" : "PRIVILEGE_PROCEDURE_GRANTOR_DEFINER",
"procedure" : "GRANT",
"type" : "procedure",
"tags" : [ "hana" ]
}
} ]
}
}
```
## Deployment via Push and Tasks
There are two ways of using the HDI Deployer as an application:
- Push the application with one instance. The application will then start and do the HDI deployment of the data model. After a successful deployment, the application will enter an idle loop and can be stopped.
- Push the application with zero instances and then trigger a task on the application which does the HDI deployment of the data model. After deployment of the data model, the task will be completed. An instance of the application is only running while the task is being executed.
For both scenarios, ensure that the `health-check-type` in the manifest is set to `process` instead of the default `port`-based health check.
In order to push the application with zero instances, the application can either be pushed with the `--no-start` option or the number of instances can be set to zero in the `manifest.yml` file via `instances: 0`.
The deployment task can be started via `xs run-task <app> deployment-task "npm run start -- --exit" --wait-for-completion` on XSA. The task will run and the call will propagate the success/failure of the deployment task. On CF, the `--wait-for-completion` option is not available and the status of the task needs to be checked periodically.
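For example, on XSA the full flow might look like this; the application name `app-db` is a placeholder:
```
xs push app-db --no-start
xs run-task app-db deployment-task "npm run start -- --exit" --wait-for-completion
```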
## Deployment via Local Run
An HDI deployment can also be triggered without using an application. In this case, the HDI Deployer runs locally and connects directly to the database. This is possible if the database is reachable from the local machine, or if a network tunnel with a local endpoint has been established, e.g. a `cf ssh`-based tunnel on CF.
Apply the following steps to run the HDI Deployer locally:

1. Run `npm install` in the `db` module's folder to install the HDI Deployer module.
2. Create a `default-env.json` file in the `db` module's folder which contains the required service bindings and the `TARGET_CONTAINER` setting.
3. Run `npm run start -- --exit` in the `db` module's folder to trigger the deployment of the data model.
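Put together as shell commands, assuming the `db` module lives in a `db/` folder:
```
cd db
npm install
# create default-env.json with the service bindings and the TARGET_CONTAINER setting
npm run start -- --exit
```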
If the database uses SSL/TLS encryption, please ensure that the `hostname_in_certificate` value is set up correctly in the service bindings, because the network tunnel's local endpoint (e.g. localhost:9000) doesn't match the hostname in the SSL/TLS certificate.
## A Database Module's File System Structure

@@ -133,4 +330,6 @@

In combination with resuable database modules, the HDI Deployer will also consider database modules which are located in the `node_modules/` folder and which will be mapped to a corresponding sub-folder hierarchy in the container's `lib/` folder.
In combination with reusable database modules, the HDI Deployer will also consider database modules which are located in the `node_modules/` folder and which will be mapped to a corresponding sub-folder hierarchy in the container's `lib/` folder.
Note: The design-time files should be protected against unauthorized modifications to guard against unwanted undeployments or deployment of foreign objects. For applications running on XSA or Cloud Foundry, this is taken care of by the platform.
## Delta Deployment and Undeploy Whitelist

@@ -153,3 +352,3 @@

The file must list all artifacts which should be undeployed. The file path of the artifacts must be relative to the root directory of the `db` module, must use the HDI file path delimiter '/', and must be based on the HDI server-side folder structure. In case of resuable database modules, the server-side top-level folder `lib/` needs to be used instead of the local folder `node_modules/`.
The file must list all artifacts which should be undeployed. The file path of the artifacts must be relative to the root directory of the `db` module, must use the HDI file path delimiter '/', and must be based on the HDI server-side folder structure. In case of reusable database modules, the server-side top-level folder `lib/` needs to be used instead of the local folder `node_modules/`.

@@ -190,25 +389,27 @@ For interactive scenarios, it's possible to pass the `auto-undeploy` option to the HDI Deployer, e.g.

{
"hana" : [ {
"name" : "foo",
"label" : "hana",
"tags" : [ "hana", "database", "relational" ],
"plan" : "hdi-shared",
"credentials" : {
"schema" : "FOO",
"driver" : "com.sap.db.jdbc.Driver",
"port" : "30115",
"host" : "srv1234567.host.name",
"db_hosts" : [ {
"port" : 30115,
"host" : "srv1234567.host.name"
} ],
"user" : "SBSS_34599959672902195741875760873853766555404727822156060056836149475",
"password" : "<password>",
"hdi_user" : "SBSS_64592794580116217572062412157356606994624009694957290675610125954",
"hdi_password" : "<password>",
"url" : "jdbc:sap://srv1234567.host.name:30115/?currentschema=FOO"
}
} ]
}
```
{
"hana" : [ {
"name" : "foo",
"label" : "hana",
"tags" : [ "hana", "database", "relational" ],
"plan" : "hdi-shared",
"credentials" : {
"schema" : "FOO",
"user" : "FOO_345999596729_RT",
"password" : "<password>",
"hdi_user" : "FOO_645927945801_DT,
"hdi_password" : "<password>",
"host" : "srv1234567.host.name",
"port" : "30115",
"db_hosts" : [ {
"port" : 30115,
"host" : "srv1234567.host.name"
} ],
"url" : "jdbc:sap://srv1234567.host.name:30115/?currentschema=FOO",
"driver" : "com.sap.db.jdbc.Driver"
}
} ]
}
```

@@ -272,2 +473,10 @@ In order to assign roles from the HDI content to the application binding users (the `user` users), the HDI Deployer implements an automatic assignment of the `default_access_role` role if it is present in the deployed content:

## The development_debug_role Role
Similar to the `default_access_role`, a `development_debug_role` can be used to grant additional privileges to the access role. This is only intended for development and debugging, not for productive use!
If a role definition file exists at the path `src/defaults/development_debug_role.hdbrole`, and this file defines a role named `development_debug_role`, and this file is explicitly included in the deployment via the `--deploy` option, then the HDI Deployer grants the deployed `development_debug_role` role to the service instance's global access role (e.g. `FOO::access_role`).
In order to remove the privileges granted this way, the file has to be undeployed.
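For example, a deployment that explicitly schedules the role file (and thereby enables the role) might look like this; a sketch based on the `--deploy` option and the path described above:
```
npm run start -- --exit --deploy src/defaults/development_debug_role.hdbrole
```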
## Reusable Database Modules

@@ -319,3 +528,3 @@

"dependencies": {
"@sap/hdi-deploy": "3.7.0",
"@sap/hdi-deploy": "3.11.3",
"module1": "1.3.1",

@@ -410,3 +619,3 @@ "module2": "1.7.0"

An HDI container is by default equipped with nearly zero database privileges, e.g. the object owner (`#OO` user) is mainly equipped with the `CREATE ANY` privilege on the container's runtime schema (e.g. schema `FOO` for an HDI contaner `FOO`). Since HANA 2 SPS00, the object owner is equipped with an additional restricted set of privileges for system views in the database's `SYS` schema, e.g. `SYS.VIEWS` or `SYS.TABLES`. These system views apply an additional row-level filter based on the object owner's other privileges, e.g. the object owner can only see metadata in `SYS.VIEWS` for views he has privileges on. So, without additional privileges, the object owner can only see metadata for the objects in his container schema.
An HDI container is by default equipped with nearly zero database privileges, e.g. the object owner (`#OO` user) is mainly equipped with the `CREATE ANY` privilege on the container's runtime schema (e.g. schema `FOO` for an HDI container `FOO`). Since HANA 2 SPS00, the object owner is equipped with an additional restricted set of privileges for system views in the database's `SYS` schema, e.g. `SYS.VIEWS` or `SYS.TABLES`. These system views apply an additional row-level filter based on the object owner's other privileges, e.g. the object owner can only see metadata in `SYS.VIEWS` for views he has privileges on. So, without additional privileges, the object owner can only see metadata for the objects in his container schema.

@@ -427,2 +636,9 @@ In order to access database objects inside other database schemata or other HDI containers, and in order to deploy synonyms into the HDI container which point to these container-external objects, at least the object owner needs additional privileges, e.g. for an object `object` in schema `X` `SELECT` privileges on `X.object`:

#### .hdbrevokes Files
Starting with version 3.8, the deployer also supports revoking rights: anything that can be granted via `.hdbgrants` can be revoked via `.hdbrevokes`. Both file types use the same structure.
For more information on the structure of these files, see the section about [.hdbgrants Files](#hdbgrants-files).
The `.hdbrevokes` file is processed before the `.hdbgrants` file.
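For illustration only, a minimal `.hdbrevokes` file might look as follows, assuming the same structure as `.hdbgrants`; the service name, object name, and privilege are hypothetical:
``` JSON
{
  "grantor-service": {
    "object_owner": {
      "object_privileges": [
        {
          "name": "SOME_OBJECT",
          "privileges": [ "SELECT" ]
        }
      ]
    }
  }
}
```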
#### .hdbgrants Files

@@ -559,6 +775,6 @@

- `"schema"`: The database schema that contains the objects to which access is to be granted.
- `"type"`: The type of the grantor mechanism; valid values are `"hdi"`, `"sql"`, or `"procedure"`. If the type is specified, then the type is auto-sensed (see details below).
- `"type"`: The type of the grantor mechanism; valid values are `"hdi"`, `"sql"`, or `"procedure"`. If the type is not specified, then the type is auto-sensed (see details below).
- Use the command `xs services` to display a list of services available in the current space; the 'grantor-service' service should be visible.
For Clound Foundry, use the corresponding `cf` commands.
For Cloud Foundry, use the corresponding `cf` commands.

@@ -571,3 +787,3 @@ Note: Starting with version 3.0.0 of the HDI Deployer, the `"host"`, `"port"`, and `"certificate"` parameters are no longer required since they can be obtained from the target container binding. In this case, you must only specify the `"user"`, `"password"`, and `"schema"` when creating the user-provided service, e.g. `xs cups grantor-service -p '{ "user": "TARGET_USER", "password": "Grant_123", "schema": "TARGET_SCHEMA", "tags": [ "hana" ] }'`.

- At the datababase, a GRANT procedure must exist (or be visible) in the schema which is used in the user-provided service; an example is shown below.
- At the database, a GRANT procedure must exist (or be visible) in the schema which is used in the user-provided service; an example is shown below.
- The technical database user must have EXECUTE privileges on the GRANT procedure.

@@ -732,3 +948,3 @@ - The name of the GRANT procedure must be specified in the user-provided service in the `"procedure"` field, e.g. `"procedure": "GRANT"`.

- `TARGET_CONTAINER`: (optional) service name that specifies the HDI target container (needed, if more than one service is bound to the HDI Deployer)
- `TARGET_CONTAINER`: (optional) service name that specifies the HDI target container (needed, if more than one HDI service is bound to the HDI Deployer)
- `SERVICE_REPLACEMENTS`: (optional) JSON-structured list of service replacements, e.g. `[ { "key": "logical-service-name-1", "service":"real-service-name-1"}, { "key": "logical-service-name-2", "service":"real-service-name-2"} ]`, where the logical service names refer to the names in the HDI content and the real service names refer to the services which are bound to the HDI Deployer via `VCAP_SERVICES`; if the HDI content references a service name which is not listed in the replacements, then this name is used as a real service name

@@ -743,2 +959,3 @@

path: db
health-check-type: process
services:

@@ -766,5 +983,10 @@ - app-database

- `EXIT`: (optional) if set, the HDI Deployer will exit when the deployment is done; using the environment variable is equivalent to passing a `--exit` on the command line
- `DEPLOY_ID`: (optional) if set, the given id will be written to the final application log entry (a custom id, to support processes in parsing log output)
- `HDI_DEPLOY_OPTIONS`: (optional) JSON-structured set of options for the HDI Deployer, e.g. `{ "auto_undeploy" : true, "exit" : true, "root" : "/volumes/A/workspaces/B/db/", "include_filter" : [ "src/", "cfg/" ] }`
- `HDI_DEPLOY_OPTIONS`: (optional) JSON-structured set of options for the HDI Deployer, e.g. `{ "auto_undeploy" : true, "exit" : true, "root" : "/volumes/A/workspaces/B/db/", "include_filter" : [ "src/", "cfg/" ] }`; command line options can be translated to `HDI_DEPLOY_OPTIONS` options by replacing the `-`s in the option names with `_`s; options which can accept multiple values require a JSON array with the values, e.g. path options like the include-filter option.
- `APPLICATION_ID`: (optional, fallback `SAP_HDI`) this will be used, in conjunction with the `space_name` and the `organization_name` of the `VCAP_APPLICATION`, to set the session variable `APPLICATION` for all connections to the database. This setting may only be used by applications from SAP.
- `APPLICATION_VERSION_INFO`: (optional) this will be logged to the command line, to allow logging of some additional information about the application.
Options from `HDI_DEPLOY_OPTIONS` override options which are passed on the command line.
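For example, following the translation rule above, the command line options `--auto-undeploy --include-filter src/ cfg/` correspond to:
```
HDI_DEPLOY_OPTIONS='{ "auto_undeploy": true, "include_filter": [ "src/", "cfg/" ] }'
```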
## Ignore List

@@ -795,2 +1017,3 @@ The hdi deployer supports ignoring certain files via an `.hdiignore` file. The file has to be placed at the root of the project folder, just like the `undeploy.json`.

- `--parameter [<key>=<value> ..]`: pass the given list of key-value parameters to the deployment
- `--path-parameter [<path>:<key>=<value> ..]`: pass the given list of path-key-value parameters to the deployment
- `--[no-]auto-undeploy`: [don't] undeploy artifacts automatically based on delta detection and ignore the `undeploy.json` file

@@ -802,2 +1025,6 @@ - `--[no-]treat-warnings-as-errors`: [don't] treat warnings as errors

- `--exclude-filter [<path> ..]`: exclude the given paths during file walk, delta detection, and when explicitly scheduled via `--(un)deploy`
- `--[no-]treat-wrong-ownership-as-errors`: [don't] treat wrong ownership of objects as errors; not enabled by default
- `--[no-]migrationtable-development-mode`: [don't] pass the development-mode flag for migration tables to HDI, if the parameter is supported by the server; not enabled by default
- `--[no-]liveness-ping`: [don't] send a sign of life from time to time; by default, a sign of life is sent
- `--[no-]live-messages`: [don't] display the make messages while the make is still in progress; by default, the messages are displayed while the make is in progress

@@ -835,3 +1062,3 @@ See `--help` for details and defaults.

"name": "@sap/hdi-deploy",
"version": "3.7.0",
"version": "3.11.3",
"features": {

@@ -850,2 +1077,3 @@ "info": 2,

"parameter": 1,
"path-parameter": 1,
"treat-warnings-as-errors": 1,

@@ -893,3 +1121,3 @@ "simulate-make": 1,

- `deployerEnv`: javascript object containing the OS environment for the call to the deployer (e.g. containing VCAP_SERVICES)
- `callback`: a callback for the result of the call to the deployer accepting a response parameter of the form:
- `callback`: a standard callback of the form `(errors, result)`, where `result` is the result of the call to the deployer, of the form:

@@ -899,3 +1127,4 @@ ```

messages: [<list of result messages from the di server>],
exitCode: <exit code of the call to the deployer app>
exitCode: <exit code of the call to the deployer app, one of -1, 0, 1>,
signal: <signal that the child process was closed with>
}

@@ -905,1 +1134,9 @@ ```

- `io` (optional): JavaScript object containing two callback functions `io.stdoutCB` and `io.stderrCB` of the form `function(data)` for streaming stdout and stderr of the call to the deployer; defaults to piping stdout and stderr of the deployer to stdout and stderr of the calling Node.js app
The exit codes have different meanings:
- -1: The child process was most likely killed externally; check the `signal` property for details.
- 0: Deployment finished successfully.
- 1: Deployment failed; errors occurred.

The `signal` property is only set if `exitCode` is -1.
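Putting this together, a minimal usage sketch of the library API described above; it assumes `library.js` directly exports the `deploy` function, and the content path is a placeholder:
```
'use strict';

var deploy = require('@sap/hdi-deploy/library');

// environment for the deployer; must contain VCAP_SERVICES
// (and TARGET_CONTAINER, if more than one HDI service is bound)
var deployerEnv = Object.assign({}, process.env);

deploy('/path/to/db', deployerEnv, function (errors, result) {
  if (errors) {
    console.error('call to the deployer failed:', errors);
    return;
  }
  result.messages.forEach(function (message) {
    console.log(message); // result messages from the DI server
  });
  if (result.exitCode === -1) {
    // child process was most likely killed externally
    console.error('deployer was terminated by signal', result.signal);
  } else if (result.exitCode === 1) {
    console.error('deployment failed');
  } // exitCode 0: deployment finished successfully
});
```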

