Socket
Socket
Sign inDemoInstall

@sap/hdi-deploy

Package Overview
Dependencies
Maintainers
3
Versions
60
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@sap/hdi-deploy - npm Package Compare versions

Comparing version 3.3.3 to 3.6.0

CHANGELOG.md

15

deploy.js

@@ -97,2 +97,4 @@ 'use strict';

args.translateJSONEnvStringArrayOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'treat_unmodified_as_modified') {
args.translateJSONEnvBooleanOptionToOption(logger, options, name, option, process.argv);
} else if (option === 'undeploy') {

@@ -182,2 +184,4 @@ args.translateJSONEnvStringArrayOptionToOption(logger, options, name, option, process.argv);

" --deploy [<file> ..] explicitly schedule the given files for deploy; extends the include-filter for collecting local files",
" --[no-]treat-unmodified-as-modified",
" [don't] treat unmodified files during delta detection as modified files",
" --undeploy [<file> ..] explicitly schedule the given files for undeploy",

@@ -239,2 +243,6 @@ " --[no-]auto-undeploy [don't] undeploy artifacts automatically based on delta detection and ignore the undeploy.json file",

i = args.translatePathListOptionToPathFilter(i, process.argv, opt.deploy, checkPathInDeployOption);
} else if (arg === '--treat-unmodified-as-modified') {
opt.treatUnmodifiedAsModified = true;
} else if (arg === '--no-treat-unmodified-as-modified') {
opt.treatUnmodifiedAsModified = false;
} else if (arg === '--undeploy') {

@@ -521,3 +529,8 @@ i = args.translatePathListOptionToPathFilter(i, process.argv, opt.undeploy, checkPathInUndeployOption);

var deploy = require('./lib/deploy.js');
deploy(opt, services, cb);
if (process.env.HDI_DEPLOY_MODE === "ZDM") {
var zdmDeployer = require('./lib/zdm/zdmDeployer.js');
zdmDeployer.deploy(deploy, opt, services, logger, cb);
} else {
deploy(opt, services, cb);
}
} catch (error) {

@@ -524,0 +537,0 @@ cb(error.message);

11

lib/content.js
'use strict';
/* jslint indent: 4 */
/* jslint indent: 4, esversion: 6 */

@@ -196,4 +196,9 @@ var fs = require('fs');

// file needs to be added
files.push(file);
// file needs to be added
if(file.endsWith('.hdbsynonymtemplate')){
logger.log(`File ${file} is using old-style .hdbsynonymtemplate. Please switch to .hdbsynonymconfig`);
} else if(file.endsWith('.hdbsynonymgrantor')){
logger.log(`File ${file} is using old-style .hdbsynonymgrantor. Please switch to .hdbgrants`);
}
files.push(file);
return true;

@@ -200,0 +205,0 @@ }

@@ -5,55 +5,69 @@ 'use strict';

var async = require('async');
const async = require('async');
var content = require('./content.js');
var hdi = require('./hdi.js');
var logger = require('./logger.js');
var privileges = require('./privileges.js');
var filters = require('./filters.js');
const content = require('./content.js');
const hdi = require('./hdi.js');
const logger = require('./logger.js');
const privileges = require('./privileges.js');
const filters = require('./filters.js');
/**
* Run a deployment, first grants privileges, then runs the actual deployment.
*
* @param {any} options Options for the deployment.
* @param {ServiceAccessor} services Services for the deployment.
* @param {function} cb Nodeback
* @returns {(undefined | function)} Either undefined or function, if we exit early.
*/
module.exports = function(options, services, cb) {
var contentPathsFilter = filters.invalidPathFilter();
if (options.includeFilter.valid) {
// an include-filter is given, merge the include-filter and the deploy options to ensure that we upload all files which are included in the deploy option;
// if no include-filter is given, we don't need to care about the deploy option during the file walk
contentPathsFilter.addPaths(options.includeFilter);
contentPathsFilter.addPaths(options.deploy);
}
const contentPathsFilter = filters.invalidPathFilter();
if (options.includeFilter.valid) {
/*
* an include-filter is given, merge the include-filter and the deploy options to ensure that we upload all files which are included in the deploy option;
* if no include-filter is given, we don't need to care about the deploy option during the file walk
*/
contentPathsFilter.addPaths(options.includeFilter);
contentPathsFilter.addPaths(options.deploy);
}
var
cnt = content(options.root, services, options.workingSet, options.deployDirs, contentPathsFilter, options.stripCRFromCSV),
tasks = [],
targetCreds,
container,
containerRole;
const cnt = content(options.root, services, options.workingSet, options.deployDirs, contentPathsFilter, options.stripCRFromCSV);
const tasks = [];
logger.trace('VCAP_SERVICES:', process.env.VCAP_SERVICES);
try {
const target_service = services.getTarget();
logger.log('Target service:', target_service.name);
} catch (e){
// Ignore errors.
}
logger.trace('top directories: ', cnt.serverTopDirs());
logger.trace('VCAP_SERVICES:', process.env.VCAP_SERVICES);
logger.trace('deploy files: ', cnt.deployFiles());
logger.trace('top directories: ', cnt.serverTopDirs());
targetCreds = services.getTargetCreds();
logger.trace('target credentials:', targetCreds);
logger.trace('deploy files: ', cnt.deployFiles());
container = targetCreds.schema + '#OO';
containerRole = targetCreds.schema + '::access_role';
const targetCreds = services.getTargetCreds();
logger.trace('target credentials:', targetCreds);
// if we have a default_access_role file in the processing set, check its content
if (cnt.containsDefaultAccessRoleFile()) {
var checkDefaultAccessRoleFileResult = cnt.checkDefaultAccessRoleFile();
if (checkDefaultAccessRoleFileResult !== undefined) {
cb(checkDefaultAccessRoleFileResult);
}
const container = `${targetCreds.schema }#OO`;
const containerRole = `${targetCreds.schema }::access_role`;
// if we have a default_access_role file in the processing set, check its content
if (cnt.containsDefaultAccessRoleFile()) {
const checkDefaultAccessRoleFileResult = cnt.checkDefaultAccessRoleFile();
if (checkDefaultAccessRoleFileResult !== undefined) {
return cb(checkDefaultAccessRoleFileResult);
}
}
tasks.push(function(cb) {
privileges.grant(options, services, cnt, container, containerRole, cb);
});
tasks.push(function(callback) {
privileges.grant(options, services, cnt, container, containerRole, callback);
});
tasks.push(function(cb) {
hdi.deploy(options, targetCreds, cnt, cb);
});
tasks.push(function(callback) {
hdi.deploy(options, targetCreds, cnt, callback);
});
async.series(tasks, cb);
async.series(tasks, cb);
};

@@ -16,6 +16,7 @@ 'use strict';

'default-access-role': 1,
'grants': 3,
'grants': 4,
'working-set': 1,
'include-filter': 1,
'deploy': 1,
'treat-unmodified-as-modified': 1,
'undeploy': 1,

@@ -22,0 +23,0 @@ 'parameter': 1 * sinceServerVersion_1_0_120_0,

@@ -192,6 +192,3 @@ 'use strict';

var client;
if (host) {
client = hdb.createClient(credentials);
}
var client = hdb.createClient(credentials);

@@ -337,2 +334,4 @@ function formatSqlError(sql, err) {

quotedSQLIdentifier: identifier,
execute: execute,

@@ -339,0 +338,0 @@

@@ -70,4 +70,10 @@ 'use strict';

function prepareMake(result) {
deployFiles = result.filter(function(item) { return item.STATUS === 'A' || item.STATUS === 'M'; })
.map(function(item) { return item.PATH; });
if (options.treatUnmodifiedAsModified) {
// schedule all locally collected files for deploy; this maps to Added, Modified, or Unmodified
deployFiles = content.deployFiles().map(function(item) { return item[0]; });
} else {
// schedule the Added or Modified files for deploy
deployFiles = result.filter(function(item) { return item.STATUS === 'A' || item.STATUS === 'M'; })
.map(function(item) { return item.PATH; });
}
undeployFiles = result.filter(function(item) { return item.STATUS === 'D'; })

@@ -104,3 +110,7 @@ .map(function(item) { return item.PATH; });

logger.log(deployFiles.length + " modified or added files are scheduled for deploy based on delta detection");
if (options.treatUnmodifiedAsModified) {
logger.log(deployFiles.length + " modified, unmodified, or added files are scheduled for deploy");
} else {
logger.log(deployFiles.length + " modified or added files are scheduled for deploy based on delta detection");
}
logger.log(undeployFiles.length + " deleted files are scheduled for undeploy based on delta detection (filtered by undeploy whitelist)");

@@ -237,30 +247,52 @@

var defaultAccessRoleName = 'default_access_role';
var containerAccessRoleName = creds.schema + "::access_role";
if (content.containsDefaultAccessRoleFile()) {
if (options.simulateMake) {
tasks.push(logger.logfn('Default-access-role file "src/defaults/default_access_role.hdbrole" found and scheduled for deploy, but simulate-make option was given; global role "%s" will not be adapted', containerAccessRoleName));
tasks.push(function(callback) {
var defaultAccessRoleFile = 'src/defaults/default_access_role.hdbrole';
var defaultAccessRoleName = 'default_access_role';
var containerAccessRoleName = creds.schema + "::access_role";
var defaultPermissionSet = [
['CREATE TEMPORARY TABLE', '', containerAccessRoleName],
['DELETE', '', containerAccessRoleName],
['EXECUTE', '', containerAccessRoleName],
['INSERT', '', containerAccessRoleName],
['SELECT', '', containerAccessRoleName],
['SELECT CDS METADATA', '', containerAccessRoleName],
['UPDATE', '', containerAccessRoleName]
];
if (undeployFiles.indexOf(defaultAccessRoleFile) !== -1 && deployFiles.indexOf(defaultAccessRoleFile) === -1) {
if (options.simulateMake) {
logger.log('Default-access-role file "src/defaults/default_access_role.hdbrole" undeployed, but simulate-make option was given; global role "%s" will not be adapted', containerAccessRoleName);
callback();
} else {
async.series([
logger.logfn('Default-access-role file "src/defaults/default_access_role.hdbrole" undeployed; global role "%s" will be adapted', containerAccessRoleName),
logger.logfnTimerInit('regrant-default-permissions', 'Regranting default permission set to global role "%s"...', containerAccessRoleName),
function(cb) { client.grantContainerSchemaPrivileges(defaultPermissionSet, {}, cb); },
logger.logfnTimerDelta('regrant-default-permissions', 'Regranting default permission set to global role "%s"... ok', containerAccessRoleName)
], callback);
}
} else {
var defaultPermissionSet = [
['CREATE TEMPORARY TABLE', '', containerAccessRoleName],
['DELETE', '', containerAccessRoleName],
['EXECUTE', '', containerAccessRoleName],
['INSERT', '', containerAccessRoleName],
['SELECT', '', containerAccessRoleName],
['SELECT CDS METADATA', '', containerAccessRoleName],
['UPDATE', '', containerAccessRoleName]
];
if (content.containsDefaultAccessRoleFile()) {
if (options.simulateMake) {
logger.log('Default-access-role file "src/defaults/default_access_role.hdbrole" scheduled for deploy, but simulate-make option was given; global role "%s" will not be adapted', containerAccessRoleName);
callback();
} else {
async.series([
logger.logfn('Default-access-role file "src/defaults/default_access_role.hdbrole" scheduled for deploy; global role "%s" will be adapted', containerAccessRoleName),
logger.logfnTimerInit('grant-default-role', 'Granting container-local default access role "%s"."%s" to global role "%s"...', creds.schema, defaultAccessRoleName, containerAccessRoleName),
function(cb) { client.grantContainerSchemaRoles([[defaultAccessRoleName, '', containerAccessRoleName]], {}, cb); },
logger.logfnTimerDelta('grant-default-role', 'Granting container-local default access role "%s"."%s" to global role "%s"... ok', creds.schema, defaultAccessRoleName, containerAccessRoleName),
tasks.push(logger.logfn('Default-access-role file "src/defaults/default_access_role.hdbrole" found and scheduled for deploy; global role "%s" will be adapted', containerAccessRoleName));
tasks.push(logger.logfnTimerInit('grant-default-role', 'Granting container-local default access role "%s"."%s" to global role "%s"...', creds.schema, defaultAccessRoleName, containerAccessRoleName));
tasks.push(function(callback) { client.grantContainerSchemaRoles([[defaultAccessRoleName, '', containerAccessRoleName]], {}, callback); });
tasks.push(logger.logfnTimerDelta('grant-default-role', 'Granting container-local default access role "%s"."%s" to global role "%s"... ok', creds.schema, defaultAccessRoleName, containerAccessRoleName));
tasks.push(logger.logfnTimerInit('revoke-default-permissions', 'Revoking default permission set from global role "%s"...', containerAccessRoleName));
tasks.push(function(callback) { client.revokeContainerSchemaPrivileges(defaultPermissionSet, {}, callback); });
tasks.push(logger.logfnTimerDelta('revoke-default-permissions', 'Revoking default permission set from global role "%s"... ok', containerAccessRoleName));
logger.logfnTimerInit('revoke-default-permissions', 'Revoking default permission set from global role "%s"...', containerAccessRoleName),
function(cb) { client.revokeContainerSchemaPrivileges(defaultPermissionSet, {}, cb); },
logger.logfnTimerDelta('revoke-default-permissions', 'Revoking default permission set from global role "%s"... ok', containerAccessRoleName)
], callback);
}
} else {
logger.log('No default-access-role handling needed; global role "%s" will not be adapted', containerAccessRoleName);
callback();
}
}
} else {
tasks.push(logger.logfn('Default-access-role file "src/defaults/default_access_role.hdbrole" not found or not scheduled for deploy; global role "%s" will not be adapted', containerAccessRoleName));
}
});

@@ -267,0 +299,0 @@ if (options.lockContainer) {

@@ -19,2 +19,3 @@ 'use strict';

deploy: filters.invalidPathFilter(),
treatUnmodifiedAsModified: false,
undeploy: filters.invalidPathFilter(),

@@ -21,0 +22,0 @@ parameters: {},

@@ -24,152 +24,329 @@ 'use strict';

function grantPrivileges(client, grantor_is_hdi_container, privileges, grantee, grantor_schema, grantor_remote, cb) {
var tasks = [];
var container_roles = [];
function createSQLGrantorStrategy(client, tasks) {
return {
client: client,
tasks: tasks,
initialize: function() {
},
finalize: function() {
},
grantSystemPrivileges: function(privileges, grantee, grantable) {
tasks.push(client.grantSystemPrivileges(privileges, grantee, grantable));
},
grantSchemaPrivileges: function(privileges, schema, grantee, grantable) {
tasks.push(client.grantSchemaPrivileges(privileges, schema, grantee, grantable));
},
grantGlobalObjectPrivileges: function(privileges, name, type, grantee, grantable) {
tasks.push(client.grantGlobalObjectPrivileges(privileges, name, type, grantee, grantable));
},
grantSchemaObjectPrivileges: function(privileges, schema, name, grantee, grantable) {
tasks.push(client.grantObjectPrivileges(privileges, schema, name, grantee, grantable));
},
grantGlobalRoles: function(roles, grantee, grantable) {
tasks.push(client.grantRoles(roles, grantee, grantable));
},
grantSchemaRoles: function(schema, roles, grantee, grantable) {
tasks.push(client.grantSchemaRoles(schema, roles, grantee, grantable));
}
};
}
// the rule for schema selection is:
// 1. obj.schema
// 2. obj.reference, only used for schema_privileges
// 3. grantor_schema
function createHDIContainerGrantorStrategy(client, tasks, grantor_schema) {
return {
client: client,
tasks: tasks,
grantor_schema: grantor_schema,
container_roles: [],
initialize: function() {
this.container_roles = [];
},
finalize: function() {
if (this.container_roles.length > 0) {
// grant all container roles which were collected into container_roles
var tempTablesForContainerRoles = [
['#CONTAINER_ROLES_PARAMETERS', hdiTables.parameters.type],
['#CONTAINER_ROLES', hdiTables.schemaRoles.type]
];
if (privileges.roles) {
// roles is supported for backwards compatibility
// string format: "roles": [ "X", "Y" ]
// object format: "roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
var string_format_roles = [];
privileges.roles.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_roles.push(obj);
} else {
if (obj.names) {
tasks.push(client.grantRoles(obj.names, grantee, false));
}
if (obj.roles) {
tasks.push(client.grantRoles(obj.roles, grantee, false));
}
if (obj.roles_with_admin_option) {
tasks.push(client.grantRoles(obj.roles_with_admin_option, grantee, true));
}
}
});
tasks.push(client.createTmpTables(tempTablesForContainerRoles));
if (string_format_roles.length > 0) {
tasks.push(client.grantRoles(string_format_roles, grantee, false));
}
}
tasks.push(client.bulkInsert('#CONTAINER_ROLES', hdiTables.schemaRoles.fields, this.container_roles));
tasks.push(client.hdiGrantSchemaRoles(grantor_schema, '#CONTAINER_ROLES', '#CONTAINER_ROLES_PARAMETERS', client.hdiCheckResult('grant container roles', true)));
if (privileges.global_roles) {
// global_roles is supported for symmetry with hdbrole
// string format: "global_roles": [ "X", "Y" ]
// object format: "global_roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
var string_format_global_roles = [];
privileges.global_roles.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_global_roles.push(obj);
} else {
if (obj.names) {
tasks.push(client.grantRoles(obj.names, grantee, false));
}
if (obj.roles) {
tasks.push(client.grantRoles(obj.roles, grantee, false));
}
if (obj.roles_with_admin_option) {
tasks.push(client.grantRoles(obj.roles_with_admin_option, grantee, true));
}
tasks.push(client.dropTmpTables(tempTablesForContainerRoles));
}
});
if (string_format_global_roles.length > 0) {
tasks.push(client.grantRoles(string_format_global_roles, grantee, false));
},
grantSystemPrivileges: function() {
throw new Error('system privileges are not supported in case of an HDI container service binding');
},
grantSchemaPrivileges: function() {
throw new Error('schema privileges are not supported in case of an HDI container service binding');
},
grantGlobalObjectPrivileges: function() {
throw new Error('global object privileges are not supported in case of an HDI container service binding');
},
grantSchemaObjectPrivileges: function() {
throw new Error('object privileges are not supported in case of an HDI container service binding');
},
grantSchemaRoles: function(schema, roles, grantee, grantable) {
if (schema !== this.grantor_schema) {
throw new Error('schema is not supported for schema roles in case of an HDI container service binding');
}
if (grantable) {
throw new Error('schema roles with admin option are not supported in case of an HDI container service binding');
}
var container_roles = this.container_roles;
roles.forEach(function(role) {
container_roles.push([role, grantee]);
});
},
grantGlobalRoles: function() {
throw new Error('global roles are not supported in case of an HDI container service binding');
}
}
};
}
if (privileges.system_privileges) {
// string format: "system_privileges": [ "X", "Y" ]
// object format: "system_privileges": [ { "privileges": [ "X", "Y" ], "privileges_with_admin_option": [ "A", "B" ] } ]
var string_format_privileges = [];
privileges.system_privileges.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_privileges.push(obj);
} else {
if (obj.privileges) {
tasks.push(client.grantSystemPrivileges(obj.privileges, grantee, false));
}
if (obj.privileges_with_admin_option) {
tasks.push(client.grantSystemPrivileges(obj.privileges_with_admin_option, grantee, true));
}
function createProcedureGrantorStrategy(client, tasks, grantor_procedure, grantor_procedure_schema) {
return {
client: client,
tasks: tasks,
grantor_procedure: grantor_procedure,
grantor_procedure_schema: grantor_procedure_schema,
grant_privileges: [],
initialize: function() {
this.privileges = [];
},
finalize: function() {
if (this.grant_privileges.length > 0) {
tasks.push(client.execute('CREATE LOCAL TEMPORARY COLUMN TABLE #PRIVILEGES (PRIVILEGE_TYPE NVARCHAR(128), PRIVILEGE_NAME NVARCHAR(256), OBJECT_SCHEMA NVARCHAR(256), OBJECT_NAME NVARCHAR(256), OBJECT_TYPE NVARCHAR(128), GRANTEE_SCHEMA NVARCHAR(256), GRANTEE_NAME NVARCHAR(256), GRANTABLE NVARCHAR(5))'));
tasks.push(client.bulkInsert('#PRIVILEGES', [ 'PRIVILEGE_TYPE', 'PRIVILEGE_NAME', 'OBJECT_SCHEMA', 'OBJECT_NAME', 'OBJECT_TYPE', 'GRANTEE_SCHEMA', 'GRANTEE_NAME', 'GRANTABLE' ], this.grant_privileges));
var schema_prefix = this.grantor_procedure_schema ? client.quotedSQLIdentifier(this.grantor_procedure_schema) + '.' : '';
tasks.push(client.execute('CALL ' + schema_prefix + client.quotedSQLIdentifier(this.grantor_procedure) + '(#PRIVILEGES)'));
tasks.push(client.execute('DROP TABLE #PRIVILEGES'));
}
});
},
grantSystemPrivileges: function(privileges, grantee, grantable) {
privileges.forEach(function (privilege) {
this.grant_privileges.push([
'SYSTEM_PRIVILEGE',
privilege,
null,
null,
null,
null,
grantee,
grantable ? 'TRUE' : 'FALSE'
]);
}.bind(this));
},
grantSchemaPrivileges: function(privileges, schema, grantee, grantable) {
privileges.forEach(function (privilege) {
this.grant_privileges.push([
'SCHEMA_PRIVILEGE',
privilege,
null,
schema,
null,
null,
grantee,
grantable ? 'TRUE' : 'FALSE'
]);
}.bind(this));
},
grantGlobalObjectPrivileges: function(privileges, name, type, grantee, grantable) {
privileges.forEach(function (privilege) {
this.grant_privileges.push([
'GLOBAL_OBJECT_PRIVILEGE',
privilege,
null,
name,
type,
null,
grantee,
grantable ? 'TRUE' : 'FALSE'
]);
}.bind(this));
},
grantSchemaObjectPrivileges: function(privileges, schema, name, grantee, grantable) {
privileges.forEach(function (privilege) {
this.grant_privileges.push([
'SCHEMA_OBJECT_PRIVILEGE',
privilege,
schema,
name,
null,
null,
grantee,
grantable ? 'TRUE' : 'FALSE'
]);
}.bind(this));
},
grantSchemaRoles: function(schema, roles, grantee, grantable) {
roles.forEach(function (role) {
this.grant_privileges.push([
'SCHEMA_ROLE',
null,
schema,
role,
null,
null,
grantee,
grantable ? 'TRUE' : 'FALSE'
]);
}.bind(this));
},
grantGlobalRoles: function(roles, grantee, grantable) {
roles.forEach(function (role) {
this.grant_privileges.push([
'GLOBAL_ROLE',
null,
null,
role,
null,
null,
grantee,
grantable ? 'TRUE' : 'FALSE'
]);
}.bind(this));
}
};
}
if (string_format_privileges.length > 0) {
tasks.push(client.grantSystemPrivileges(string_format_privileges, grantee, false));
function grantPrivileges(client, grantor_type, privileges, grantee, grantor_schema, grantor_remote, grantor_procedure, grantor_procedure_schema, cb) {
try {
var tasks = [];
var grantorStrategy;
if (grantor_type === 'hdi') {
grantorStrategy = createHDIContainerGrantorStrategy(client, tasks, grantor_schema);
} else if (grantor_type === 'procedure') {
grantorStrategy = createProcedureGrantorStrategy(client, tasks, grantor_procedure, grantor_procedure_schema);
} else {
grantorStrategy = createSQLGrantorStrategy(client, tasks);
}
}
if (privileges.schema_privileges) {
privileges.schema_privileges.forEach(function(obj) {
if (obj.privileges) {
tasks.push(client.grantSchemaPrivileges(obj.privileges, selectSchema(obj.schema, obj.reference, grantor_schema), grantee, false));
}
if (obj.privileges_with_grant_option) {
tasks.push(client.grantSchemaPrivileges(obj.privileges_with_grant_option, selectSchema(obj.schema, obj.reference, grantor_schema), grantee, true));
}
});
}
grantorStrategy.initialize();
if (privileges.object_privileges) {
privileges.object_privileges.forEach(function(obj) {
if (obj.privileges) {
tasks.push(client.grantObjectPrivileges(obj.privileges, selectSchema(obj.schema, grantor_schema), obj.name, grantee, false));
}
if (obj.privileges_with_grant_option) {
tasks.push(client.grantObjectPrivileges(obj.privileges_with_grant_option, selectSchema(obj.schema, grantor_schema), obj.name, grantee, true));
}
});
}
// the rule for schema selection is:
// 1. obj.schema
// 2. obj.reference, only used for schema_privileges
// 3. grantor_schema
if (privileges.global_object_privileges) {
privileges.global_object_privileges.forEach(function(obj) {
var name = obj.name;
if (typeof name === 'undefined' && obj.type === 'REMOTE SOURCE') {
name = grantor_remote;
}
if (privileges.roles) {
// roles is supported for backwards compatibility
// string format: "roles": [ "X", "Y" ]
// object format: "roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
var string_format_roles = [];
privileges.roles.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_roles.push(obj);
} else {
if (obj.names) {
grantorStrategy.grantGlobalRoles(obj.names, grantee, false);
}
if (obj.roles) {
grantorStrategy.grantGlobalRoles(obj.roles, grantee, false);
}
if (obj.roles_with_admin_option) {
grantorStrategy.grantGlobalRoles(obj.roles_with_admin_option, grantee, true);
}
}
});
if (obj.privileges) {
tasks.push(client.grantGlobalObjectPrivileges(obj.privileges, name, obj.type, grantee, false));
if (string_format_roles.length > 0) {
grantorStrategy.grantGlobalRoles(string_format_roles, grantee, false);
}
if (obj.privileges_with_grant_option) {
tasks.push(client.grantGlobalObjectPrivileges(obj.privileges_with_grant_option, name, obj.type, grantee, true));
}
});
}
}
if (privileges.schema_roles) {
// string format: "schema_roles": [ "X", "Y" ]
// object format: "schema_roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
if (grantor_is_hdi_container) {
privileges.schema_roles.forEach(function(obj) {
if (obj.schema) {
cb(new Error('"schema" is not supported in "schema_roles" in case of an HDI container service binding'));
return;
}
if (privileges.global_roles) {
// global_roles is supported for symmetry with hdbrole
// string format: "global_roles": [ "X", "Y" ]
// object format: "global_roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
var string_format_global_roles = [];
privileges.global_roles.forEach(function(obj) {
if (typeof obj === 'string') {
container_roles.push([obj, grantee]);
string_format_global_roles.push(obj);
} else {
if (obj.names) {
obj.names.forEach(function(role) {
container_roles.push([role, grantee]);
});
grantorStrategy.grantGlobalRoles(obj.names, grantee, false);
}
if (obj.roles) {
obj.roles.forEach(function(role) {
container_roles.push([role, grantee]);
});
grantorStrategy.grantGlobalRoles(obj.roles, grantee, false);
}
if (obj.roles_with_admin_option) {
cb(new Error('"roles_with_admin_option" is not supported in "schema_roles" in case of an HDI container service binding'));
return;
grantorStrategy.grantGlobalRoles(obj.roles_with_admin_option, grantee, true);
}
}
});
} else {
if (string_format_global_roles.length > 0) {
grantorStrategy.grantGlobalRoles(string_format_global_roles, grantee, false);
}
}
if (privileges.system_privileges) {
// string format: "system_privileges": [ "X", "Y" ]
// object format: "system_privileges": [ { "privileges": [ "X", "Y" ], "privileges_with_admin_option": [ "A", "B" ] } ]
var string_format_privileges = [];
privileges.system_privileges.forEach(function(obj) {
if (typeof obj === 'string') {
string_format_privileges.push(obj);
} else {
if (obj.privileges) {
grantorStrategy.grantSystemPrivileges(obj.privileges, grantee, false);
}
if (obj.privileges_with_admin_option) {
grantorStrategy.grantSystemPrivileges(obj.privileges_with_admin_option, grantee, true);
}
}
});
if (string_format_privileges.length > 0) {
grantorStrategy.grantSystemPrivileges(string_format_privileges, grantee, false);
}
}
if (privileges.schema_privileges) {
privileges.schema_privileges.forEach(function(obj) {
if (obj.privileges) {
grantorStrategy.grantSchemaPrivileges(obj.privileges, selectSchema(obj.schema, obj.reference, grantor_schema), grantee, false);
}
if (obj.privileges_with_grant_option) {
grantorStrategy.grantSchemaPrivileges(obj.privileges_with_grant_option, selectSchema(obj.schema, obj.reference, grantor_schema), grantee, true);
}
});
}
if (privileges.object_privileges) {
privileges.object_privileges.forEach(function(obj) {
if (obj.privileges) {
grantorStrategy.grantSchemaObjectPrivileges(obj.privileges, selectSchema(obj.schema, grantor_schema), obj.name, grantee, false);
}
if (obj.privileges_with_grant_option) {
grantorStrategy.grantSchemaObjectPrivileges(obj.privileges_with_grant_option, selectSchema(obj.schema, grantor_schema), obj.name, grantee, true);
}
});
}
if (privileges.global_object_privileges) {
privileges.global_object_privileges.forEach(function(obj) {
var name = obj.name;
if (typeof name === 'undefined' && obj.type === 'REMOTE SOURCE') {
name = grantor_remote;
}
if (obj.privileges) {
grantorStrategy.grantGlobalObjectPrivileges(obj.privileges, name, obj.type, grantee, false);
}
if (obj.privileges_with_grant_option) {
grantorStrategy.grantGlobalObjectPrivileges(obj.privileges_with_grant_option, name, obj.type, grantee, true);
}
});
}
if (privileges.schema_roles) {
// string format: "schema_roles": [ "X", "Y" ]
// object format: "schema_roles": [ { "names": [ "X", "Y" ], "roles": [ "X", "Y" ], "roles_with_admin_option": [ "A", "B" ] } ]
var string_format_schema_roles = [];

@@ -181,9 +358,9 @@ privileges.schema_roles.forEach(function(obj) {

if (obj.names) {
tasks.push(client.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.names, grantee, false));
grantorStrategy.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.names, grantee, false);
}
if (obj.roles) {
tasks.push(client.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.roles, grantee, false));
grantorStrategy.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.roles, grantee, false);
}
if (obj.roles_with_admin_option) {
tasks.push(client.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.roles_with_admin_option, grantee, true));
grantorStrategy.grantSchemaRoles(selectSchema(obj.schema, grantor_schema), obj.roles_with_admin_option, grantee, true);
}

@@ -194,130 +371,148 @@ }

if (string_format_schema_roles.length > 0) {
tasks.push(client.grantSchemaRoles(grantor_schema, string_format_schema_roles, grantee, false));
grantorStrategy.grantSchemaRoles(grantor_schema, string_format_schema_roles, grantee, false);
}
}
}
if (privileges.container_roles) {
// collect container roles into container_roles
privileges.container_roles.forEach(function(role) {
container_roles.push([role, grantee]);
});
}
if (privileges.container_roles) {
grantorStrategy.grantSchemaRoles(grantor_schema, privileges.container_roles, grantee, false);
}
if (container_roles.length > 0) {
// grant all container roles which were collected into container_roles
var tempTablesForContainerRoles = [
['#CONTAINER_ROLES_PARAMETERS', hdiTables.parameters.type],
['#CONTAINER_ROLES', hdiTables.schemaRoles.type]
];
grantorStrategy.finalize();
tasks.push(client.createTmpTables(tempTablesForContainerRoles));
tasks.push(client.bulkInsert('#CONTAINER_ROLES', hdiTables.schemaRoles.fields, container_roles));
tasks.push(client.hdiGrantSchemaRoles(grantor_schema, '#CONTAINER_ROLES', '#CONTAINER_ROLES_PARAMETERS', client.hdiCheckResult('grant container roles', true)));
tasks.push(client.dropTmpTables(tempTablesForContainerRoles));
async.series(tasks, cb);
} catch (err) {
cb(err);
}
async.series(tasks, cb);
}
function grantUsers(privileges, grantor, fileName, creds, targetCreds, container, user, cb) {
var tasks = [];
var grantor_is_hdi_container = creds.hdi_user ? true : false;
function grantUsers(privileges, grantor, fileName, creds, targetCreds, container, grantee, cb) {
try {
var tasks = [];
var grantor_type;
var host = creds.host;
var port = creds.port;
var hosts = creds.db_hosts;
var certificate = creds.certificate;
if (creds.type !== undefined) {
// if the grantor object contains a type field, then use this for selecting the grantor's type
if (creds.type === 'hdi' || creds.type === 'sql' || creds.type === 'procedure' || creds.type === 'ignore') {
grantor_type = creds.type;
} else {
throw new Error("unknown grantor type, known grantor types are 'hdi', 'sql', 'procedure', 'ignore'");
}
} else {
// otherwise, fallback to old auto-sensing for sql and hdi types
grantor_type = 'sql';
if (creds.hdi_user) {
grantor_type = 'hdi';
}
}
if (!Array.isArray(hosts) && host === undefined && port === undefined && certificate === undefined) {
// host, port, certificate are optional in the service credentials, fallback to target credentials if undefined
host = targetCreds.host;
port = targetCreds.port;
hosts = targetCreds.db_hosts;
certificate = targetCreds.certificate;
}
var host = creds.host;
var port = creds.port;
var hosts = creds.db_hosts;
var certificate = creds.certificate;
var client = grantor_is_hdi_container ? hdb(host, port, creds.hdi_user, creds.hdi_password, certificate, hosts)
: hdb(host, port, creds.user, creds.password, certificate, hosts);
if (!Array.isArray(hosts) && host === undefined && port === undefined && certificate === undefined) {
// host, port, certificate are optional in the service credentials, fallback to target credentials if undefined
host = targetCreds.host;
port = targetCreds.port;
hosts = targetCreds.db_hosts;
certificate = targetCreds.certificate;
}
tasks.push(client.connect());
if (creds.schema) {
tasks.push(client.setSchema(creds.schema));
}
var user = creds.user;
var password = creds.password;
if (grantor_type === 'hdi') {
user = creds.hdi_user;
password = creds.hdi_password;
}
if (privileges.object_owner) {
tasks.push(function(cb) {
grantPrivileges(client, grantor_is_hdi_container, privileges.object_owner, container, creds.schema, creds.remote, cb);
});
}
logger.log(' Using service "' + grantor + '" of type "' + grantor_type + '"');
if (privileges.application_user) {
tasks.push(function(cb) {
grantPrivileges(client, grantor_is_hdi_container, privileges.application_user, user, creds.schema, creds.remote, cb);
});
}
if (grantor_type === 'ignore') {
cb(null);
return;
}
tasks.push(client.disconnect());
var client = hdb(host, port, user, password, certificate, hosts);
async.series(tasks, function(err, results) {
client.end();
tasks.push(client.connect());
if (creds.schema && grantor_type !== 'procedure') {
tasks.push(client.setSchema(creds.schema));
}
if (err) { // add information about grantor service & underlying user
err.message += '\ngrantor service: ' + grantor + ', granting user: ';
if (creds.hdi_user) {
err.message += creds.hdi_user + ' (hdi_user)';
} else {
err.message += creds.user;
}
if (privileges.object_owner) {
tasks.push(function(cb) {
grantPrivileges(client, grantor_type, privileges.object_owner, container, creds.schema, creds.remote, creds.procedure, creds.procedure_schema, cb);
});
}
err.message += '\nfile name: ' + fileName;
if (privileges.application_user) {
tasks.push(function(cb) {
grantPrivileges(client, grantor_type, privileges.application_user, grantee, creds.schema, creds.remote, creds.procedure, creds.procedure_schema, cb);
});
}
cb(err, results);
});
tasks.push(client.disconnect());
}
async.series(tasks, function(err, results) {
client.end();
function grantFile(services, root, fileName, container, user, cb) {
var
file,
tasks = [];
if (err) { // add information about grantor service & underlying user
err.message += '\ngrantor service: "' + grantor + '", type: "' + grantor_type + '", user: "' + user + '"';
if (grantor_type === 'hdi') {
err.message += ' (hdi_user)';
}
if (grantor_type === 'procedure') {
err.message += ', procedure: "' + creds.procedure + '"';
if (creds.procedure_schema) {
err.message += ', procedure_schema: "' + creds.procedure_schema + '"';
}
}
fileName = paths.join(root, paths.clientPath(fileName));
err.message += '\nfile name: ' + fileName;
}
try {
file = utils.readJSONFile(fileName);
cb(err, results);
});
} catch (err) {
cb(err);
return;
}
}
var targetCreds = services.getTargetCreds();
function grantFile(services, root, fileName, container, grantee, cb) {
try {
var
file,
tasks = [];
Object.keys(file).forEach(function(grantor) {
var creds;
try {
creds = services.getCreds(grantor);
} catch (err) {
cb(err);
return;
}
fileName = paths.join(root, paths.clientPath(fileName));
logger.trace('grantor', file[grantor]);
file = utils.readJSONFile(fileName);
tasks.push(function(cb) {
grantUsers(file[grantor], grantor, fileName, creds, targetCreds, container, user, cb);
});
var targetCreds = services.getTargetCreds();
});
Object.keys(file).forEach(function(grantor) {
var creds;
try {
creds = services.getCreds(grantor);
} catch (err) {
cb(err);
return;
}
async.series(tasks, cb);
logger.trace('grantor', file[grantor]);
tasks.push(function(cb) {
grantUsers(file[grantor], grantor, fileName, creds, targetCreds, container, grantee, cb);
});
});
async.series(tasks, cb);
} catch (err) {
cb(err);
}
}
// grant target container oo and target container access role
exports.grant = function(options, services, content, container, user, cb) {
exports.grant = function(options, services, content, container, grantee, cb) {
try {

@@ -337,3 +532,3 @@ var

tasks.push(function(cb) {
grantFile(services, content.root, fileName, container, user, cb);
grantFile(services, content.root, fileName, container, grantee, cb);
});

@@ -354,4 +549,3 @@ tasks.push(logger.logfn(' Processing "' + fileName + '"... ok'));

cb(err);
return;
}
};
{
"name": "@sap/hdi-deploy",
"version": "3.3.3",
"version": "3.6.0",
"dependencies": {
"@sap/xsenv": {
"version": "1.2.7"
"version": "1.2.9",
"dependencies": {
"assert-plus": {
"version": "1.0.0"
},
"core-util-is": {
"version": "1.0.2"
},
"debug": {
"version": "3.1.0"
},
"extsprintf": {
"version": "1.4.0"
},
"ms": {
"version": "2.0.0"
},
"verror": {
"version": "1.10.0"
}
}
},
"assert-plus": {
"version": "1.0.0"
"align-text": {
"version": "0.1.4"
},
"amdefine": {
"version": "1.0.1"
},
"async": {
"version": "2.5.0"
"version": "2.6.0"
},
"core-util-is": {
"version": "1.0.2"
"camelcase": {
"version": "1.2.1",
"optional": true
},
"debug": {
"version": "2.6.7"
"center-align": {
"version": "0.1.3",
"optional": true
},
"extsprintf": {
"version": "1.3.0"
"cliui": {
"version": "2.1.0",
"optional": true,
"dependencies": {
"wordwrap": {
"version": "0.0.2",
"optional": true
}
}
},
"decamelize": {
"version": "1.2.0",
"optional": true
},
"handlebars": {
"version": "4.0.10",
"dependencies": {
"async": {
"version": "1.5.2"
}
}
},
"hdb": {
"version": "0.12.4"
"version": "0.15.4"
},

@@ -29,12 +73,60 @@ "iconv-lite": {

},
"is-buffer": {
"version": "1.1.6"
},
"kind-of": {
"version": "3.2.2"
},
"lazy-cache": {
"version": "1.0.4",
"optional": true
},
"lodash": {
"version": "4.17.4"
"version": "4.17.5"
},
"ms": {
"version": "2.0.0"
"longest": {
"version": "1.0.1"
},
"verror": {
"version": "1.10.0"
"minimist": {
"version": "0.0.10"
},
"optimist": {
"version": "0.6.1"
},
"repeat-string": {
"version": "1.6.1"
},
"right-align": {
"version": "0.1.3",
"optional": true
},
"source-map": {
"version": "0.4.4"
},
"uglify-js": {
"version": "2.8.29",
"optional": true,
"dependencies": {
"source-map": {
"version": "0.5.7",
"optional": true
}
}
},
"uglify-to-browserify": {
"version": "1.0.2",
"optional": true
},
"window-size": {
"version": "0.1.0",
"optional": true
},
"wordwrap": {
"version": "0.0.3"
},
"yargs": {
"version": "3.10.0",
"optional": true
}
}
}

@@ -1,1 +0,1 @@

{"dependencies":{"@sap/xsenv":"1.2.7","async":"2.5.0","hdb":"0.12.4"},"description":"HDI content deployment","devDependencies":{"filter-node-package":"2.0.0","istanbul":"0.4.5","jshint":"2.9.4","mocha":"3.1.2","rewire":"2.5.2","shelljs":"0.6.0","should":"11.1.1","sinon":"1.17.6"},"engines":{"node":">=6.9.1"},"main":"deploy.js","maintainers":[{"name":"https-support.sap.com","email":"do-not-reply@sap.com"}],"name":"@sap/hdi-deploy","optionalDependencies":{},"readme":"ERROR: No README data found!","repository":{},"scripts":{"lint":"jshint *.js lib/","prepare-release":"rm -r lib/zdm/ && rm README.md && rm CHANGELOG.md && clean-packages && npm prune --production && ls -al","start":"node deploy.js","test":"test/test-quick.sh","test-all":"test/test-all.sh","test-all-filtered":"test/test-all-filtered.sh","test-all-filtered.win":"test\\test-all-filtered.bat","test-all.win":"test\\test-all.bat","test.win":"test\\test-quick.bat"},"version":"3.3.3","license":"SEE LICENSE IN developer-license-3.1.txt"}
{"dependencies":{"@sap/xsenv":"1.2.9","async":"2.6.0","handlebars":"4.0.10","hdb":"0.15.4"},"description":"HDI content deployment","devDependencies":{"command-line-args":"^4.0.7","command-line-usage":"^4.0.2","eslint":"^4.18.2","filter-node-package":"2.0.0","istanbul":"0.4.5","jshint":"2.9.4","mocha":"3.1.2","node-style":"^2.0.1","rewire":"2.5.2","shelljs":"0.6.0","should":"11.1.1","sinon":"1.17.6","uuid":"3.1.0"},"engines":{"node":">=6.9.1"},"main":"deploy.js","maintainers":[{"name":"https-support.sap.com","email":"do-not-reply@sap.com"}],"name":"@sap/hdi-deploy","optionalDependencies":{},"readme":"@sap/hdi-deploy\n===============\n\n`@sap/hdi-deploy` is the [Node.js](https://nodejs.org)-based deployment module for SAP HANA DI (HDI)-based persistence models, HDI Deployer for short. The HDI Deployer can be used in XS Advanced (XSA) and in SAP Cloud Platform (SAP CP)/Cloud Foundry (CF), and it is also used by the SAP Web IDE for interactive development scenarios.\n\nFor more information about HANA DI, please check the [SAP HANA Developer Guide](https://help.sap.com/viewer/4505d0bdaf4948449b7f7379d24d0f0d/2.0.02/en-US/eaa4e37394ea4efba8148d595d025261.html).\n\nThe HDI Deployer is packaged into a database module, a `db` module, as part of a Multi-Target Application (MTA) and is used to deploy HDI design-time artifacts of the `db` module to the respective HDI container. When an MTA is deployed via the Deploy Service, the `db` module is pushed first so that it can \"prepare\" the SAP HANA persistence; by the time defined services are started, the HDI container is ready for use.\n\nThe HDI Deployer can also be used without the Deploy Service and MTAs, and also for interactive scenarios or automation scripts.\n\nFor an MTA with different modules, e.g. a `db` module, a Node.js module, etc., this looks as follows:\n\n```\n +-----------------+ +-----------------+ +-----------------+\n | db module | | Node.js module | | ... 
module |\n | w/ HDI Deployer | | | | |\n +-----------------+ +-----------------+ +-----------------+\n\n | | |\n | | |\n \\/ deploy persistence \\/ read/write/extend persistence \n | | |\n | | |\n\n +---------------------------------------------------------------+\n | HDI container |\n | |\n +---------------------------------------------------------------+\n```\n\nIn a HANA-Service-Broker-based HDI setup, each module of the MTA is equipped with it's own technical database user for accessing the runtime schema of the HDI container.\n\nThe following diagram illustrates the different users who are involved in this setup with regard to privileges: the application users user1 and user2 who are bound to one of the modules each, and the HDI container's object owner (the #OO user) who is the owner of the objects in the database persistence of the MTA which are managed by HDI:\n\n```\n +-----------------+ +-----------------+ +-----------------+\n | db module | | Node.js module | | ... module |\n | w/ HDI Deployer | | | | |\n +-----------------+ +-----------------+ +-----------------+\n\n | | o\n | --------------------------------- X application user user1\n | | o\n | | ----- X application user user2\n | |\n\n +---------------------------------------------------------------+\n | HDI container |\n | db object 1 db object 2 |\n +-------------------------------------\\-------------/-----------+\n \\ /\n o \\ /\n X object owner (#OO user)\n```\n\nThe HDI Deployer is packaged into the `db` module of the MTA. So, in order to use a new HDI Deployer, you need to reference a new version of the HDI Deployer in the `db` module's `package.json` file.\n\nThe HDI Deployer supports HANA 1 SPS11/SPS12 and HANA 2 SPS00/SPS01/SPS02. 
The HDI Deployer assumes that for newer versions of HANA, a corresponding version of the HANA Service Broker is used to create the CF/XSA service bindings.\n\nNote: The HDI Deployer assumes ownership of the `src/`, `cfg/`, and `lib/` folders in the bound target HDI container. Binding more than 1 instance of the HDI Deployer to the same HDI container as the target container, e.g. the `db` modules of 2 MTAs or 2 applications are bound to the same HDI container as the target container, is not supported and results in undefined behavior.\n\n## README.md\n\n**Installation**:\n- [Integration into a Database Module](#integration-into-a-database-module)\n- [Database Connection Details](#database-connection-details)\n\n**The Database Module**:\n- [A Database Module's File System Structure](#a-database-modules-file-system-structure)\n- [Delta Deployment and Undeploy Whitelist](#delta-deployment-and-undeploy-whitelist)\n- [The default_access_role Role](#the-default_access_role-role)\n- [Reusable Database Modules](#reusable-database-modules)\n- [Configuration File Templating](#configuration-file-templating)\n- [Permissions to Container-External Objects](#permissions-to-container-external-objects)\n\n**Configuration and Reconfiguration**:\n- [Environment Variables for Applications](#environment-variables-for-applications)\n- [Environment Variables for Infrastructure / Development Tools](#environment-variables-for-infrastructure--development-tools)\n- [Options for Interactive Scenarios](#options-for-interactive-scenarios)\n- [Supported Features](#supported-features)\n\n**Dynamic Deployment**:\n - [Deployment via hdi-dynamic-deploy](#deployment-via-hdi-dynamic-deploy)\n - [Using hdi-deploy as a Node.js library](#using-hdi-deploy-as-a-nodejs-library)\n\n\n## Integration into a Database Module\n\nUsually, `@sap/hdi-deploy` gets installed via a `package.json`-based dependency inside your application's `db` module:\n\n`db/package.json`:\n\n```\n{\n \"name\": \"deploy\",\n 
\"dependencies\": {\n \"@sap/hdi-deploy\": \"3.6.0\"\n },\n \"scripts\": {\n \"start\": \"node node_modules/@sap/hdi-deploy/\"\n }\n}\n```\n## Database Connection Details\n\nConnection details for the database, e.g. host, port, credentials, and certificates, are looked up by the HDI Deployer from the standard CF/XSA [`VCAP_SERVICES`](http://docs.cloudfoundry.org/devguide/deploy-apps/environment-variable.html#VCAP-SERVICES) environment variable which contains the bound services.\n\nFor local testing, the HDI Deployer supports default configurations via the following configuration files:\n\n- `default-services.json`: a JSON file which contains a set of service bindings\n- `default-env.json`: a JSON file which contains a set of environment variables and their values\n\n## A Database Module's File System Structure\n\nThe HDI Deployer expects the following file system structure for the HDI content in your `db` module:\n\n- `src/`: folder which contains your HDI source artifacts\n- `cfg/`: optional folder with HDI configuration artifacts\n- `package.json`: this file is used by npm (the Node.js package manager) to bootstrap and start the application\n\nOther files in the root directory will be ignored by `@sap/hdi-deploy`.\n\nPlease note that the `cfg/` folder also might need a `.hdiconfig` file, e.g. 
in case `.hdbsynonymconfig` files are placed there.\n\nIn combination with reusable database modules, the HDI Deployer will also consider database modules which are located in the `node_modules/` folder and which will be mapped to a corresponding sub-folder hierarchy in the container's `lib/` folder.\n\n## Delta Deployment and Undeploy Whitelist\n\nThe HDI Deployer implements a delta-based deployment strategy:\n\nOn startup, the HDI Deployer recursively scans the local `src/` and `cfg/` folders, processes config templates, looks at the HDI container at the server-side and calculates the set of added, modified, and deleted files based on the difference between the local file system state and the deployed file system state of the server-side HDI container.\n\nIn normal operation, the HDI Deployer will schedule only the set of added and modified files for deployment. The set of deleted files is not scheduled for undeployment.\n\nIn order to undeploy deleted files, an application needs to include an undeploy whitelist via an `undeploy.json` file in the root directory of the `db` module (right beside the `src/` and `cfg/` folders). The undeploy whitelist `undeploy.json` file is a JSON document with a top-level array of file names:\n\n`undeploy.json`:\n\n [\n \"src/Table.hdbcds\",\n \"src/Procedure.hdbprocedure\"\n ]\n\nThe file must list all artifacts which should be undeployed. The file path of the artifacts must be relative to the root directory of the `db` module and must use the HDI file path delimiter '/'.\n\nFor interactive scenarios, it's possible to pass the `auto-undeploy` option to the HDI Deployer, e.g.\n\n node deploy --auto-undeploy\n\nIn this case, the HDI Deployer will ignore the undeploy whitelist `undeploy.json` file and will schedule all deleted files in the `src/` and `cfg/` folders for undeployment.\n\n## The default_access_role Role\n\nWhen an HDI container service instance is created by the HANA Service Broker, e.g. 
service instance `foo` with schema name `FOO`, the broker creates an HDI container `FOO` (consisting of the runtime schema `FOO`, the HDI metadata and API schema `FOO#DI`, and the object owner `FOO#OO`) and a global access role `FOO::access_role` for the runtime schema. This access role is equipped with a default permission set for the runtime schema which consists of `SELECT`, `INSERT`, `UPDATE`, `DELETE`, `EXECUTE`, `CREATE TEMPORARY TABLE`, and `SELECT CDS METADATA` on the runtime schema `FOO`.\n\nEvery time the service instance is bound to an application, the broker creates 2 new users which are specific to this binding. The first user is the application user who is named `user` in the instance's credentials. This user is used by the application to access the HDI container's runtime schema `FOO`. This user is equipped with the service instance's global access role `FOO::access_role`. The second user is the HDI API user who is named `hdi_user` in the credentials. This user is equipped with privileges for the container's APIs in the `FOO#DI` schema.\n\nThe following diagram illustrates the binding-specific application users and the role of the global access role (the HDI API users and the bindings for the HDI Deployer are not shown for simplicity):\n\n```\n +-----------------+ +-----------------+ +-----------------+\n | db module | | Node.js module | | ... module |\n | w/ HDI Deployer | | | | |\n +-----------------+ +-----------------+ +-----------------+\n\n | | o\n | --------------------------------- X application user user1\n | | o \\\n | | ----- X application user user2 \\\n | | \\ \\\n \\ \\\n +---------------------------------------------------------------+ role FOO::access_role\n | HDI container FOO | /\n | | SELECT/INSERT/... 
on schema FOO\n +---------------------------------------------------------------+\n```\n\nExemplary service binding:\n\n {\n \"hana\" : [ {\n \"name\" : \"foo\",\n \"label\" : \"hana\",\n \"tags\" : [ \"hana\", \"database\", \"relational\" ],\n \"plan\" : \"hdi-shared\",\n \"credentials\" : {\n \"schema\" : \"FOO\",\n \"driver\" : \"com.sap.db.jdbc.Driver\",\n \"port\" : \"30115\",\n \"host\" : \"srv1234567.host.name\",\n \"db_hosts\" : [ {\n \"port\" : 30115,\n \"host\" : \"srv1234567.host.name\"\n } ],\n \"user\" : \"SBSS_34599959672902195741875760873853766555404727822156060056836149475\",\n \"password\" : \"<password>\",\n \"hdi_user\" : \"SBSS_64592794580116217572062412157356606994624009694957290675610125954\",\n \"hdi_password\" : \"<password>\",\n \"url\" : \"jdbc:sap://srv1234567.host.name:30115/?currentschema=FOO\"\n }\n } ]\n }\n\nIn order to assign roles from the HDI content to the application binding users (the `user` users), the HDI Deployer implements an automatic assignment of the `default_access_role` role if it is present in the deployed content:\n\nIf a role definition file exists at the path `src/defaults/default_access_role.hdbrole`, and this file defines a role named `default_access_role`, and this file is included in the deployment (e.g. not excluded via `include-filter`), then the HDI Deployer grants the deployed `default_access_role` role to the service instance's global access role (e.g. `FOO::access_role`). In addition, the HDI Deployer revokes all default permissions (e.g. `SELECT`, `INSERT`, `UPDATE`, `DELETE`, `EXECUTE`, `CREATE TEMPORARY TABLE`, and `SELECT CDS METADATA` on the runtime schema `FOO`) from the global access role. 
If the `default_access_role` is undeployed, the default permission set for the runtime schema will be restored.\n\nNote: If you use a `.hdinamespace` file in `src/` which defines a real namespace prefix for subfolders, then you need to put a `.hdinamespace` file with the empty namespace `\"name\" : \"\"` at `src/defaults/` to ensure that the role can be named `default_access_role`.\n\nThe following diagram illustrates the binding-specific application users, the role of the global access role, and the container-specific default access role:\n\n```\n +-----------------+ +-----------------+ +-----------------+\n | db module | | Node.js module | | ... module |\n | w/ HDI Deployer | | | | |\n +-----------------+ +-----------------+ +-----------------+\n\n | | o\n | --------------------------------- X application user user1\n | | o \\\n | | ----- X application user user2 \\\n | | \\ \\\n \\ \\\n +---------------------------------------------------------------+ role FOO::access_role\n | HDI container FOO | /\n | role default_access_role ----------------------------+\n | / \\ |\n | role role1 role role2 |\n | / / \\ |\n | structured privileges DCL role 1 / 2 | \n +---------------------------------------------------------------+\n```\n\nNote: The `default_access_role` is assumed to be an \"umbrella\" role which aggregates other roles.\n\nA role with the default permission set which is granted by the HANA Service Broker on container creation looks as follows:\n\n`default_permissions_role.hdbrole`:\n\n```\n{\n \"role\":{\n \"name\":\"default_permissions_role\",\n \"schema_privileges\":[\n {\n \"privileges\":[\n \"SELECT\",\n \"INSERT\",\n \"UPDATE\",\n \"DELETE\",\n \"EXECUTE\",\n \"CREATE TEMPORARY TABLE\",\n \"SELECT CDS METADATA\"\n ]\n }\n ]\n }\n}\n```\n\n## Reusable Database Modules\n\nIn order to allow that an application uses (parts of) the database persistence of a reusable component inside its own persistence model, the HDI Deployer allows to link/include the 
design-time files of reusable components in a consuming application in an automated way. This mechanism is based on the Node.js package management mechanism for defining, publishing, and consuming reusable database modules which also supports versioning based on the semantic versioning concepts (cf. http://semver.org).\n\nA reusable database module is considered to have the same `src/` and `cfg/` folder structure as a normal database module. The `src/.hdiconfig` file is mandatory and used by the module mechanism as an indicator that the `src/` and `cfg/` folders belong to a consumable, reusable database module. In addition, the reusable database module needs to have a `package.json` file which defines the module's name, the module's version, etc.\n\nA complete reusable database module looks as follows:\n\n```\n/\n+-- src/\n| +-- .hdiconfig\n| +-- <source files ...>\n+-- cfg/\n| +-- <optional configuration files ...>\n+-- package.json\n```\n\nThe `package.json` file contains the module’s name, description, version, repository URL, and the set of files which belong to the module:\n\n`package.json`:\n\n```\n{\n \"name\": \"module1\",\n \"description\": \"A set of reusable database objects\",\n \"version\": \"1.3.1\",\n \"repository\": {\n \"url\": \"git@your.gitserver:modules/module1.git\"\n },\n \"files\": [\n \"src\",\n \"cfg\",\n \"package.json\"\n ]\n}\n```\n\nThe reusable database module should be published to a Node.js package management compliant object repository.\n\nConsumption of a reusable database module is done by adding a dependency in the consuming module's `package.json` file, right beside the dependency to `@sap/hdi-deploy`:\n\n```\n{\n \"name\": \"deploy\",\n \"dependencies\": {\n \"@sap/hdi-deploy\": \"3.6.0\",\n \"module1\": \"1.3.1\",\n \"module2\": \"1.7.0\"\n },\n \"scripts\": {\n \"start\": \"node node_modules/@sap/hdi-deploy/\"\n }\n}\n```\n\nHere, the consuming module requires `module1` in version `1.3.1` and `module2` in version 
`1.7.0`.\n\nWhen running `npm install` to download and install the dependencies which are listed in the dependencies section of the `package.json` file, `npm` will also download the reusable database modules and places them into the `node_modules/` folder of the consuming module. For each module a separate subfolder is created with the name of the module.\n\nWhen the HDI Deployer is triggered to do the actual deployment of the (consuming) database module, it scans the `node_modules/` folder and virtually integrates the `src/` and `cfg/` folders of found reusable database modules into the (consuming) database module’s `lib/` folder. Reusable database modules are identified by the mandatory `src/.hdiconfig` file.\n\nOn successful deployment, the HDI container will contain the consumed modules below the root-level `lib/` folder, e.g.\n\n```\n/\n+-- src/\n+-- cfg/\n+-- lib/\n| +-- module1/\n| | +-- src/\n| | +-- cfg/\n| +-- module2/\n| +-- src/\n| +-- cfg/\n```\n\nFor the time being, it’s not allowed to recursively include reusable database modules.\n\nThe `cfg/` folders of reusable database modules are also subject to configuration file templating.\n\n## Configuration File Templating\n\nThe HDI Deployer implements a templating mechanism for HDI configuration files, e.g. configuration files for synonyms, projection views, etc., based on services which are bound to the `db` module application. By means of this templating mechanism, it is possible to configure synonyms, projection views, etc. to point to the right database schema without knowing the schema name at development time.\n\nOn startup, the HDI Deployer recursively scans the local `cfg/` folder and picks up all files with a `.*config` suffix, e.g. all `.hdbsynonymconfig`, `.hdbvirtualtableconfig`, etc. files. 
For all collected files which contain `.configure` markers in their content, it applies the configuration templating and creates transient configuration files which are then deployed to the HDI container.\n\nFor a synonym configuration file `cfg/LOCAL_TARGET.hdbsynonymconfig`\n\n {\n \"LOCAL_TARGET\" : {\n \"target\" : {\n \"schema.configure\" : \"logical-external-service/schema\",\n \"database.configure\" : \"logical-external-service/database\",\n \"object\" : \"TARGET\"\n }\n }\n }\n\nthe section\n\n \"schema.configure\" : \"logical-external-service/schema\",\n \"database.configure\" : \"logical-external-service/database\",\n \"object\" : \"TARGET\"\n\nwill be transformed by the templating mechanism into\n\n \"schema\" : \"THE_SCHEMA\",\n \"database\" : \"THE_DATABASE\",\n \"object\" : \"TARGET\"\n\nwhere `THE_SCHEMA` and `THE_DATABASE` are the values for the `schema` and `database` fields of the bound service `logical-external-service`, which are denoted by the path expressions`logical-external-service/schema` and `logical-external-service/database`.\n\nIf a field in the service is missing, it will not be configured and will be removed instead, e.g. `database` might be optional.\n\nThe names of the services are subject to the service replacements mechanism, which can be used to map a real service, e.g. `real-external-service`, to a logical service name which is used in the configuration files, e.g. `logical-external-service`.\n\nIt's not always applicable to use `schema.configure`, `database.configure`, etc. in the configuration template files. Therefore, the HDI Deployer provides a generic way of copying a set of properties from the bound service, e.g. schema, database, remote source, etc. 
if they are present, although the template file doesn't mention them.\n\nFor the configuration file `cfg/LOCAL_TARGET.hdbsynonymconfig` this could look as follows:\n\n {\n \"LOCAL_TARGET\" : {\n \"target\" : {\n \"*.configure\" : \"logical-external-service\",\n \"object\" : \"TARGET\"\n }\n }\n }\n\nWhen the HDI Deployer encounters a `*.configure` entry, it simply copies all well-known fields which are present in the bound service into the configuration file. The well-known fields are currently `remote`, `database`, and `schema`.\n\nThe HDI Deployer also supports old-style `.hdbsynonymtemplate` template files: If a `.hdbsynonymtemplate` file is found in the `cfg/` or `src/` folder, then it is processed as a configuration template file and a transient file with the suffix `.hdbsynonymconfig` is created. A field `grantor` is replaced with the `schema` value from the referenced service; so, a `grantor` field is equivalent to a `\"schema.configure\" : \"the-service/schema\"` entry in a configuration template file.\n\n## Permissions to Container-External Objects\n\nAn HDI container is by default equipped with nearly zero database privileges, e.g. the object owner (`#OO` user) is mainly equipped with the `CREATE ANY` privilege on the container's runtime schema (e.g. schema `FOO` for an HDI container `FOO`). Since HANA 2 SPS00, the object owner is equipped with an additional restricted set of privileges for system views in the database's `SYS` schema, e.g. `SYS.VIEWS` or `SYS.TABLES`. These system views apply an additional row-level filter based on the object owner's other privileges, e.g. the object owner can only see metadata in `SYS.VIEWS` for views he has privileges on. 
So, without additional privileges, the object owner can only see metadata for the objects in his container schema.\n\nIn order to access database objects inside other database schemata or other HDI containers, and in order to deploy synonyms into the HDI container which point to these container-external objects, at least the object owner needs additional privileges, e.g. for an object `object` in schema `X` `SELECT` privileges on `X.object`:\n\n```\n +---------------------------------------------------------------+ +------------------------+\n | HDI container FOO | | other schema X |\n | synonym ------------------------> object |\n +---------------------------------------------------/-----------+ +-------------\\----------+\n / \\\n o / \\\n X object owner FOO#OO -------------------- SELECT on X.object\n```\n\nPlease also refer to the official [Using Synonyms to Access External Schemas and Objects in XS Advanced](https://help.sap.com/viewer/4505d0bdaf4948449b7f7379d24d0f0d/2.0.02/en-US/bdc9f7ae66134c279a5f3683bba9b361.html) guide.\n\n#### .hdbgrants Files\n\nIn order to automatically assign privileges to the object owner and/or the application binding users, the HDI Deployer provides `.hdbgrants` files with a syntax similar to `.hdbrole` files:\n\nAn `.hdbgrants` file has the following structure:\n\n`granting-service.hdbgrants`:\n\n```\n{\n \"granting-service\": {\n \"object_owner\": {\n <privileges>\n },\n \"application_user\": {\n <privileges>\n }\n }\n}\n```\n\nThe top-level keys define the names of the bound services which \"grant\" the privileges, these are the \"grantors\", e.g. `granting-service` in the example. The next level defines to which users the privileges will be granted, these are the \"grantees\": `object_owner` is used for the HDI container's object owner, and `application_user` marks the application users which are bound to the application modules, e.g. the Node.js module. 
The third level defines the set of privileges in a `.hdbrole`-like structure.\n\nOn startup, the HDI Deployer looks for `.hdbgrants` files and processes them as follows: For each grantor in the file, the HDI Deployer looks up a bound service with the name (subject to service replacements), connects to the database with the service's credentials, and grants the specified privileges to the grantees. If the `schema` field is omitted for a privilege, then the grantor's `schema` property is used. If the `name` field in a `global_object_privileges` element of type `REMOTE SOURCE` is omitted, then the grantor's `remote` property is used.\n\nFor backwards compatibility, also the suffix `.hdbsynonymgrantor` is supported.\n\nExample of a `cfg/external-access.hdbgrants` file with some privileges for the object owner:\n\n```\n{\n \"external-access\": {\n \"object_owner\": {\n \"system_privileges\" : [\n {\n \"privileges\" : [ \"SYSTEM_PRIVILEGE_1\" ],\n \"privileges_with_admin_option\" : [ \"SYSTEM_PRIVILEGE_2\", \"SYSTEM_PRIVILEGE_3\" ]\n }\n ],\n \"global_roles\" : [\n {\n \"roles\" : [ \"GLOBAL_ROLE_1\", \"GLOBAL_ROLE_2\" ],\n \"roles_with_admin_option\" : [ \"GLOBAL_ROLE_3\", \"GLOBAL_ROLE_4\" ]\n }\n ],\n \"schema_privileges\" : [\n {\n \"privileges\" : [ \"INSERT\", \"UPDATE\" ],\n \"privileges_with_grant_option\" : [ \"SELECT\" ]\n }\n ],\n \"schema_roles\" : [\n {\n \"roles\" : [ \"SCHEMA_ROLE_1\", \"SCHEMA_ROLE_2\" ],\n \"roles_with_admin_option\" : [ \"SCHEMA_ROLE_3\", \"SCHEMA_ROLE_4\" ]\n }\n ],\n \"object_privileges\" : [\n {\n \"name\": \"AN_OBJECT\",\n \"privileges\": [ \"INSERT\", \"UPDATE\" ],\n \"privileges_with_grant_option\" : [ \"SELECT\" ]\n }\n ],\n \"global_object_privileges\" : [\n {\n \"name\" : \"A_REMOTE_SOURCE\",\n \"type\" : \"REMOTE SOURCE\",\n \"privileges\" : [ \"CREATE VIRTUAL TABLE\" ],\n \"privileges_with_grant_option\" : [ \"CREATE VIRTUAL PROCEDURE\" ]\n }\n ]\n }\n }\n}\n```\n\nThe following elements and keys are supported for backwards 
compatibility or for compatibility with `.hdbrole`:\n\n- `container_roles`: grant roles from an HDI container; superseded by `schema_roles` which works for normal schemas and HDI containers\n```\n \"container_roles\" : [ \"SCHEMA_ROLE_1\", \"SCHEMA_ROLE_2\" ]\n```\n- `roles`: grant global roles; superseded by `global_roles`:\n```\n \"roles\" : [ \"GLOBAL_ROLE_1\", \"GLOBAL_ROLE_2\" ]\n```\n- string-array-style roles and `names` key (maps to the non-grant/admin-option variant):\n```\n \"global_roles\" : [\n \"GLOBAL_ROLE_1\",\n {\n \"roles\" : [ \"GLOBAL_ROLE_1\", \"GLOBAL_ROLE_2\" ]\n },\n {\n \"names\" : [ \"GLOBAL_ROLE_1\", \"GLOBAL_ROLE_2\" ]\n },\n {\n \"roles_with_admin_option\" : [ \"GLOBAL_ROLE_3\", \"GLOBAL_ROLE_4\" ]\n },\n \"GLOBAL_ROLE_2\"\n ]\n```\n\nIf any non-container privileges are used, then the object owner (`#OO` user) will need to be given these privileges WITH GRANT option by a user-defined granting-service. Otherwise it won't be able to grant these privileges to e.g. a role in the container.\n\n#### Creating a Granting Service\n\nThe HDI Deployer supports the following types of granting-services:\n\n- `hdi`: an HDI container with access to the container's GRANT APIs\n- `sql`: a technical database user with GRANT privileges for the required object privileges, roles, system privileges, etc.\n- `procedure`: a technical database user with EXECUTE privileges on a stored procedure which has GRANT privileges for the required object privileges, roles, system privileges, etc.\n- `ignore`: grants were already given at the database-level and the HDI Deployer will ignore the content of the `.hdbgrants` file.\n\nFor the HDI container case, the corresponding service can simply be bound to the db module application. 
The HDI Deployer recognizes the bound service by its `hdi_user` value in the credentials section and calls the container's API procedures to grant the privileges from the `.hdbgrants` file.\n\nIn case a technical database user is used, a 'user-defined service' must be created for this purpose in the same space as the container. The service needs to be set up with the permissions of a specified database user to connect to the database and to grant the privileges specified in the `.hdbgrants` files during application deployment.\n\nSuch a user-provided service can be created as follows:\n\n- Open a command shell and log on to XSA:\n`xs login`\n- Change to the target space where you want to create the user-defined service:\n`xs target -s <SPACE>`\n- Create the user-defined service (e.g. `grantor-service`):\n`xs cups grantor-service -p '{ \"host\": \"host.acme.com\", \"port\": \"30015\", \"certificate\": \"<myCertificate>\", \"user\": \"TARGET_USER\", \"password\": \"Grant_123\", \"schema\": \"TARGET_SCHEMA\", \"tags\": [ \"hana\" ] }'`\n - `\"host\"/\"port\"`: Required for the connection to the database: port is the SQL port of the index server.\n - `\"certificate\"`: If the database is configured to only accept secure connections, then the granting-service requires an SSL certificate that must be included in the user-provided service, for example, using the \"certificate\":\"<myCertificate>\" parameter.\n - `\"user\"/\"password\"`: Connection details for a database user that has grant permissions for the objects in the schema.\n - `\"schema\"`: The database schema that contains the objects to which access is to be granted.\n - `\"type\"`: The type of the grantor mechanism; valid values are `\"hdi\"`, `\"sql\"`, or `\"procedure\"`. 
If the type is not specified, then the type is auto-sensed (see details below).\n- Use the command `xs services` to display a list of services available in the current space; the 'grantor-service' service should be visible.\n\nFor Cloud Foundry, use the corresponding `cf` commands.\n\nNote: Starting with version 3.0.0 of the HDI Deployer, the `\"host\"`, `\"port\"`, and `\"certificate\"` parameters are no longer required since they can be obtained from the target container binding. In this case, you must only specify the `\"user\"`, `\"password\"`, and `\"schema\"` when creating the user-provided service, e.g. `xs cups grantor-service -p '{ \"user\": \"TARGET_USER\", \"password\": \"Grant_123\", \"schema\": \"TARGET_SCHEMA\", \"tags\": [ \"hana\" ] }'`.\n\nIf the `\"type\"` is not specified, then the type is selected based on the following rule: if the field `hdi_user` is present, then the type is auto-sensed as `hdi`; otherwise, the type is set to `sql`.\n\nIf the technical database user does not have GRANT privileges of its own, but only EXECUTE privileges on a stored procedure which can grant the privileges, then the following settings are required:\n\n- At the database, a GRANT procedure must exist (or be visible) in the schema which is used in the user-provided service; an example is shown below.\n- The technical database user must have EXECUTE privileges on the GRANT procedure.\n- The name of the GRANT procedure must be specified in the user-provided service in the `\"procedure\"` field, e.g. `\"procedure\": \"GRANT\"`.\n- The schema name of the GRANT procedure can be specified in the user-provided service in the `\"procedure_schema\"` field, e.g. 
`\"procedure_schema\": \"A_SCHEMA\"`.\n- The user-provided service must contain a `\"type\"` field with the value `\"procedure\"`.\n\nFor the different types of privileges, the following fields are passed to the GRANT procedure:\n\n| PRIVILEGE_TYPE | PRIVILEGE_NAME | OBJECT_SCHEMA | OBJECT_NAME | OBJECT_TYPE | GRANTEE_SCHEMA | GRANTEE_NAME | GRANTABLE |\n| --- | --- | --- | --- | --- | --- | --- | --- |\n| SCHEMA_OBJECT_PRIVILEGE | privilege | schema | object | NULL | NULL | grantee | TRUE/FALSE |\n| GLOBAL_OBJECT_PRIVILEGE | privilege | NULL | object | type | NULL | grantee | TRUE/FALSE |\n| SCHEMA_ROLE | NULL | schema | role | NULL | NULL | grantee | TRUE/FALSE |\n| GLOBAL_ROLE | NULL | NULL | role | NULL | NULL | grantee | TRUE/FALSE |\n| SCHEMA_PRIVILEGE | privilege | NULL | schema | NULL | NULL | grantee | TRUE/FALSE |\n| SYSTEM_PRIVILEGE | privilege | NULL | NULL | NULL | NULL | grantee | TRUE/FALSE |\n\nExample of a GRANT procedure:\n\n```\nCREATE PROCEDURE GRANT(\n IN PRIVILEGES TABLE (\n PRIVILEGE_TYPE NVARCHAR(128), -- 'SCHEMA_OBJECT_PRIVILEGE'\n -- 'GLOBAL_OBJECT_PRIVILEGE'\n -- 'SCHEMA_ROLE'\n -- 'GLOBAL_ROLE'\n -- 'SCHEMA_PRIVILEGE'\n -- 'SYSTEM_PRIVILEGE'\n PRIVILEGE_NAME NVARCHAR(256), -- cf. SYS.PRIVILEGES\n OBJECT_SCHEMA NVARCHAR(256), -- NULL or schema\n OBJECT_NAME NVARCHAR(256),\n OBJECT_TYPE NVARCHAR(128), -- NULL or 'REMOTE SOURCE'\n GRANTEE_SCHEMA NVARCHAR(256), -- NULL or schema\n GRANTEE_NAME NVARCHAR(256),\n GRANTABLE NVARCHAR(5) -- 'TRUE' or 'FALSE'\n )\n)\nLANGUAGE SQLSCRIPT\nSQL SECURITY DEFINER\nAS\nBEGIN\n DECLARE ERROR CONDITION FOR SQL_ERROR_CODE 10000;\n DECLARE CURSOR PRIVILEGES_CURSOR FOR SELECT * FROM :PRIVILEGES;\n\n -- TODO: add checks for valid grantees, e.g. check with _SYS_DI#<group>.M_CONTAINER_SCHEMAS\n -- or with SYS.USERS and creator and grantee like '%#OO'\n -- TODO: keep only functionality that should be allowed, e.g. 
only allow to grant schema-local\n -- roles, but no object privileges, etc.\n\n FOR PRIVILEGE AS PRIVILEGES_CURSOR\n DO\n DECLARE TO_GRANTEE_CLAUSE NVARCHAR(512);\n DECLARE GRANTABLE_CLAUSE NVARCHAR(512) = '';\n\n IF PRIVILEGE.GRANTEE_SCHEMA IS NULL THEN\n TO_GRANTEE_CLAUSE = ' TO \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.GRANTEE_NAME) || '\"';\n ELSE\n TO_GRANTEE_CLAUSE = ' TO \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.GRANTEE_SCHEMA)\n || '\".\"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.GRANTEE_NAME) || '\"';\n END IF;\n\n IF PRIVILEGE.GRANTABLE = 'TRUE' THEN\n IF PRIVILEGE.PRIVILEGE_TYPE = 'SYSTEM_PRIVILEGE' OR\n PRIVILEGE.PRIVILEGE_TYPE = 'GLOBAL_ROLE' OR\n PRIVILEGE.PRIVILEGE_TYPE = 'SCHEMA_ROLE' THEN\n GRANTABLE_CLAUSE = ' WITH ADMIN OPTION';\n ELSE\n GRANTABLE_CLAUSE = ' WITH GRANT OPTION';\n END IF;\n ELSEIF PRIVILEGE.GRANTABLE != 'FALSE' THEN\n SIGNAL ERROR SET MESSAGE_TEXT = 'unsupported value for GRANTABLE: '\n || PRIVILEGE.GRANTABLE;\n END IF;\n\n IF PRIVILEGE.PRIVILEGE_TYPE = 'SCHEMA_OBJECT_PRIVILEGE' THEN\n EXEC 'GRANT \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.PRIVILEGE_NAME) || '\"'\n || ' ON \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_SCHEMA)\n || '\".\"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '\" '\n || TO_GRANTEE_CLAUSE\n || GRANTABLE_CLAUSE;\n ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'GLOBAL_OBJECT_PRIVILEGE' THEN\n IF PRIVILEGE.OBJECT_TYPE = 'REMOTE SOURCE' THEN\n EXEC 'GRANT \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.PRIVILEGE_NAME) || '\"'\n || ' ON ' || PRIVILEGE.OBJECT_TYPE || ' \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '\" '\n || TO_GRANTEE_CLAUSE\n || GRANTABLE_CLAUSE;\n ELSE\n SIGNAL ERROR SET MESSAGE_TEXT = 'unsupported value for OBJECT_TYPE for GLOBAL_OBJECT_PRIVILEGE: '\n || PRIVILEGE.OBJECT_TYPE;\n END IF;\n ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'SCHEMA_ROLE' THEN\n EXEC 'GRANT \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_SCHEMA)\n || '\".\"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '\" '\n || TO_GRANTEE_CLAUSE\n || GRANTABLE_CLAUSE;\n 
ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'GLOBAL_ROLE' THEN\n EXEC 'GRANT \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '\" '\n || TO_GRANTEE_CLAUSE\n || GRANTABLE_CLAUSE;\n ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'SCHEMA_PRIVILEGE' THEN\n EXEC 'GRANT \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.PRIVILEGE_NAME) || '\"'\n || ' ON SCHEMA \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.OBJECT_NAME) || '\" '\n || TO_GRANTEE_CLAUSE\n || GRANTABLE_CLAUSE;\n ELSEIF PRIVILEGE.PRIVILEGE_TYPE = 'SYSTEM_PRIVILEGE' THEN\n EXEC 'GRANT \"' || ESCAPE_DOUBLE_QUOTES(PRIVILEGE.PRIVILEGE_NAME) || '\"'\n || TO_GRANTEE_CLAUSE\n || GRANTABLE_CLAUSE;\n ELSE\n SIGNAL ERROR SET MESSAGE_TEXT = 'unsupported value for PRIVILEGE_TYPE: '\n || PRIVILEGE.PRIVILEGE_TYPE;\n END IF;\n END FOR;\nEND;\n```\n\n#### Defining the Granting Service in the mta[d].yaml\n\nIf the container needs a granting-service, then besides the service itself, the Application Development Descriptor mta.yaml needs to be adjusted for the deployer to be able to find the service. The mta.yaml must be modified to:\n\n1. The container of the `db` module needs to get a `TARGET_CONTAINER` property to mark the service that corresponds to the container\n2. A new entry in `requires` is added for the granting-service\n3. A new entry in `resources` is added for the granting-service\n\nExample:\n`mta.yaml`:\n\n```\nschema-version: '2.0'\nID: granting-service-example\nversion: 0.0.1\n\nmodules:\n - name: db\n type: hdb\n path: db\n requires:\n - name: hdi-container\n properties: # 1.\n TARGET_CONTAINER: ~{hdi-container-service} # 1.\n \n - name: granting-service # 2.\n \nresources:\n - name: hdi-container\n type: com.sap.xs.hdi-container\n properties:\n hdi-container-service: ${service-name} \n\n - name: granting-service # 3.\n type: org.cloudfoundry.existing-service # 3.\n```\n\n## Environment Variables for Applications\n\n`@sap/hdi-deploy` supports (re-)configuration via the following environment variables which are exposed to applications, e.g. 
via the CF/XSA `manifest.yml` or the MTA descriptor `mta.yaml`:\n\n- `TARGET_CONTAINER`: (optional) service name that specifies the HDI target container (needed, if more than one service is bound to the HDI Deployer)\n- `SERVICE_REPLACEMENTS`: (optional) JSON-structured list of service replacements, e.g. `[ { \"key\": \"logical-service-name-1\", \"service\":\"real-service-name-1\"}, { \"key\": \"logical-service-name-2\", \"service\":\"real-service-name-2\"} ]`, where the logical service names refer to the names in the HDI content and the real service names refer to the services which are bound to the HDI Deployer via `VCAP_SERVICES`; if the HDI content references a service name which is not listed in the replacements, then this name is used as a real service name\n\nThe structure of the `SERVICE_REPLACEMENTS` environment variable is based on the MTA specification in order to enable MTA group assignments.\n\nExample `manifest.yml`:\n\n    applications:\n    - name: app-db\n      path: db\n      services:\n       - app-database\n       - real-grantor-service\n       - real-external-service\n      env:\n        TARGET_CONTAINER: app-database\n        SERVICE_REPLACEMENTS: >\n          [\n            {\n              \"key\" : \"logical-grantor-service\",\n              \"service\" : \"real-grantor-service\"\n            },\n            {\n              \"key\" : \"logical-external-service\",\n              \"service\" : \"real-external-service\"\n            }\n          ]\n\n## Environment Variables for Infrastructure / Development Tools\n\n`@sap/hdi-deploy` supports (re-)configuration via the following environment variables for infrastructure / development tools like the Deploy Service or internal build tools of the WEB IDE\n\n- `DEPLOY_ID`: (optional) if set, the given id will be written to the final application log entry (custom id, to support processes in parsing log output)\n- `HDI_DEPLOY_OPTIONS`: (optional) JSON-structured set of options for the HDI Deployer, e.g. 
`{ \"auto_undeploy\" : true, \"exit\" : true, \"root\" : \"/volumes/A/workspaces/B/db/\", \"include_filter\" : [ \"src/\", \"cfg/\" ] }`\n\n## Options for Interactive Scenarios\n\n`@sap/hdi-deploy` supports the following options for interactive deployment scenarios, e.g. for orchestration via the WEB IDE or for CI scripts:\n\n- `--[no-]verbose`: [don't] print detailed log messages to the console\n- `--structured-log <file>`: write log messages as JSON objects into the given file; messages are appended if the file already exists\n- `--[no-]exit`: [don't] exit after deployment of artifacts\n- `--[no-]lock-container`: [don't] acquire the container lock while working with the container\n- `--root <path>`: use the given root path for artifacts\n- `--working-set [<path> ..]`: define the given paths (directories and files) as the working set; a non-default working set applies additional restrictions, e.g. other options might be disallowed\n- `--include-filter [<path> ..]`: only include the given paths (directories and files) during delta detection\n- `--deploy [<file> ..]`: explicitly schedule the given files for deploy; extends the `include-filter` for collecting local files\n- `--[no-]treat-unmodified-as-modified`: [don't] treat unmodified files during delta detection as modified files\n- `--undeploy [<file> ..]`: explicitly schedule the given files for undeploy\n- `--parameter [<key>=<value> ..]`: pass the given list of key-value parameters to the deployment\n- `--[no-]auto-undeploy`: [don't] undeploy artifacts automatically based on delta detection and ignore the `undeploy.json` file\n- `--[no-]treat-warnings-as-errors`: [don't] treat warnings as errors\n- `--[no-]simulate-make`: [don't] simulate the make and skip post-make activities; pre-make activities still take effect, e.g. 
grants\n- `--connection-timeout <ms>`: number of milliseconds to wait for the database connection(s)\n- `--lock-container-timeout <ms>`: number of milliseconds to wait for the container lock\n\nSee `--help` for details and defaults.\n\nOptions can also be passed to `@sap/hdi-deploy` via the `HDI_DEPLOY_OPTIONS` environment variable.\n\n## Supported Features\n\n`@sap/hdi-deploy` exposes its set of features via the `info` option, which can be passed as `--option` or via `HDI_DEPLOY_OPTIONS`, e.g.\n\n node deploy --info [<component> [<component> [...]]]\n\nwhere a list of components can be specified.\n\nThe `info` option allows to pass multiple components. The `info` request for these components is optional, e.g. if the HDI Deployer doesn't support the component, then it will not throw an error, but simply not return information for that component. The special component `all` will return the information for all known components; `all` is the default if no component is specified. For certain future components, e.g. `server`, the HDI Deployer might need to connect to the HDI container in the database and retrieve feature information from there.\n\nExamples:\n\n```\nnode deploy --info all\nnode deploy --info client server\n```\n\nThe result of an `info` call is a JSON document where the top-level objects correspond to the requested components. 
Each component should at least report its name, its version, and the set of supported features with name and version number (version numbers are simple numbers (no dots, no double-dots)).\n\nIf a version number is negative, then the feature is supported by the client, but not supported by the server.\n\nFor a `--info client` call, the document looks as follows:\n\n```\n{\n \"client\": {\n \"name\": \"@sap/hdi-deploy\",\n \"version\": \"3.6.0\",\n \"features\": {\n \"info\": 2,\n \"verbose\": 1,\n \"structured-log\": 1,\n \"lock-container\": 1,\n \"default-access-role\": 1,\n \"grants\": 4,\n \"working-set\": 1,\n \"include-filter\": 1,\n \"deploy\": 1,\n \"treat-unmodified-as-modified\": 1,\n \"undeploy\": 1,\n \"parameter\": 1,\n \"treat-warnings-as-errors\": 1,\n \"simulate-make\": 1,\n \"service-replacements\": 1,\n \"modules\": 2,\n \"config-templates\": 2,\n \"environment-options\": 1,\n \"undeploy-whitelist\": 1\n }\n }\n}\n```\n\nFor the `server` component, the document would also contain the following data:\n\n```\n{\n...\n \"server\": {\n \"name\": \"sap-hana-database\",\n \"version\": \"1.00.120.04.0000000000\",\n \"features\": {}\n }\n}\n```\n\n\n## Deployment via hdi-dynamic-deploy\n\nThe standard XSA/CF way for deploying HDI content at runtime is to make use of @sap/hdi-dynamic-deploy instead of @sap/hdi-deploy directly. The @sap/hdi-dynamic-deploy app is an http server that calls @sap/hdi-deploy when it receives a corresponding HTTP POST request. See the @sap/hdi-dynamic-deploy module for more information.\n\n## Using hdi-deploy as a Node.js library\n\nSince version 3.3.0 of @sap/hdi-deploy it is also possible to use it as a Node.js library. By requiring the library.js file from the project root it is possible to start the deployer app from within another Node.js app. 
The module exports the function\n\n```\nfunction deploy(contentDir, deployerEnv, callback, io)\n```\n\nwith the following parameters:\n\n- `contentDir`: string containing a path pointing to the root of the db module to be deployed\n- `deployerEnv`: javascript object containing the OS environment for the call to the deployer (e.g. containing VCAP_SERVICES)\n- `callback`: a callback for the result of the call to the deployer accepting a response parameter of the form:\n\n```\n{\n messages: [<list of result messages from the di server>],\n exitCode: <exit code of the call to the deployer app>\n}\n```\n \n- `io` (optional): javascript object containing two callback functions `io.stdoutCB` and `io.stderrCB` of the form `function(data)` for streaming stdout and stderr of the call to the deployer, defaults to piping stdout and stderr of the deployer to stdout and stderr of the calling Node.js app\n","readmeFilename":"README.md","repository":{},"scripts":{"eslint":"eslint *.js lib/","lint":"jshint *.js lib/","prepare-release":"clean-packages && npm prune --production && ls -al","start":"node deploy.js","test":"test/test-quick.sh","test-all":"test/test-all.sh","test-all-filtered":"test/test-all-filtered.sh","test-all-filtered.win":"test\\test-all-filtered.bat","test-all.js":"node test-all-filtered.js","test-all.win":"test\\test-all.bat","test.js":"node test-quick.js","test.win":"test\\test-quick.bat"},"version":"3.6.0","license":"SEE LICENSE IN developer-license-3.1.txt"}
Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc