@sap/cds-mtxs - npm Package Compare versions

Comparing version 1.12.1 to 1.13.0


CHANGELOG.md

@@ -9,4 +9,18 @@ # Change Log

## Version 1.13.0 - 2023-11-13
### Added
- `GET /-/cds/saas-provisioning/tenant` now also returns `createdAt` and `modifiedAt` fields.
### Changed
- The internal job runner now has an in-memory queuing mechanism. For non-scaled sidecar instances, this prevents tasks for the same tenant from running at the same time.
## Version 1.12.1 - 2023-10-17
### Added
- API `PUT /-/cds/extensibility/Extensions/<extension id>` now also accepts i18n files.
### Fixed

@@ -13,0 +27,0 @@
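For illustration, the metadata returned for a single tenant by GET /-/cds/saas-provisioning/tenant now also carries the two timestamps; a rough sketch of the response shape (timestamps made up, the spread fields come from whatever subscription metadata was stored for the tenant, see the SaaS provisioning diff further down). An example for the new i18n upload follows the Extensions diff below.

{
  subscribedTenantId: '<tenant id>',
  // ...fields parsed from the stored subscription metadata...
  createdAt: '2023-11-13T08:15:00.000Z',
  modifiedAt: '2023-11-13T09:30:00.000Z'
}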


lib/migration/migration.js

@@ -152,3 +152,3 @@ const path = require('path')

const { directory, dry, force, tagRule: tagRegex, tag: defaultTag, "skip-verification": skipVerification, "ignore-migrations": ignoreMigrations } = options
const { directory, dry, force, tagRule, tag: defaultTag, "skip-verification": skipVerification, "ignore-migrations": ignoreMigrations } = options

@@ -227,3 +227,3 @@ const migrationResult = new MigrationResult()

const tags = await createProjects(mtxExtension, tenantProjectFolder, tagRegex, defaultTag)
const tags = await createProjects(mtxExtension, tenantProjectFolder, tagRule, defaultTag)

@@ -253,2 +253,8 @@ // upload and verify extensions

if (!dry && force) {
// cleanup in case extension ids have changed
if (tenant) cds.context = { tenant }
await DELETE.from('cds.xt.Extensions')
}
for (const tag of tags) {

@@ -282,3 +288,3 @@ const projectFolder = path.join(tenantProjectFolder, tag)

if (!dry) await mtxAdapter.setMigrated(tenant)
if (!dry) await mtxAdapter.setMigrated(tenant, tagRule, defaultTag, !!force /* ensure it is not undefined so that a record is written */)
migrationResult.log(tenant, `Migration of tenant ${tenant} done.`)

@@ -367,5 +373,15 @@ } catch (error) {

let parameters = { tagRule, defaultTag }
if (!(tagRule || defaultTag)) {
try {
const storedParameters = await mtxAdapter.getMigrationParameters(tenant) ?? {}
parameters = { ...parameters, ...storedParameters }
} catch (e) {
req.reject(404, `No migrated projects found for tenant ${tenant}`)
}
}
// run dry migration with force + own temp directory
// { directory, dry, force, tagRule: tagRegex, tag: defaultTag, "skip-verification": skipVerification, "ignore-migrations": ignoreMigrations }
await module.exports.migrate([tenant], { directory: temp, dry: true, skipVerification: true, tagRule, tag: defaultTag, force: true})
await module.exports.migrate([tenant], { directory: temp, dry: true, skipVerification: true, tagRule: parameters.tagRule, tag: parameters.defaultTag, force: true})

@@ -372,0 +388,0 @@ const projectsLocation = path.join(temp, tenant)


lib/migration/mtx-adapter.js

@@ -66,3 +66,3 @@ const cds = require('@sap/cds')

async function saveMetadata(tenant, type, data) {
module.exports.saveMetadata = async (tenant, type, data) => {
const domain = cds.env.mtx && cds.env.mtx.domain || '__default__'

@@ -187,3 +187,7 @@ const statement = 'UPSERT TENANT_METADATA (ID, CONTENT, DOMAIN) VALUES(?, ?, ?) WHERE ID = ? AND DOMAIN = ?';

module.exports.setMigrated = async (tenant) => {
module.exports.getMigrationParameters = async (tenant) => {
return getMetadata(tenant, 'migrationParameters')
}
module.exports.setMigrated = async (tenant, tagRule, defaultTag, force) => {
const timestamp = new Date();

@@ -194,3 +198,4 @@ const timestampIso = timestamp.toISOString();

}
await saveMetadata(tenant, 'migrated', data)
await module.exports.saveMetadata(tenant, 'migrated', data)
await module.exports.saveMetadata(tenant, 'migrationParameters', { tagRule, defaultTag, force })
}

@@ -197,0 +202,0 @@
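As a rough sketch (values illustrative): setMigrated now persists the parameters of the last migration alongside the 'migrated' marker, and getMigrationParameters reads them back so a later run can fall back to them when no tagRule/defaultTag is passed (see the migration.js diff above).

// stored via saveMetadata(tenant, 'migrationParameters', ...)
{ tagRule: '<tag regex>', defaultTag: '<fallback tag>', force: true }

The next hunk, which starts again at line 1, belongs to the shared utils module that exports retry and the new t0_ helper.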

@@ -1,3 +0,2 @@

const { readdir, isdir } = require('@sap/cds').utils
const path = require('path')
const LOG = cds.log('mtx')
const { promisify } = require('util')

@@ -108,3 +107,3 @@

// REVISIT: not for all error cases a retry is an appropriate handling mechanism -> error code allow/blocklist
const retry = async(fn, LOG, retryCount = 5, retryGap = 5 * 1000) => {
const retry = async(fn, retryCount = 5, retryGap = 5 * 1000) => {
let errorCount = 0

@@ -126,2 +125,12 @@ let finalError

const t0 = cds.env.requires.multitenancy?.t0 ?? 't0'
// TODO: Decide on retry
let _t0Csn
const t0_ = async (query) => /*retry(async () =>*/ {
_t0Csn ??= cds.compile.for.nodejs(
await cds.load(`${__dirname}/../db/t0.cds`, { silent: true })
)
return cds.tx({ tenant: t0 }, tx => { tx.model = _t0Csn; return tx.run(query) })
}/*)*/
module.exports = {

@@ -135,3 +144,4 @@ EXT_BACK_PACK,

getCompilerError,
retry
t0_,
retry,
}
package.json

{
"name": "@sap/cds-mtxs",
"version": "1.12.1",
"version": "1.13.0",
"description": "SAP Cloud Application Programming Model - Multitenancy library",

@@ -5,0 +5,0 @@ "homepage": "https://cap.cloud.sap/",

@@ -98,10 +98,11 @@ const cds = require('@sap/cds/lib')

const one = await cds.tx({ tenant: t0 }, tx =>
tx.run(SELECT.one.from(Tenants, { ID: tenant }, tenant => { tenant.metadata }))
)
tx.run(SELECT.one.from(Tenants).columns(['metadata', 'createdAt', 'modifiedAt']).where({ ID: tenant }))
)
if (!one) cds.error(`Tenant ${tenant} not found`, { status: 404 })
return { subscribedTenantId: tenant, ...JSON.parse(one.metadata ?? '{}') }
const { metadata, createdAt, modifiedAt } = one
return { subscribedTenantId: tenant, ...JSON.parse(metadata ?? '{}'), createdAt, modifiedAt }
}
return (await cds.tx({ tenant: t0 }, tx =>
tx.run(SELECT.from(Tenants, tenant => { tenant.ID, tenant.metadata }))
)).map(({ ID, metadata }) => ({ subscribedTenantId: ID, ...JSON.parse(metadata) }))
tx.run(SELECT.from(Tenants).columns(['ID', 'metadata', 'createdAt', 'modifiedAt']))
)).map(({ ID, metadata, createdAt, modifiedAt }) => ({ subscribedTenantId: ID, ...JSON.parse(metadata), createdAt, modifiedAt }))
}

@@ -108,0 +109,0 @@

@@ -31,3 +31,3 @@ const cds = require('@sap/cds/lib')

this.on('getMigratedProjects', (req) => {
const { tagRule, defaultTag } = req.data
let { tagRule, defaultTag } = req.data

@@ -38,3 +38,3 @@ // REVIEW check if access for arbitrary tenants needed

return getMigratedProjects(req, tagRule, defaultTag, tenant)
return getMigratedProjects(req, tagRule || undefined, defaultTag || undefined, tenant)
})

@@ -41,0 +41,0 @@

@@ -45,4 +45,5 @@ const cds = require('@sap/cds/lib')

const _getFiles = function (resources) {
const bundles = {}
const _getFiles = function (resources, req) {
let bundles = {}
let fromJson = false
const csvs = {}

@@ -53,5 +54,17 @@ if (resources && Array.isArray(resources) && resources.length) {

if (key) {
if (fromJson) req.reject(422, `Mixed i18n file types not supported: i18n.json and ${file.name}`)
bundles[key[1]] = _toJson(file.content)
return
}
key = file.name.match(/i18n.json/)
if (key) {
if (Object.entries(bundles).length) req.reject(422, `Mixed i18n file types not supported: i18n.json and .properties`)
try {
bundles = JSON.parse(file.content)
fromJson = true
} catch (e) {
req.reject(422, 'Invalid json content in i18n.json')
}
return
}
key = file.name.match(/.*\.csv/)

@@ -143,3 +156,3 @@ if (key) {

const { bundles, csvs } = _getFiles(resources)
const { bundles, csvs } = _getFiles(resources, req)
if (tag) await DELETE.from('cds.xt.Extensions').where({ tag })

@@ -146,0 +159,0 @@ await _addExtension(extCsn, tag, bundles, csvs, tenant, activate, req)

@@ -11,4 +11,4 @@ const cds = require('@sap/cds/lib')

const ext = !req.data?.ID ? await SELECT.from('cds.xt.Extensions') : await SELECT.one.from('cds.xt.Extensions').where({ tag: req.data.ID })
if (Array.isArray(ext)) return ext.filter(item => item.tag !== TOMBSTONE_ID).map(item => ({ ID: item.tag, csn: item.csn, timestamp: item.timestamp }))
if (ext) return { ID: ext.tag, csn: ext.csn, timestamp: ext.timestamp }
if (Array.isArray(ext)) return ext.filter(item => item.tag !== TOMBSTONE_ID).map(item => ({ ID: item.tag, csn: item.csn, i18n: item.i18n !== '{}' ? item.i18n : undefined, timestamp: item.timestamp }))
if (ext) return { ID: ext.tag, csn: ext.csn, i18n: ext.i18n !== '{}' ? ext.i18n : undefined, timestamp: ext.timestamp }
return ext

@@ -20,5 +20,5 @@ }

// set handles the tenant switch in that case
await set_(req, { extension: [...req.data.csn], tag: req.data.ID, tenant: tenant ?? req.tenant })
await set_(req, { extension: [...req.data.csn], resources: req.data.i18n, tag: req.data.ID, tenant: tenant ?? req.tenant })
const res = await SELECT.one.from('cds.xt.Extensions').where({ tag: req.data.ID })
return { ID: res.tag, csn: res.csn, timestamp: res.timestamp }
return { ID: res.tag, csn: res.csn, i18n: res.i18n !== '{}' ? res.i18n : undefined, timestamp: res.timestamp }
}

@@ -25,0 +25,0 @@
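A hypothetical request illustrating the i18n upload added in 1.13.0. The payload shape is inferred from the handlers above: req.data.i18n is passed as resources to set_, and _getFiles reads file.name and file.content, accepting either a single i18n.json or .properties files, but not both; the bundle content below is only an illustration.

PUT /-/cds/extensibility/Extensions/<extension id>
{
  csn: [ /* extension sources, as before */ ],
  i18n: [
    { name: 'i18n.json', content: '{ "en": { "BookTitle": "Title" }, "de": { "BookTitle": "Titel" } }' }
  ]
}

On read, the stored bundles are now returned in the i18n field unless they are empty ('{}').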

@@ -63,3 +63,3 @@ const cds = require('@sap/cds/lib');

const toLog = `Authentication failed: ${axError.message} ${details}Passcode URL: ${passcodeUrl}`;
const toSend = `Authentication failed: ${axError.message}. Passcode URL: ${passcodeUrl}`;
const toSend = `Authentication failed: ${axError.message} Passcode URL: ${passcodeUrl}`;

@@ -66,0 +66,0 @@ const status = axError.status ?? 500;

const { inspect } = require('util')
const cds = require('@sap/cds/lib'), { uuid } = cds.utils
const { retry } = require('../lib/utils')
const LOG = cds.log('mtx')
const cds = require('@sap/cds'), { uuid } = cds.utils
const LOG = cds.log('mtx'), DEBUG = cds.debug('mtx')
const Jobs = 'cds.xt.Jobs', Tasks = 'cds.xt.Tasks'
const t0 = cds.env.requires.multitenancy.t0 ?? 't0'
const { t0_ } = require('../lib/utils')
const {
clusterSize = 1, workerSize = 1, poolSize = 1
queueSize = 100, clusterSize = 1, workerSize = 1, poolSize = 1
} = cds.env.requires.multitenancy.jobs ?? cds.env.requires['cds.xt.SaasProvisioningService']?.jobs ?? {}

@@ -16,4 +15,40 @@

// A queue, implemented as a circular buffer for O(1) insert + delete
class Queue {
constructor(capacity) {
this.buffer = new Array(capacity)
this.pointer = 0
this.size = 0
this.capacity = capacity
DEBUG?.(`initialized tenant operation job queue with capacity ${capacity}`)
}
enqueue(value) {
if (this.size === this.capacity) cds.error('Tenant operation job queue is full. Please try again later.', { status: 429 })
this.buffer[(this.pointer + this.size) % this.capacity] = value
this.size++
}
peek() {
return this.buffer[this.pointer]
}
dequeue() {
const value = this.buffer[this.pointer]
this.pointer = (this.pointer + 1) % this.capacity
this.size--
return value
}
}
const jobQueue = new Queue(queueSize)
let runningJobs = []
module.exports = class JobsService extends cds.ApplicationService {
async init() {
this.on('READ', 'Jobs', async () => {
const jobs = await t0_(
SELECT.from(Jobs, j => { j.ID, j.op, j.status, j.error, j.tasks(t => { t.ID, t.status, t.tenant, t.error } )}).orderBy('createdAt desc')
)
return jobs.map(job => ({ ...job, tasks: job.tasks.sort((a, b) => a.tenant.localeCompare(b.tenant))}))
})
}
async enqueue(clusters, op, args = [], onJobDone) {
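A standalone sketch of how the circular-buffer queue above behaves, not part of the package (capacity of 2 chosen for illustration):

const q = new Queue(2)
q.enqueue('job A')
q.enqueue('job B')   // buffer is now full (size === capacity)
q.peek()             // -> 'job A': the oldest entry, left in place
q.dequeue()          // -> 'job A': the read pointer advances, freeing one slot
q.enqueue('job C')   // wraps around into the freed slot
// another enqueue while size === capacity would fail via cds.error(..., { status: 429 })

Together with pickJob further down, this queue is what serializes jobs whose tenants overlap, as announced in the changelog.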

@@ -25,17 +60,15 @@ const _args = args.length > 0 ? ['with args', inspect(args.filter(Boolean), { depth: 5, colors: true })] : []

const job = { ID: job_ID, createdAt: (new Date).toISOString(), op }
await retry(() => cds.tx({ tenant: t0 }, tx => tx.run(INSERT.into(Jobs, job))), LOG)
const jobs = clusters?.map(cluster => Array.from(cluster).map(tenant => ({ job_ID, ID: uuid(), tenant, op }))) ?? []
await t0_(INSERT.into(Jobs, job))
const jobs = clusters.map(cluster => Array.from(cluster).map(tenant => ({ job_ID, ID: uuid(), tenant, op })))
const tasks = jobs.flat()
if (tasks.length) {
await retry(() => cds.tx({ tenant: t0 }, tx => tx.run(INSERT.into(Tasks, tasks))), LOG)
_nextJob(jobs, task => {
await t0_(INSERT.into(Tasks, tasks))
jobQueue.enqueue({ job_ID, clusters: jobs, fn: task => {
const { 'cds.xt.DeploymentService': ds } = cds.services
return ds.tx({ tenant: cds.context.tenant }, tx => tx[op](task.tenant, ...args))
}, onJobDone).catch(err => LOG.error('next job raised an error', err))
}, onJobDone })
pickJob()
} else {
await retry(() => cds.tx({ tenant: t0 }, tx =>
tx.run(UPDATE(Jobs, { ID: job_ID }).with({ status: FINISHED }))
), LOG)
await t0_(UPDATE(Jobs, { ID: job_ID }).with({ status: FINISHED }))
}

@@ -51,2 +84,5 @@

[task.tenant, { ...task, job_ID: undefined, tenant: undefined, op: undefined }]
)),
tasks: Object.fromEntries(tasks.sort((a, b) => a.tenant.localeCompare(b.tenant)).map(task =>
[task.tenant, { ...task, job_ID: job.ID, tenant: task.tenant, op: job.op }]
))

@@ -57,26 +93,17 @@ }

async pollJob(ID) {
const job = await cds.tx({ tenant: t0 }, tx =>
tx.run(SELECT.one.from(Jobs).where({ ID }))
const job = await t0_(
SELECT.one.from(Jobs, j => { j.ID, j.op, j.error, j.status, j.tasks(t => { t.ID, t.status, t.tenant, t.error } )}).where({ ID })
)
if (!job) cds.error(`No job found for ID ${ID}`, { status: 404 })
let tenants
if (job.status ?? job.STATUS in { FINISHED: 1, FAILED: 1 }) {
tenants = Object.fromEntries((await cds.tx({ tenant: t0 }, tx =>
tx.run(SELECT.from(Tasks).where({ job_ID: job.ID }))
)).map(task => [task.tenant ?? task.TENANT, {
status: task.status ?? task.STATUS,
error: task.error ?? task.ERROR ?? undefined
}]))
}
return {
status: job.status ?? job.STATUS,
op: job.op ?? job.OP,
tenants
}
job.tasks.sort((a, b) => a.tenant.localeCompare(b.tenant)) // REVISIT: Ideally j.tasks supports orderBy
job.tenants = Object.fromEntries(job.tasks.map(task => [task.tenant ?? task.TENANT, {
status: task.status ?? task.STATUS,
error: task.error ?? task.ERROR ?? undefined
}]))
return job
}
async pollTask(ID) {
const task = await cds.tx({ tenant: t0 }, tx =>
tx.run(SELECT.one.from(Tasks).where({ ID }))
)
const task = await t0_(SELECT.one.from(Tasks).where({ ID }))
return {

@@ -93,5 +120,7 @@ status: task.status ?? task.STATUS,

for (const payload of payloads) {
const { ID, tenant } = payload
if (asTask) await t0_(UPDATE(Tasks, { ID, tenant }).with({ status: RUNNING }))
const execute = asTask ? _nextTask(payload, fn(payload)) : fn(payload)
all.push(execute)
const executeAndRemove = execute.catch(() => {}).finally(() => pending.splice(pending.indexOf(executeAndRemove), 1))
const executeAndRemove = execute.catch(LOG.error).finally(() => pending.splice(pending.indexOf(executeAndRemove), 1))
pending.push(executeAndRemove)

@@ -105,2 +134,26 @@ if (pending.length >= limit) {

async function pickJob() {
if (jobQueue.size === 0) return
const next = new Set(jobQueue.peek().clusters.flat().map(t => t.tenant).flat())
// Later, for scaled instances. Requires Redis/heartbeat messages to prevent starvation, though.
// const running = await _run(SELECT.one.from(Jobs).where ({ status: RUNNING }))
// (!running) pickJob()
const running = runningJobs.map(j => j.clusters.flat().map(t => t.tenant)).flat()
if (running.some(t => next.has(t))) return
try {
const job = jobQueue.dequeue()
runningJobs.push(job)
const { job_ID, clusters, fn, onJobDone } = job
await t0_(UPDATE(Jobs, { ID: job_ID }).with({ status: RUNNING }))
await _nextJob(clusters, fn, onJobDone).catch(err =>
LOG.error('@sap/cds-mtxs JobsService error while running job:', err)
)
} finally {
runningJobs.splice(runningJobs.findIndex(j => j.job_ID === next.job_ID), 1)
}
setImmediate(pickJob)
}
async function _nextJob(clusters, fn, onJobDone) {

@@ -113,19 +166,11 @@ if (clusters.length > 1) {

const { job_ID } = clusters.flat()[0] // all tasks have the same job ID -> just take the first
const failed = await retry(() => cds.tx({ tenant: t0 }, tx =>
tx.run(SELECT.one.from(Tasks).where ({ job_ID, and: { status: FAILED }}))
), LOG)
const running = await retry(() => cds.tx({ tenant: t0 }, tx =>
tx.run(SELECT.one.from(Tasks).where ({ job_ID, and: { status: RUNNING }}))
), LOG)
const { job_ID } = clusters[0][0] // all tasks have the same job ID -> just take the first
const failed = await t0_(SELECT.one.from(Tasks).where ({ job_ID, and: { status: FAILED }}))
const running = await t0_(SELECT.one.from(Tasks).where ({ job_ID, and: { status: RUNNING }}))
if (failed) {
await retry(() => cds.tx({ tenant: t0 }, tx =>
tx.run(UPDATE(Jobs, { ID: job_ID }).with({ status: FAILED }))
), LOG)
await t0_(UPDATE(Jobs, { ID: job_ID }).with({ status: FAILED }))
if (onJobDone) await onJobDone(failed.error ?? failed.ERROR)
} else if (!running) {
await retry(() => cds.tx({ tenant: t0 }, tx =>
tx.run(UPDATE(Jobs, { ID: job_ID }).with({ status: FINISHED }))
), LOG)
await t0_(UPDATE(Jobs, { ID: job_ID }).with({ status: FINISHED }))
if (onJobDone) await onJobDone()

@@ -139,14 +184,10 @@ }

try {
return await _fn
await _fn
} catch (e) {
LOG.error(e)
hasErrored = true
await retry(() => cds.tx({ tenant: t0 }, tx =>
tx.run(UPDATE(Tasks, { ID, tenant }).with({ status: FAILED, error: e.message }))
), LOG)
await t0_(UPDATE(Tasks, { ID, tenant }).with({ status: FAILED, error: e.message }))
} finally {
if (!hasErrored) {
await retry(() => cds.tx({ tenant: t0 }, tx =>
tx.run(UPDATE(Tasks, { ID, tenant }).with({ status: FINISHED }))
), LOG)
await t0_(UPDATE(Tasks, { ID, tenant }).with({ status: FINISHED }))
}

@@ -161,4 +202,3 @@ }

const cutoff = new Date(new Date - (cds.env.requires.multitenancy.jobCleanupAge ?? 1000 * 60 * 60 * 24)) // a day
await cds.tx({ tenant: t0 },
tx => tx.run(DELETE.from(Jobs, { status: FAILED, or: { status: FINISHED, and: { createdAt: { '<': cutoff.toISOString() }}}}))
await t0_(DELETE.from(Jobs, { status: FAILED, or: { status: FINISHED, and: { createdAt: { '<': cutoff.toISOString() }}}})
)

@@ -171,5 +211,3 @@ }, cds.env.requires.multitenancy.jobCleanupInterval ?? 1000 * 60 * 60 * 24) // once a day

const cutoff = new Date(new Date - (cds.env.requires.multitenancy.jobCleanupAgeStale ?? 1000 * 60 * 60 * 24 * 7)) // a week
await cds.tx({ tenant: t0 },
tx => tx.run(DELETE.from(Jobs, { createdAt: { '<': cutoff.toISOString() }}))
)
await t0_(DELETE.from(Jobs, { createdAt: { '<': cutoff.toISOString() }}))
}, cds.env.requires.multitenancy.jobCleanupIntervalStale ?? 1000 * 60 * 60 * 24 * 7) // once a week
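For reference, a sketch of the configuration knobs this service reads, shown with the defaults used above. The nesting mirrors cds.env.requires (jobs can alternatively be configured under cds.env.requires['cds.xt.SaasProvisioningService'].jobs); where these values are set, e.g. in the cds section of package.json, depends on the project:

cds.env.requires.multitenancy = {
  jobs: { queueSize: 100, clusterSize: 1, workerSize: 1, poolSize: 1 },
  jobCleanupAge: 1000 * 60 * 60 * 24,              // age threshold for cleaning up finished jobs
  jobCleanupInterval: 1000 * 60 * 60 * 24,         // that cleanup runs once a day
  jobCleanupAgeStale: 1000 * 60 * 60 * 24 * 7,     // any job older than this is removed as stale
  jobCleanupIntervalStale: 1000 * 60 * 60 * 24 * 7 // stale cleanup runs once a week
}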

@@ -179,1 +217,14 @@ jobCleanupStale.unref()

}
// Later, for blue-green deployment support
// if (cds.requires.multitenancy?.jobs?.restartCancelled && cds.services['cds.xt.DeploymentService'].lazyT0 !== true) {
// pickJob()
// }
// cds.once('shutdown', async () => {
// if (runningJobs) {
// const jobIDs = runningJobs.map(j => j.job_ID)
// await _run(UPDATE(Jobs, { status: RUNNING }).with({ status: CANCELLED }).where({ ID: { in: jobIDs } }))
// await _run(UPDATE(Tasks, { status: RUNNING }).with({ status: CANCELLED }).where({ job_ID: { in: jobIDs } }))
// }
// })
const cds = require('@sap/cds/lib')
const { t0_ } = require('../../lib/utils')
const Tenants = 'cds.xt.Tenants'

@@ -7,7 +8,8 @@ const LOG = cds.log('mtx')

const t0 = cds.env.requires.multitenancy?.t0 ?? 't0'
// Add database-agnostic metadata handlers to DeploymentService...
cds.on ('serving:cds.xt.DeploymentService', ds => {
const t0 = cds.env.requires.multitenancy?.t0 ?? 't0'
const { lazyT0 } = cds.env.requires['cds.xt.DeploymentService'] ?? cds.env.requires.multitenancy ?? {}
const lazyT0 = cds.env.requires['cds.xt.DeploymentService']?.lazyT0 ?? cds.env.requires.multitenancy?.lazyT0

@@ -20,3 +22,2 @@ ds.before ('*', req => {

ds.before ('subscribe', req => {
const { lazyT0 } = cds.env.requires['cds.xt.DeploymentService'] ?? cds.env.requires.multitenancy ?? {}
if (lazyT0 && req.data.tenant !== cds.env.requires.multitenancy.t0) {

@@ -28,11 +29,12 @@ return _resubscribeT0IfNeeded(req.data.options?._)

ds.after ('subscribe', async (_, req) => {
// REVISIT: Use UPSERT instead
const { tenant, metadata } = req.data
if (tenant === t0) return
try {
await cds.tx({ tenant: t0 }, tx =>
tx.run(INSERT.into(Tenants, { ID: tenant, metadata: JSON.stringify(metadata) }))
)
// can't use UPSERT here so @cds.on.insert still works for createdAt
await t0_(INSERT.into(Tenants, { ID: tenant, metadata: JSON.stringify(metadata) }))
} catch (e) {
if (e.message !== 'ENTITY_ALREADY_EXISTS') throw e
if (e.message === 'ENTITY_ALREADY_EXISTS') {
await t0_(UPSERT.into(Tenants, { ID: tenant, metadata: JSON.stringify(metadata) }))
} else throw e
}

@@ -44,5 +46,3 @@ LOG.info(`subscribed tenant ${tenant}`)

const { tenant } = req.data
if (tenant !== t0) await cds.tx({ tenant: t0 }, tx =>
tx.run(DELETE.from(Tenants).where({ ID: tenant }))
)
if (tenant !== t0) await t0_(DELETE.from(Tenants).where({ ID: tenant }))
LOG.info(`unsubscribed tenant ${tenant}`)

@@ -53,6 +53,6 @@ })

function _getT0JobsTableName(csn) {
function _getT0TenantsTableName(csn) {
return cds.requires.db.kind === 'hana' ?
getArtifactCdsPersistenceName('cds.xt.Jobs', cds.env.sql.names || 'plain', csn, 'hana')
: 'cds_xt_Jobs'
getArtifactCdsPersistenceName('cds.xt.Tenants', cds.env.sql.names || 'plain', csn, 'hana')
: 'cds_xt_Tenants'
}

@@ -66,4 +66,4 @@

const csn = await cds.load(`${__dirname}/../../db/t0.cds`)
const columns = await tx.getColumns(t0, _getT0JobsTableName(csn), params)
const needsT0Redeployment = !columns.includes('error') && !columns.includes('ERROR')
const columns = await tx.getColumns(t0, _getT0TenantsTableName(csn), params)
const needsT0Redeployment = !columns.includes('createdAt') && !columns.includes('CREATEDAT')
if (!needsT0Redeployment) return

@@ -70,0 +70,0 @@

@@ -93,10 +93,14 @@ const cds = require('@sap/cds/lib'), {db} = cds.env.requires

const bindParams = { ...bindParamsFromEnv, ...bindParamsFromTenantOptions, ...params?.hdi?.bind }
return {
provisioning_parameters: { ..._encryptionParams(metadata), ...createParams },
binding_parameters: { ...bindParams }
}
const final = {}
const provisioningParams = { ..._encryptionParams(metadata), ...createParams }
if (Object.keys(provisioningParams).length > 0) final.provisioning_parameters = provisioningParams
if (Object.keys(bindParams).length > 0) final.binding_parameters = bindParams
return Object.keys(final).length > 0 ? final : null
}
function _encryptionParams(data) {
return data ? {
return data?.globalAccountGUID ? {
subscriptionContext: {

@@ -285,3 +289,3 @@ // crmId: '',

}
container = await retry(() => checkAndRefreshCredentials(container, tenant), LOG)
container = await retry(() => checkAndRefreshCredentials(container, tenant))
}

@@ -288,0 +292,0 @@ return await ds.deploy({ tenant, options: { container, out, _: params } })

@@ -17,6 +17,6 @@ const https = require('https')

async function create(tenant, parameters = {}) {
LOG.info('creating HDI container for', { tenant }, 'with', { ...parameters })
async function create(tenant, parameters) {
LOG.info('creating HDI container for', { tenant }, ...(parameters ? ['with', { ...parameters }] : []))
const name = await _instanceName4(tenant), service_plan_id = await _planId()
const { binding_parameters, provisioning_parameters } = parameters
const { binding_parameters, provisioning_parameters } = parameters ?? {}
let _instance, service_instance_id

@@ -23,0 +23,0 @@ try {

Diffs of two further files in this comparison are not rendered by the compare view.
