@f5devcentral/f5-cloud-libs-azure
Comparing version 2.2.0 to 2.3.0
{
"name": "@f5devcentral/f5-cloud-libs-azure",
"version": "2.2.0",
"version": "2.3.0",
"description": "Azure implementation of f5-cloud-libs cloud provider code",
@@ -32,3 +32,3 @@ "keywords": [
"peerDependencies": {
"@f5devcentral/f5-cloud-libs": "^4.2.0-beta.1"
"@f5devcentral/f5-cloud-libs": "^4.3.0-beta.1"
},
@@ -43,6 +43,6 @@ "devDependencies": {
"lint": "node node_modules/eslint/bin/eslint lib scripts",
"package": "./package.sh",
"package": "bash ./package.sh",
"test": "node node_modules/nodeunit/bin/nodeunit --reporter eclipse test test/lib",
"version": "./setVersion.sh $npm_package_version && git add ."
"version": "bash ./setVersion.sh $npm_package_version && git add ."
}
}
@@ -20,2 +20,3 @@ #!/usr/bin/env node
const Logger = require('@f5devcentral/f5-cloud-libs').logger;
const localCryptoUtil = require('@f5devcentral/f5-cloud-libs').localCryptoUtil;
@@ -31,11 +32,12 @@ /**
.option('--log-level [type]', 'Specify the Log Level', 'info')
.option('--config-file [type]', 'Specify the configuration file', '/config/cloud/.azCredentials')
.option('--log-file [type]', 'Specify log file location', '/var/log/cloud/azure/appInsightsApiKey.log')
.parse(process.argv);
const logFile = '/var/log/cloud/azure/appInsightsApiKey.log';
const loggerOptions = { logLevel: options.logLevel, fileName: logFile, console: true };
const loggerOptions = { logLevel: options.logLevel, fileName: options.logFile, console: true };
const logger = Logger.getLogger(loggerOptions);
let credentialsFile;
if (fs.existsSync('/config/cloud/.azCredentials')) {
credentialsFile = JSON.parse(fs.readFileSync('/config/cloud/.azCredentials', 'utf8'));
let configFile;
if (fs.existsSync(options.configFile)) {
configFile = fs.readFileSync(options.configFile, 'utf8');
} else {
@@ -46,58 +48,69 @@ logger.error('Credentials file not found');
const subscriptionId = credentialsFile.subscriptionId;
const clientId = credentialsFile.clientId;
const tenantId = credentialsFile.tenantId;
const secret = credentialsFile.secret;
const resourceGroupName = credentialsFile.resourceGroupName;
const appInsightsResourceName = credentialsFile.appInsightsName;
const appInsightsId = credentialsFile.appInsightsId;
let subscriptionId;
let resourceGroupName;
let appInsightsResourceName;
let appInsightsId;
let client;
localCryptoUtil.symmetricDecryptPassword(configFile)
.then((data) => {
configFile = JSON.parse(data);
subscriptionId = configFile.subscriptionId;
resourceGroupName = configFile.resourceGroupName;
appInsightsResourceName = configFile.appInsightsName;
appInsightsId = configFile.appInsightsId;
const clientId = configFile.clientId;
const tenantId = configFile.tenantId;
const secret = configFile.secret;
const credentials = new msRestAzure.ApplicationTokenCredentials(clientId, tenantId, secret);
const client = new AppInsights(credentials, subscriptionId);
const credentials = new msRestAzure.ApplicationTokenCredentials(clientId, tenantId, secret);
client = new AppInsights(credentials, subscriptionId);
logger.info('App Insights ID:', appInsightsId);
logger.info('App Insights ID:', appInsightsId);
/**
* Check if operation is create, delete or list and act accordingly
*/
if (options.keyOperation === 'create') {
Promise.all([
createAppInsightApiKey(resourceGroupName, appInsightsResourceName)
])
.then((results) => {
const response = results[0];
response.appInsightsId = appInsightsId;
logger.info('Response:', response);
logger.debug('API Key Name:', response.name);
logger.debug('API Key ID:', response.id.split('/apikeys/')[1]);
logger.debug('API Key:', response.apiKey);
})
.catch((err) => {
logger.error('Error:', err);
});
} else if (options.keyOperation === 'delete') {
Promise.all([
deleteAppInsightApiKey(resourceGroupName, appInsightsResourceName, options.keyId)
])
.then((results) => {
logger.info('Delete Response:', results[0]);
})
.catch((err) => {
logger.error('Error:', err);
});
}
if (options.keyOperation === 'list' || options.logLevel === 'debug' || options.logLevel === 'silly') {
Promise.all([
listAppInsightInstances(),
listAppInsightApiKeys(resourceGroupName, appInsightsResourceName)
])
.then((results) => {
logger.info('List of App Insight components:', results[0]);
logger.info('List of API keys:', results[1]);
})
.catch((err) => {
logger.error('Error:', err);
});
}
/**
* Check if operation is create, delete or list and act accordingly
*/
if (options.keyOperation === 'create') {
q.all([
createAppInsightApiKey(resourceGroupName, appInsightsResourceName)
])
.then((results) => {
const response = results[0];
response.appInsightsId = appInsightsId;
logger.info('Response:', response);
logger.debug('API Key Name:', response.name);
logger.debug('API Key ID:', response.id.split('/apikeys/')[1]);
logger.debug('API Key:', response.apiKey);
})
.catch((err) => {
logger.error('Error:', err);
});
} else if (options.keyOperation === 'delete') {
q.all([
deleteAppInsightApiKey(resourceGroupName, appInsightsResourceName, options.keyId)
])
.then((results) => {
logger.info('Delete Response:', results[0]);
})
.catch((err) => {
logger.error('Error:', err);
});
} else if (options.keyOperation === 'list') {
q.all([
listAppInsightInstances(),
listAppInsightApiKeys(resourceGroupName, appInsightsResourceName)
])
.then((results) => {
logger.info('List of App Insight components:', results[0]);
logger.info('List of API keys:', results[1]);
})
.catch((err) => {
logger.error('Error:', err);
});
}
})
.catch((err) => {
logger.error('Error:', err);
});
@@ -139,7 +152,7 @@ /**
function deleteAppInsightApiKey(rgName, resourceName, keyId) {
const deferred = q.defer();
if (keyId === null || keyId === undefined) {
throw new Error('keyId cannot be null or undefined when delete has been specified');
deferred.reject(new Error('keyId cannot be null or undefined when delete has been specified'));
}
const deferred = q.defer();
client.aPIKeys.deleteMethod(rgName, resourceName, keyId, (err, data) => {
@@ -146,0 +159,0 @@ if (err) {
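In 2.3.0 this script no longer assumes the credentials file is plaintext JSON; it reads the file and runs it through localCryptoUtil.symmetricDecryptPassword before parsing. A minimal sketch of that read-decrypt-parse pattern, assuming a file encrypted the way f5-cloud-libs expects (the path and the logged field are illustrative):

// Sketch only: read an encrypted credentials file, decrypt it with the
// f5-cloud-libs helper shown in the diff, then parse the JSON it contains.
const fs = require('fs');
const localCryptoUtil = require('@f5devcentral/f5-cloud-libs').localCryptoUtil;

function loadConfig(path) {
    const encrypted = fs.readFileSync(path, 'utf8');
    return localCryptoUtil.symmetricDecryptPassword(encrypted)
        .then((decrypted) => {
            return JSON.parse(decrypted);
        });
}

loadConfig('/config/cloud/.azCredentials')
    .then((config) => {
        console.log('Subscription:', config.subscriptionId);
    })
    .catch((err) => {
        console.error(err.message ? err.message : err);
    });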
@@ -16,2 +16,3 @@ #!/usr/bin/env node
const options = require('commander');
const q = require('q');
const appInsights = require('applicationinsights');
@@ -30,6 +31,6 @@ const Logger = require('@f5devcentral/f5-cloud-libs').logger;
.option('--mgmt-port [type]', 'Specify the BIG-IP mgmt port', '8443')
.option('--log-file [type]', 'Specify the log file location', '/var/log/cloud/azure/metricsCollector.log')
.parse(process.argv);
const logFile = '/var/log/cloud/azure/azureMetricsCollector.log';
const loggerOptions = { logLevel: options.logLevel, fileName: logFile, console: true };
const loggerOptions = { logLevel: options.logLevel, fileName: options.logFile, console: true };
const logger = Logger.getLogger(loggerOptions);
@@ -66,18 +67,19 @@ this.logger = logger;
.then(() => {
Promise.all([
return q.all([
bigip.list('/tm/sys/tmm-info/stats'),
bigip.list('/tm/sys/traffic/stats'),
])
.then((results) => {
const cpuMetricValue = calcTmmCpu(results[0].entries);
logger.debug(`Metric Name: ${cpuMetricName} Metric Value: ${cpuMetricValue}`);
client.trackMetric(cpuMetricName, cpuMetricValue);
]);
})
.then((results) => {
const cpuMetricValue = calcTmmCpu(results[0].entries);
logger.debug(`Metric Name: ${cpuMetricName} Metric Value: ${cpuMetricValue}`);
client.trackMetric(cpuMetricName, cpuMetricValue);
const trafficMetricValue = calcTraffic(results[1].entries);
logger.debug(`Metric Name: ${trafficMetricName} Metric Value: ${trafficMetricValue}`);
client.trackMetric(trafficMetricName, trafficMetricValue);
})
.catch((err) => {
logger.error(err);
});
const trafficMetricValue = calcTraffic(results[1].entries);
logger.debug(`Metric Name: ${trafficMetricName} Metric Value: ${trafficMetricValue}`);
client.trackMetric(trafficMetricName, trafficMetricValue);
})
.catch((err) => {
const error = err.message ? err.message : err;
logger.error(error);
});
@@ -84,0 +86,0 @@
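The metrics collector change above flattens the promise chain: q.all() is returned from one .then() so both stat listings arrive together in the next .then(), and errors surface err.message when present. A condensed sketch of that shape, assuming an initialized BigIp instance and Application Insights client as in the script (the metric names and entry counts below are placeholders, not the script's real CPU and traffic calculations):

const q = require('q');

// bigip is an f5-cloud-libs BigIp instance and client an applicationinsights
// client, both assumed to be set up elsewhere as the script does.
function collectMetrics(bigip, client) {
    return q.all([
        bigip.list('/tm/sys/tmm-info/stats'),
        bigip.list('/tm/sys/traffic/stats')
    ])
        .then((results) => {
            // results arrive in the same order as the array passed to q.all()
            client.trackMetric('tmm stat entries', Object.keys(results[0].entries).length);
            client.trackMetric('traffic stat entries', Object.keys(results[1].entries).length);
        })
        .catch((err) => {
            // prefer the message when available, otherwise log the raw error
            console.error(err.message ? err.message : err);
        });
}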
@@ -16,2 +16,3 @@ #!/usr/bin/env node
const util = f5CloudLibs.util;
const localCryptoUtil = f5CloudLibs.localCryptoUtil;
@@ -25,6 +26,7 @@ /**
.option('--log-level [type]', 'Specify the log level', 'info')
.option('--config-file [type]', 'Specify the configuration file', '/config/cloud/.azCredentials')
.option('--log-file [type]', 'Specify the log file', '/var/log/cloud/azure/failover.log')
.parse(process.argv);
const logFile = '/var/log/cloud/azure/azureFailover.log';
const loggerOptions = { logLevel: options.logLevel, fileName: logFile, console: true };
const loggerOptions = { logLevel: options.logLevel, fileName: options.logFile, console: true };
const logger = Logger.getLogger(loggerOptions);
@@ -34,66 +36,17 @@ const BigIp = f5CloudLibs.bigIp;
let uniqueLabel;
let resourceGroup;
let networkClient;
let routeFilter;
let storageAccount;
let storageKey;
let storageClient;
if (fs.existsSync('/config/cloud/.azCredentials')) {
logger.debug('Credentials file found');
const credentialsFile = JSON.parse(fs.readFileSync('/config/cloud/.azCredentials', 'utf8'));
const clientId = credentialsFile.clientId;
const secret = credentialsFile.secret;
const subscriptionId = credentialsFile.subscriptionId;
const tenantId = credentialsFile.tenantId;
let location = credentialsFile.location;
uniqueLabel = credentialsFile.uniqueLabel;
resourceGroup = credentialsFile.resourceGroupName;
// Detect environment based on location (region), default to Azure
let environment = azureEnvironment.Azure;
if (location) {
location = location.toLowerCase();
logger.silly(`Location: ${location}`);
// Azure US Government cloud regions: US DoD Central, US DoD East, US Gov Arizona,
// US Gov Iowa, US Gov Non-Regional, US Gov Texas, US Gov Virginia, US Sec East1, US Sec West
if (location.includes('usgov') || location.includes('usdod') || location.includes('ussec')) {
environment = azureEnvironment.AzureUSGovernment;
// Azure China cloud regions: China East, China North
} else if (location.includes('china')) {
environment = azureEnvironment.AzureChina;
// Azure Germany cloud regions: Germany Central, Germany Non-Regional, Germany Northeast
// Note: There are Azure commercial cloud regions in Germany, so we have to be specific
} else if (location.includes('germanycentral') || location.includes('germanynortheast') ||
location.includes('germanynonregional')) {
environment = azureEnvironment.AzureGermanCloud;
}
}
storageAccount = credentialsFile.storageAccount;
storageKey = credentialsFile.storageKey;
storageClient = azureStorage.createBlobService(
storageAccount,
storageKey,
`${storageAccount}.blob${environment.storageEndpointSuffix}`
);
const credentials = new msRestAzure.ApplicationTokenCredentials(
clientId, tenantId, secret, { environment }
);
networkClient = new NetworkManagementClient(
credentials,
subscriptionId,
environment.resourceManagerEndpointUrl
);
let configFile;
if (fs.existsSync(options.configFile)) {
configFile = fs.readFileSync(options.configFile, 'utf8');
} else {
logger.error('Credentials file not found');
logger.error('Configuration file not found');
return;
}
if (fs.existsSync('/config/cloud/managedRoutes')) {
let routeFilter = [];
const managedRoutesFile = '/config/cloud/managedRoutes';
if (fs.existsSync(managedRoutesFile)) {
logger.silly('Managed routes file found');
routeFilter =
fs.readFileSync('/config/cloud/managedRoutes', 'utf8').replace(/(\r\n|\n|\r)/gm, '').split(',');
fs.readFileSync(managedRoutesFile, 'utf8').replace(/(\r\n|\n|\r)/gm, '').split(',');
} else {
routeFilter = [];
logger.info('Managed routes file not found');
@@ -107,3 +60,3 @@ }
const FAILOVER_STATUS_RUN = 'running';
const MAX_RUNNING_TASK_MS = 5 * 60000; // 5 minutes
const MAX_RUNNING_TASK_MS = 10 * 60000; // 10 minutes
let tgStats = [];
@@ -115,3 +68,3 @@ let globalSettings = [];
// Define base properties of failover database in storage
let failoverDbBlob = {
let failoverDb = {
status: '',
@@ -126,2 +79,12 @@ timeStamp: '',
};
let subscriptionId;
let location;
let uniqueLabel;
let resourceGroup;
let environment;
let storageAccount;
let storageKey;
let storageClient;
let credentials;
let networkClient;
@@ -131,3 +94,3 @@ const performFailover = function () {
putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDbBlob)
putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDb)
.then(() => {
@@ -149,3 +112,3 @@ return notifyStateUpdate('delete');
.then(() => {
return Promise.all([
return q.all([
bigip.list('/tm/cm/traffic-group/stats'),
@@ -163,3 +126,3 @@ bigip.list('/tm/sys/global-settings'),
virtualAddresses = results[3];
return Promise.all([
return q.all([
listRouteTables(),
@@ -171,3 +134,3 @@ listAzNics(resourceGroup),
logger.info('Performing failover');
return Promise.all([
return q.all([
matchRoutes(results[0], selfIpsArr, tgStats, globalSettings),
@@ -179,4 +142,4 @@ matchNics(results[1], virtualAddresses, selfIpsArr, tgStats, globalSettings),
logger.silly('Updating failover database in storage');
failoverDbBlob.status = FAILOVER_STATUS_SUCCESS;
return putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDbBlob);
failoverDb.status = FAILOVER_STATUS_SUCCESS;
return putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDb);
})
@@ -188,4 +151,4 @@ .then(() => {
.catch((err) => {
failoverDbBlob.status = FAILOVER_STATUS_FAIL;
putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDbBlob)
failoverDb.status = FAILOVER_STATUS_FAIL;
putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDb)
.then(() => {
@@ -199,5 +162,52 @@ logger.error('Error during failover:', err);
storageInit(storageClient)
q.all([
localCryptoUtil.symmetricDecryptPassword(configFile)
])
.then((results) => {
configFile = JSON.parse(results[0]);
subscriptionId = configFile.subscriptionId;
location = configFile.location;
uniqueLabel = configFile.uniqueLabel;
resourceGroup = configFile.resourceGroupName;
// Detect environment based on location (region), default to Azure
environment = azureEnvironment.Azure;
if (location) {
location = location.toLowerCase();
logger.silly(`Location: ${location}`);
// Azure US Government cloud regions: US DoD Central, US DoD East, US Gov Arizona,
// US Gov Iowa, US Gov Non-Regional, US Gov Texas, US Gov Virginia, US Sec East1, US Sec West
if (location.includes('usgov') || location.includes('usdod') || location.includes('ussec')) {
environment = azureEnvironment.AzureUSGovernment;
// Azure China cloud regions: China East, China North
} else if (location.includes('china')) {
environment = azureEnvironment.AzureChina;
// Azure Germany cloud regions: Germany Central, Germany Non-Regional, Germany Northeast
// Note: There are Azure commercial cloud regions in Germany, so we have to be specific
} else if (location.includes('germanycentral') || location.includes('germanynortheast') ||
location.includes('germanynonregional')) {
environment = azureEnvironment.AzureGermanCloud;
}
}
storageAccount = configFile.storageAccount;
storageKey = configFile.storageKey;
storageClient = azureStorage.createBlobService(
storageAccount,
storageKey,
`${storageAccount}.blob${environment.storageEndpointSuffix}`
);
credentials = new msRestAzure.ApplicationTokenCredentials(
configFile.clientId, configFile.tenantId, configFile.secret, { environment }
);
networkClient = new NetworkManagementClient(
credentials,
subscriptionId,
environment.resourceManagerEndpointUrl
);
return storageInit(storageClient);
})
.then(() => {
// Avoid the case where multiple tgactive scripts triggered
// Avoid the case where multiple tgactive/tgrefresh scripts are triggered
// within a short time frame and stomp on each other
@@ -210,23 +220,19 @@ return notifyStateUpdate('check');
.then((results) => {
failoverDbBlob = results;
failoverDb = results;
logger.debug('Failover database status:', failoverDbBlob.status);
if (failoverDbBlob.status === FAILOVER_STATUS_RUN || failoverDbBlob.status === FAILOVER_STATUS_FAIL) {
if (typeof failoverDbBlob.timeStamp !== 'undefined' && failoverDbBlob.timeStamp !== '') {
const differenceInMs = new Date() - Date.parse(failoverDbBlob.timeStamp);
logger.silly('differenceInMs:', differenceInMs, 'MAX_RUNNING_TASK_MS:', MAX_RUNNING_TASK_MS);
if (differenceInMs > MAX_RUNNING_TASK_MS) {
logger.info('Recovering from previous task, differenceInMs:', differenceInMs);
recoverPreviousTask = true;
}
}
// Return - except when recovering from previous incomplete or failed task
if (!recoverPreviousTask) {
logger.info('Already performing failover, exit');
return q();
}
// If status tells us previous task is either running or failed then we need to wait
logger.silly('Failover database status:', failoverDb.status);
if (failoverDb.status === FAILOVER_STATUS_RUN || failoverDb.status === FAILOVER_STATUS_FAIL) {
logger.info('Waiting for previous task to complete before continuing');
return processPreviousTask();
}
failoverDbBlob.status = FAILOVER_STATUS_RUN;
failoverDbBlob.timeStamp = new Date().toJSON();
return q();
})
.then(() => {
// If recovering from previous task, log
if (recoverPreviousTask) {
logger.info('Recovering from previous task');
}
failoverDb.status = FAILOVER_STATUS_RUN;
failoverDb.timeStamp = new Date().toJSON();
return performFailover();
@@ -266,5 +272,67 @@ })
/**
* Queries the previous task at an interval until certain conditions are met
*
* @returns {Promise} A promise which will be resolved after certain conditions are met
*/
function processPreviousTask() {
const deferred = q.defer();
// set last task timestamp to now if it does not exist
if (typeof failoverDb.timeStamp === 'undefined' || failoverDb.timeStamp === '') {
failoverDb.timeStamp = new Date().toJSON();
}
const i = setInterval(run, 5000);
function run() {
return new Promise(
((resolve, reject) => {
const differenceInMs = new Date() - Date.parse(failoverDb.timeStamp);
getJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE)
.then((data) => {
// If previous task reports success we are fine to perform failover
logger.silly('status: ', data.status);
if (data.status === FAILOVER_STATUS_SUCCESS) {
logger.info('Previous task completed, continuing');
clearInterval(i);
deferred.resolve();
}
// If previous task reports failure we should attempt to recover immediately
if (data.status === FAILOVER_STATUS_FAIL) {
logger.info('Previous task failed, recovering');
recoverPreviousTask = true;
clearInterval(i);
deferred.resolve();
}
// If maximum allowed time has gone by without task succeeding, set
// recover flag and perform failover
logger.silly('differenceInMs: ', differenceInMs, MAX_RUNNING_TASK_MS);
if (differenceInMs > MAX_RUNNING_TASK_MS) {
logger.info('Recovering from previous task, differenceInMs: ', differenceInMs);
recoverPreviousTask = true;
clearInterval(i);
deferred.resolve();
}
// simply resolve if done with chain
resolve();
})
.catch((error) => {
logger.error('Error: ', error);
clearInterval(i);
deferred.reject(error);
reject(error);
});
})
);
}
return deferred.promise;
}
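processPreviousTask() above drives a q deferred from a setInterval() callback: poll the failover blob, then resolve once the previous task has succeeded, failed, or exceeded the time limit. A stripped-down sketch of the same wait-for-status idea (the status strings, interval, and getStatus function are illustrative):

const q = require('q');

// Poll getStatus() every intervalMs until it reports 'succeeded' or 'failed',
// or until maxMs has elapsed; getStatus is any function returning a promise
// that resolves to a status string. All names here are examples.
function waitForPreviousTask(getStatus, intervalMs, maxMs) {
    const deferred = q.defer();
    const started = Date.now();
    const timer = setInterval(() => {
        getStatus()
            .then((status) => {
                const expired = (Date.now() - started) > maxMs;
                if (status === 'succeeded' || status === 'failed' || expired) {
                    clearInterval(timer);
                    deferred.resolve(status);
                }
            })
            .catch((err) => {
                clearInterval(timer);
                deferred.reject(err);
            });
    }, intervalMs);
    return deferred.promise;
}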
/**
* Creates local notification to alert other processes that state is being updated
*
* @param {String} action - Action to take for local notification function
* @param {String} action - Action to take for local notification
*
@@ -286,7 +354,7 @@ * @returns {Promise} A promise which will be resolved after state update actions taken
let ctr = 30;
const iObj = setInterval(() => {
const i = setInterval(() => {
if (!fs.existsSync(stateFile)) {
fs.writeFileSync(stateFile, stateFileContents, 'utf8');
deferred.resolve();
clearInterval(iObj);
clearInterval(i);
} else {
@@ -298,3 +366,3 @@ logger.silly('State file exists, retrying after sleep:', ctr);
deferred.reject(new Error('State file still exists after retry period expired:', stateFile));
clearInterval(iObj);
clearInterval(i);
}
@@ -520,2 +588,4 @@ }, 1000);
function matchNics(nics, vs, selfIps, tgs, global) {
const deferred = q.defer();
let h;
@@ -742,6 +812,6 @@ let i;
// Replace current configuration with previous desired configuration from failover database
if (failoverDbBlob.desiredConfiguration.nicArr.disassociateArr &&
failoverDbBlob.desiredConfiguration.nicArr.associateArr) {
disassociateArr = failoverDbBlob.desiredConfiguration.nicArr.disassociateArr;
associateArr = failoverDbBlob.desiredConfiguration.nicArr.associateArr;
if (failoverDb.desiredConfiguration.nicArr.disassociateArr &&
failoverDb.desiredConfiguration.nicArr.associateArr) {
disassociateArr = failoverDb.desiredConfiguration.nicArr.disassociateArr;
associateArr = failoverDb.desiredConfiguration.nicArr.associateArr;
}
@@ -751,11 +821,10 @@ }
if (disassociateArr && disassociateArr.length && associateArr && associateArr.length) {
failoverDbBlob.desiredConfiguration.nicArr.disassociateArr = disassociateArr;
failoverDbBlob.desiredConfiguration.nicArr.associateArr = associateArr;
failoverDb.desiredConfiguration.nicArr.disassociateArr = disassociateArr;
failoverDb.desiredConfiguration.nicArr.associateArr = associateArr;
}
const associateDeferred = q.defer();
putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDbBlob)
putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDb)
.then(() => {
const disassociatePromises = disassociateArr.map(retrier.bind(null, updateNics));
return Promise.all(disassociatePromises);
return q.all(disassociatePromises);
})
@@ -765,13 +834,13 @@ .then(() => {
const associatePromises = associateArr.map(retrier.bind(null, updateNics));
return Promise.all(associatePromises);
return q.all(associatePromises);
})
.then(() => {
logger.info('Associate NICs successful.');
associateDeferred.resolve();
deferred.resolve();
})
.catch((error) => {
logger.error('Error: ', error);
associateDeferred.reject(error);
deferred.reject(error);
});
return associateDeferred.promise;
return deferred.promise;
}
@@ -798,3 +867,3 @@
} else {
putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDbBlob)
putJsonObject(storageClient, FAILOVER_CONTAINER, FAILOVER_FILE, failoverDb)
.then(() => {
@@ -801,0 +870,0 @@ deferred.resolve();
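The failover script now builds its storage and network clients only after decrypting the configuration, and it selects the Azure cloud (commercial, US Government, China, Germany) from the lower-cased region name. A condensed sketch of that selection, assuming azureEnvironment is ms-rest-azure's AzureEnvironment object as the script appears to use it (the sample region string is illustrative):

// Map a region name to the matching Azure cloud environment, defaulting to the
// commercial cloud; mirrors the substring checks in the diff above.
const azureEnvironment = require('ms-rest-azure').AzureEnvironment;

function environmentFromLocation(location) {
    const loc = (location || '').toLowerCase();
    if (loc.includes('usgov') || loc.includes('usdod') || loc.includes('ussec')) {
        return azureEnvironment.AzureUSGovernment;
    }
    if (loc.includes('china')) {
        return azureEnvironment.AzureChina;
    }
    if (loc.includes('germanycentral') || loc.includes('germanynortheast')
        || loc.includes('germanynonregional')) {
        return azureEnvironment.AzureGermanCloud;
    }
    return azureEnvironment.Azure;
}

// e.g. a US Gov region yields the Government cloud's storage endpoint suffix
console.log(environmentFromLocation('usgovvirginia').storageEndpointSuffix);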
@@ -16,5 +16,7 @@ #!/usr/bin/env node
const NetworkManagementClient = require('azure-arm-network');
const Logger = require('@f5devcentral/f5-cloud-libs').logger;
const BigIp = require('@f5devcentral/f5-cloud-libs').bigIp;
const f5CloudLibs = require('@f5devcentral/f5-cloud-libs');
const localCryptoUtil = require('@f5devcentral/f5-cloud-libs').localCryptoUtil;
const Logger = f5CloudLibs.logger;
const BigIp = f5CloudLibs.bigIp;
@@ -30,10 +32,12 @@ /**
.option('--log-level [type]', 'Specify the Log Level', 'info')
.option('--config-file [type]', 'Specify the configuration file', '/config/cloud/.azCredentials')
.option('--log-file [type]', 'Specify the log file location', '/var/log/cloud/azure/scaleSet.log')
.parse(process.argv);
const logFile = '/var/log/cloud/azure/azureScaleSet.log';
const logger = Logger.getLogger({ logLevel: options.logLevel, fileName: logFile, console: true });
const logger = Logger.getLogger({ logLevel: options.logLevel, fileName: options.logFile, console: true });
const bigip = new BigIp({ logger });
let credentialsFile;
if (fs.existsSync('/config/cloud/.azCredentials')) {
credentialsFile = JSON.parse(fs.readFileSync('/config/cloud/.azCredentials', 'utf8'));
let configFile;
if (fs.existsSync(options.configFile)) {
configFile = fs.readFileSync(options.configFile, 'utf8');
} else {
@@ -44,36 +48,41 @@ logger.info('Credentials file not found');
const subscriptionId = credentialsFile.subscriptionId;
const clientId = credentialsFile.clientId;
const tenantId = credentialsFile.tenantId;
const secret = credentialsFile.secret;
const resourceGroupName = credentialsFile.resourceGroupName;
const vmssName = credentialsFile.vmssName;
let resourceGroupName;
let vmssName;
let loadBalancerName;
let instanceId;
let inboundNatRuleBase;
const loadBalancerName = credentialsFile.loadBalancerName;
const instanceId = options.instanceId;
const inboundNatRuleBase = options.natBase;
q.all([
localCryptoUtil.symmetricDecryptPassword(configFile),
bigip.init(
'localhost',
'svc_user',
'file:///config/cloud/.passwd',
{
passwordIsUrl: true,
port: '8443',
passwordEncrypted: true
}
)
])
.then((results) => {
configFile = JSON.parse(results[0]);
const credentials = new msRestAzure.ApplicationTokenCredentials(clientId, tenantId, secret);
this.networkClient = new NetworkManagementClient(credentials, subscriptionId);
this.logger = logger;
const bigip = new BigIp({ logger: this.logger });
resourceGroupName = configFile.resourceGroupName;
vmssName = configFile.vmssName;
loadBalancerName = configFile.loadBalancerName;
instanceId = options.instanceId;
inboundNatRuleBase = options.natBase;
/** Log some basic information
* Instance ID, Load Balancer Name
*/
logger.debug('Instance ID:', instanceId, 'Load Balancer Name:', loadBalancerName,
'VMSS Name:', vmssName);
const credentials = new msRestAzure.ApplicationTokenCredentials(
configFile.clientId, configFile.tenantId, configFile.secret
);
this.networkClient = new NetworkManagementClient(credentials, configFile.subscriptionId);
bigip.init(
'localhost',
'svc_user',
'file:///config/cloud/.passwd',
{
passwordIsUrl: true,
port: '8443',
passwordEncrypted: true
}
)
.then(() => {
/** Log some basic information such as: Instance ID, Load Balancer Name, VMSS Name */
logger.debug('Instance ID:', instanceId, 'Load Balancer Name:', loadBalancerName,
'VMSS Name:', vmssName);
const promises = [];
promises.push(getPublicIpFromScaleSet(this.networkClient, resourceGroupName, vmssName, instanceId));
if (loadBalancerName) {
@@ -84,16 +93,15 @@ promises.push(listDeploymentALB(this.networkClient, resourceGroupName, loadBalancerName));
}
promises.push(getPublicIpFromScaleSet(this.networkClient, resourceGroupName, vmssName, instanceId));
Promise.all(promises)
.then((results) => {
const instanceInfo = {};
instanceInfo.port = getNatRulePort(results[0], instanceId, inboundNatRuleBase);
instanceInfo.publicIp = results[1];
return q.all(promises);
})
.then((results) => {
const instanceInfo = {};
instanceInfo.publicIp = results[0];
instanceInfo.port = getNatRulePort(results[1], instanceId, inboundNatRuleBase);
logger.info('instanceInfo:', instanceInfo);
})
.catch((err) => {
const error = err.message ? err.message : err;
logger.error(error);
});
logger.info('instanceInfo:', instanceInfo);
})
.catch((err) => {
const error = err.message ? err.message : err;
logger.error(error);
});
@@ -107,3 +115,3 @@
* @param {String} resourceGroup - Name of the resource group
* @param {Object} vName - Name of the VMSS
* @param {String} vName - Name of the VMSS
* @param {String} ir - VMSS ID
@@ -110,0 +118,0 @@ * @returns {List} A list of public IP addresses
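The scale set script above now runs configuration decryption and BIG-IP initialization in parallel through q.all(), then parses the decrypted JSON from the first result. A minimal sketch of that combination, reusing only calls that appear in the diff (the log level, file paths, and the logged field are illustrative):

const fs = require('fs');
const q = require('q');
const f5CloudLibs = require('@f5devcentral/f5-cloud-libs');
const localCryptoUtil = f5CloudLibs.localCryptoUtil;

// Set up a logger and BIG-IP handle the same way the script does, then kick off
// decryption and bigip.init() together; the paths below are examples.
const logger = f5CloudLibs.logger.getLogger({ logLevel: 'info', console: true });
const BigIp = f5CloudLibs.bigIp;
const bigip = new BigIp({ logger });
const encrypted = fs.readFileSync('/config/cloud/.azCredentials', 'utf8');

q.all([
    localCryptoUtil.symmetricDecryptPassword(encrypted),
    bigip.init('localhost', 'svc_user', 'file:///config/cloud/.passwd', {
        passwordIsUrl: true,
        port: '8443',
        passwordEncrypted: true
    })
])
    .then((results) => {
        // results[0] is the decrypted configuration; results[1] is bigip.init()'s result
        const config = JSON.parse(results[0]);
        logger.info('Resource group:', config.resourceGroupName);
    })
    .catch((err) => {
        logger.error(err.message ? err.message : err);
    });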
@@ -35,2 +35,4 @@ /**
let authnMock;
let icontrolMock;
let azureMock;
@@ -64,2 +66,4 @@ let azureNetworkMock;
bigIpMock = require('@f5devcentral/f5-cloud-libs').bigIp;
authnMock = require('@f5devcentral/f5-cloud-libs').authn;
icontrolMock = require('@f5devcentral/f5-cloud-libs').iControl;
@@ -69,3 +73,2 @@ AzureAutoscaleProvider = require('../../lib/azureAutoscaleProvider');
provider = new AzureAutoscaleProvider({ clOptions: { user: 'foo', password: 'bar' } });
@@ -94,13 +97,19 @@ provider.resourceGroup = 'my resource group';
setUp(callback) {
const credentialsBlob = {
clientId,
secret,
tenantId,
subscriptionId,
storageAccount,
storageKey
};
utilMock.getDataFromUrl = function getDataFromUrl() {
return q(JSON.stringify({
clientId,
secret,
tenantId,
subscriptionId,
storageAccount,
storageKey
}));
return q(JSON.stringify(credentialsBlob));
};
localCryptoUtilMock.symmetricDecryptPassword = function symmetricDecryptPassword() {
return q(JSON.stringify(credentialsBlob));
};
azureMock.loginWithServicePrincipalSecret = function loginWithServicePrincipalSecret(
@@ -137,24 +146,15 @@ aClientId,
testAzureLoginSecretId(test) {
const storedSecret = 'my secret from rest';
testAzureLoginEncrypted(test) {
const providerOptions = {
clientId,
tenantId,
subscriptionId,
secretId: 'foo',
scaleSet: 'myScaleSet',
resourceGroup: 'myResourceGroup'
resourceGroup: 'myResourceGroup',
azCredentialsUrl: 'file:///foo/bar',
azCredentialsEncrypted: true
};
localCryptoUtilMock.decryptDataFromRestStorage = function decryptDataFromRestStorage() {
return q(
{
servicePrincipalSecret: storedSecret
}
);
};
provider.init(providerOptions)
.then(() => {
test.strictEqual(receivedSecret, storedSecret);
test.strictEqual(receivedClientId, clientId);
test.strictEqual(receivedSecret, secret);
test.strictEqual(receivedTenantId, tenantId);
test.done();
@@ -1038,2 +1038,11 @@ });
bigIpMock.prototype.ready = function ready() {
return q();
};
authnMock.authenticate = function authenticate(host, user, password) {
icontrolMock.password = password;
return q.resolve(icontrolMock);
};
callback();
@@ -1040,0 +1049,0 @@ },
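The test changes above stub localCryptoUtil.symmetricDecryptPassword and authn.authenticate so the provider can be initialized without real credentials. A small nodeunit-style sketch of the decrypt stub on its own (the credential values and test name are made up):

const q = require('q');
const localCryptoUtilMock = require('@f5devcentral/f5-cloud-libs').localCryptoUtil;

module.exports = {
    setUp(callback) {
        // Replace the real decrypt call with one that resolves to a canned JSON blob,
        // the same shape the tests above use; the values are placeholders.
        localCryptoUtilMock.symmetricDecryptPassword = function symmetricDecryptPassword() {
            return q(JSON.stringify({ clientId: 'id', tenantId: 'tenant', secret: 'shh' }));
        };
        callback();
    },

    testDecryptStub(test) {
        localCryptoUtilMock.symmetricDecryptPassword('ignored')
            .then((data) => {
                test.strictEqual(JSON.parse(data).clientId, 'id');
                test.done();
            });
    }
};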