dockerode-compose
Comparing version 1.2.1 to 1.2.2
@@ -30,3 +30,3 @@ const yaml = require('js-yaml');
async up() {
async up(options) {
var output = {};
@@ -39,3 +39,3 @@ try {
output.networks = await networks(this.docker, this.projectName, this.recipe, output);
output.services = await services(this.docker, this.projectName, this.recipe, output);
output.services = await services(this.docker, this.projectName, this.recipe, output, options);
return output;
@@ -42,0 +42,0 @@ } catch (e) {
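The hunk above shows that up() now accepts an optional options object, which is passed through to the services step (and from there to the image build). The only option exercised elsewhere in this comparison is a verbose flag used by the new build test. A minimal usage sketch, assuming the constructor shown in the test helper later in this diff; the project name and compose file path are placeholders, and the exact effect of verbose beyond the build step is an assumption:

const Dockerode = require('dockerode');
const DockerodeCompose = require('dockerode-compose');

const docker = new Dockerode();
// placeholder file and project name
const compose = new DockerodeCompose(docker, './docker-compose.yml', 'myproject');

(async () => {
  // up() with no argument keeps the 1.2.1 behaviour;
  // { verbose: true } mirrors what the new build test passes in this release.
  const report = await compose.up({ verbose: true });
  console.log(report.services);
})();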
@@ -14,9 +14,5 @@ module.exports = async function (docker, projectName, recipe, output) {
}
try {
configs.push(await docker.createConfig(opts));
} catch (err) {
throw err;
}
configs.push(await docker.createConfig(opts));
}
return configs;
}
@@ -8,7 +8,7 @@ module.exports = async function (docker, projectName, recipe, output) {
try {
networks.push({ "name": projectName + '_' + networkName, "network": await docker.createNetwork({ 'Name': projectName + '_' + networkName, 'CheckDuplicate': true }) });
networks.push({ 'name': projectName + '_' + networkName, 'network': await docker.createNetwork({ 'Name': projectName + '_' + networkName, 'CheckDuplicate': true }) });
} catch (err) {
if (err.statusCode == 409 && err.json.message.includes("already exists")) {
let returnedNetwork = await docker.listNetworks({ "filters": { "name": [projectName + '_' + networkName] } });
networks.push({ "name": projectName + '_' + networkName, "network": await docker.getNetwork(returnedNetwork[0].Id) });
if (err.statusCode == 409 && err.json.message.includes('already exists')) {
let returnedNetwork = await docker.listNetworks({ 'filters': { 'name': [projectName + '_' + networkName] } });
networks.push({ 'name': projectName + '_' + networkName, 'network': await docker.getNetwork(returnedNetwork[0].Id) });
} else {
@@ -48,8 +48,4 @@ throw err;
}
try {
networks.push({ "name": projectName + '_' + networkName, "network": await docker.createNetwork(opts) });
} catch (err) {
//if exists we have to compare with the existing network
throw err;
}
//if exists we have to compare with the existing network
networks.push({ 'name': projectName + '_' + networkName, 'network': await docker.createNetwork(opts) });
}
@@ -59,7 +55,7 @@
try {
networks.push({ "name": projectName + '_default', "network": await docker.createNetwork({ 'Name': projectName + '_default', 'CheckDuplicate': true }) });
networks.push({ 'name': projectName + '_default', 'network': await docker.createNetwork({ 'Name': projectName + '_default', 'CheckDuplicate': true }) });
} catch (err) {
if (err.statusCode == 409 && err.json.message.includes("already exists")) {
let returnedNetwork = await docker.listNetworks({ "filters": { "name": [projectName + '_default'] } });
networks.push({ "name": projectName + '_' + networkName, "network": await docker.getNetwork(returnedNetwork[0].Id) });
if (err.statusCode == 409 && err.json.message.includes('already exists')) {
let returnedNetwork = await docker.listNetworks({ 'filters': { 'name': [projectName + '_default'] } });
networks.push({ 'name': projectName + '_' + networkName, 'network': await docker.getNetwork(returnedNetwork[0].Id) });
} else {
@@ -66,0 +62,0 @@ throw err;
const fs = require('fs');
module.exports = async function(docker, projectName, recipe, output) {
module.exports = async function (docker, projectName, recipe, output) {
var secrets = [];
@@ -16,9 +16,5 @@ var secretNames = Object.keys(recipe.secrets || []);
}
try {
secrets.push(await docker.createSecret(opts));
} catch (err) {
throw err;
}
secrets.push(await docker.createSecret(opts));
}
return secrets;
}
const tools = require('./tools');
const servicesTools = require('./servicesTools');
const fs = require('fs');
const yaml = require('js-yaml');
const tar = require('tar-fs');
const path = require('path')
const path = require('path');
module.exports = async function (docker, projectName, recipe, output) {
module.exports = async function (docker, projectName, recipe, output, options) {
var services = [];
@@ -19,3 +19,3 @@ var serviceNames = tools.sortServices(recipe);
} else {
throw 'Service key in extends is required!';
throw new Error('Service key in extends is required!');
}
@@ -39,3 +39,2 @@
var buildContextPath = path.resolve(path.join(absolutePath, service.build.context));
var buildContextPathWDockerfile = "";
if (fs.existsSync(buildContextPath)) {
@@ -69,7 +68,8 @@
if (service.build.labels.length > 0) {
var labels = {};
for (var labelsb of service.build.labels) {
var p = labelsb.split('=');
if (p[1] === undefined)
let labels = {};
for (let labelsb of service.build.labels) {
let p = labelsb.split('=');
if (p[1] === undefined) {
p[1] = '';
}
labels[p[0]] = p[1];
@@ -85,3 +85,3 @@ }
// RE ARRAGE the function "convertSizeStringToByteValue" to a generic one
obj['shmsize'] = convertSizeStringToByteValue([{ 'path': '', 'rate': service.build.shm_size }]).Rate;
obj['shmsize'] = servicesTools.convertSizeStringToByteValue([{ 'path': '', 'rate': service.build.shm_size }]).Rate;
}
@@ -94,11 +94,11 @@
if (service.build.dockerfile === undefined) {
await buildDockerImage(docker, buildContextPath, obj, null);
await servicesTools.buildDockerImage(docker, buildContextPath, obj, null, options);
} else {
await buildDockerImage(docker, buildContextPath, obj, service.build.dockerfile);
await servicesTools.buildDockerImage(docker, buildContextPath, obj, service.build.dockerfile, options);
}
} else {
throw `build path ${buildContextPath} either does not exist, is not accessible, or is not a valid URL.`;
throw new Error(`build path ${buildContextPath} either does not exist, is not accessible, or is not a valid URL.`);
}
} else {
throw 'Build context is required!';
throw new Error('Build context is required!');
}
@@ -108,5 +108,5 @@ } else {
if (fs.existsSync(dockerFilePath)) {
await buildDockerImage(docker, dockerFilePath, obj, null);
await servicesTools.buildDockerImage(docker, dockerFilePath, obj, null, options);
} else {
throw `build path ${dockerFilePath} either does not exist, is not accessible, or is not a valid URL.`;
throw new Error(`build path ${dockerFilePath} either does not exist, is not accessible, or is not a valid URL.`);
}
@@ -119,4 +119,4 @@ }
'Image': service.image,
'HostConfig': buildHostConfig(service, recipe),
'Env': buildEnvVars(service),
'HostConfig': servicesTools.buildHostConfig(service, recipe),
'Env': servicesTools.buildEnvVars(service),
'NetworkingConfig': {
@@ -129,53 +129,3 @@ 'EndpointsConfig': {
if (service.networks !== undefined) {
if (Array.isArray(service.networks)) {
for (let index = 0; index < service.networks.length; index++) {
let networkName = projectName + '_' + service.networks[index];
let networkTemplate = {
'NetworkingConfig': {
'EndpointsConfig': {
}
}
};
networkTemplate.NetworkingConfig.EndpointsConfig[networkName] = {};
networkTemplate.NetworkingConfig.EndpointsConfig[networkName]['Aliases'] = [serviceName];
if (index === 0)
opts.NetworkingConfig.EndpointsConfig = networkTemplate.NetworkingConfig.EndpointsConfig;
networksToAttach.push(networkTemplate.NetworkingConfig.EndpointsConfig);
}
} else {
let networkNames = Object.keys(service.networks);
for (let index = 0; index < networkNames.length; index++) {
let network = service.networks[networkNames[index]] || {};
let networkName = projectName + '_' + networkNames[index];
let networkTemplate = {
'NetworkingConfig': {
'EndpointsConfig': {
}
}
};
networkTemplate.NetworkingConfig.EndpointsConfig[networkName] = {};
networkTemplate.NetworkingConfig.EndpointsConfig[networkName]['IPAMConfig'] = {};
if (network.aliases !== undefined) {
networkTemplate.NetworkingConfig.EndpointsConfig[networkName]['Aliases'] = network.aliases;
}
if (network.ipv4_address !== undefined) {
networkTemplate.NetworkingConfig.EndpointsConfig[networkName].IPAMConfig['IPv4Address'] = network.ipv4_address;
}
if (network.ipv6_address !== undefined) {
networkTemplate.NetworkingConfig.EndpointsConfig[networkName].IPAMConfig['IPv6Address'] = network.ipv6_address;
}
if (network.link_local_ips !== undefined) {
networkTemplate.NetworkingConfig.EndpointsConfig[networkName].IPAMConfig['LinkLocalIPs'] = network.link_local_ips;
}
if (network.priority !== undefined) {
networkTemplate.NetworkingConfig.EndpointsConfig[networkName].priority = network.priority;
} else {
networkTemplate.NetworkingConfig.EndpointsConfig[networkName].priority = 0;
}
if (index === 0)
opts.NetworkingConfig.EndpointsConfig = networkTemplate.NetworkingConfig.EndpointsConfig;
networksToAttach.push(networkTemplate.NetworkingConfig.EndpointsConfig);
}
}
servicesTools.buildNetworks(service.networks, networksToAttach);
} else {
@@ -196,3 +146,3 @@ opts.NetworkingConfig.EndpointsConfig[projectName + '_default'] = {
var svf = recipe.services[vf[0]];
buildVolumes(svf.volumes, opts);
servicesTools.buildVolumes(svf.volumes, opts);
}
@@ -202,7 +152,6 @@ }
if (service.volumes) {
buildVolumes(service.volumes, opts);
servicesTools.buildVolumes(service.volumes, opts);
}
if (service.name !== undefined) {
opts.Name = service.container_name || serviceName;
if (service.container_name !== undefined) {
opts.name = service.container_name;
}
@@ -254,7 +203,8 @@ if (service.domainname !== undefined) {
if (service.labels.length > 0) {
var labels = {};
for (var labelsb of service.labels) {
var p = labelsb.split('=');
if (p[1] === undefined)
let labels = {};
for (let labelsb of service.labels) {
let p = labelsb.split('=');
if (p[1] === undefined) {
p[1] = '';
}
labels[p[0]] = p[1];
@@ -288,22 +238,16 @@ }
}
try {
var container = await docker.createContainer(opts);
var container = await docker.createContainer(opts);
if (networksToAttach.length > 1) {
let networkNames = Object.keys(networksToAttach[0]);
let network = findNetwork(output, networkNames[0]);
await network.disconnect({ 'Container': container.id });
let networksToAttachSorted = tools.sortNetworksToAttach(networksToAttach);
for (var networkToAttach of networksToAttachSorted) {
let networkName = Object.keys(networkToAttach);
let network = findNetwork(output, networkName);
await network.connect({ 'Container': container.id, 'EndpointConfig': networkToAttach[networkName] });
}
if (networksToAttach.length > 1) {
let networkNames = Object.keys(networksToAttach[0]);
await findNetwork(output, networkNames[0]).disconnect({ 'Container': container.id });
let networksToAttachSorted = tools.sortNetworksToAttach(networksToAttach);
for (var networkToAttach of networksToAttachSorted) {
let networkName = Object.keys(networkToAttach);
await findNetwork(output, networkName).connect({ 'Container': container.id, 'EndpointConfig': networkToAttach[networkName] });
}
}
await container.start();
services.push(container);
} catch (err) {
throw err;
}
await container.start();
services.push(container);
}
@@ -313,383 +257,3 @@ return services;
//ToDo: complete the compose specification
var buildHostConfig = function (service, recipe) {
var output = {
'RestartPolicy': { 'Name': service.restart }
};
if (service.volumes_from) {
for (var volume_from of service.volumes_from) {
var vf = volume_from.split(':');
var svf = recipe.services[vf[0]];
buildVolumesHostconfig(svf.volumes, output, vf[1]);
}
}
if (service.volumes) {
buildVolumesHostconfig(service.volumes, output);
}
if (service.ports !== undefined) {
if (typeof service.ports[0] === 'object') {
// LONG SYNTAX
// !!! INCOMPLETE - NOT USING DIFFERENT MODES - `mode`: `host` for publishing a host port on each node, or `ingress` for a port to be load balanced.
var ports = {};
for (var port of service.ports) {
ports[port.target + '/' + port.protocol] = [{ 'HostPort': port.published.toString() }];
}
output['PortBindings'] = ports;
} else {
// SHORT SYNTAX
// TODO: SIMPLIFY THIS BLOCK OF CODE! MAYBE!
var ports = {};
for (var port of service.ports) {
var port_split = port.split(':');
if (port_split.length == 2) {
// "xxxx:xxxx"
if (port_split[1].includes('-')) {
// "9090-9091:8080-8081"
let split_port_split0 = port_split[0].split('-');
let split_port_split0_array = [];
split_port_split0_array = fillPortArray(parseInt(split_port_split0[0]), parseInt(split_port_split0[1]));
let split_port_split1 = port_split[1].split('-');
let split_port_split1_array = [];
split_port_split1_array = fillPortArray(parseInt(split_port_split1[0]), parseInt(split_port_split1[1]));
for (let index in split_port_split0_array) {
ports[split_port_split1_array[index] + '/tcp'] = [{ 'HostPort': split_port_split0_array[index].toString() }];
}
} else if (port_split[0].includes('-')) {
// "3000-3005"
let split_port_split = port_split[0].split('-');
ports[port_split[1] + '/tcp'] = [];
for (let i = split_port_split[0]; i <= split_port_split[1]; i++) {
ports[port_split[1] + '/tcp'].push({ 'HostPort': i.toString() });
}
} else if (port_split[1].includes('/')) {
// "6060:6060/udp"
ports[port_split[1]] = [{ 'HostPort': port_split[0] }];
} else {
// "8000:8000"
ports[port_split[1] + '/tcp'] = [{ 'HostPort': port_split[0] }];
}
} else if (port_split.length == 3) {
// "x.x.x.x:xxxx:xxxx"
if (port_split[2].includes('-')) {
// "127.0.0.1:5000-5010:5000-5010"
let split_port_split1 = port_split[1].split('-');
let split_port_split1_array = [];
split_port_split1_array = fillPortArray(parseInt(split_port_split1[0]), parseInt(split_port_split1[1]));
let split_port_split2 = port_split[2].split('-');
let split_port_split2_array = [];
split_port_split2_array = fillPortArray(parseInt(split_port_split2[0]), parseInt(split_port_split2[1]));
for (let index in split_port_split1_array) {
ports[split_port_split2_array[index] + '/tcp'] = [{ 'HostPort': split_port_split1_array[index].toString(), 'HostIp': port_split[0] }];
}
} else if (port_split[1] == '') {
// "127.0.0.1::5000
ports[port_split[2] + '/tcp'] = [{ 'HostPort': port_split[2], 'HostIp': port_split[0] }];
} else {
// "127.0.0.1:8001:8001"
ports[port_split[2] + '/tcp'] = [{ 'HostPort': port_split[1], 'HostIp': port_split[0] }];
}
} else {
// "xxxx"
if (port_split[0].includes('-')) {
// "3000-3005"
let split_port_split = port_split[0].split('-');
for (let i = split_port_split[0]; i <= split_port_split[1]; i++) {
ports[i + '/tcp'] = [{ 'HostPort': i.toString() }];
}
} else {
// "3000"
ports[port + '/tcp'] = [{ 'HostPort': port }];
}
}
}
output['PortBindings'] = ports;
}
}
if (service.cpu_count !== undefined) {
output.CpuCount = service.cpu_count;
}
if (service.cpu_percent !== undefined) {
output.CpuPercent = service.cpu_percent;
}
if (service.cpu_shares !== undefined) {
output.CpuShares = service.cpu_shares;
}
if (service.cpu_period !== undefined) {
output.CpuPeriod = service.cpu_period;
}
if (service.cpu_quota !== undefined) {
output.CpuQuota = service.cpu_quota;
}
if (service.cpu_rt_runtime !== undefined) {
output.CpuRealtimeRuntime = service.cpu_rt_runtime;
}
if (service.cpu_rt_period !== undefined) {
output.CpuRealtimePeriod = service.cpu_rt_period;
}
if (service.cpuset !== undefined) {
output.CpusetCpus = service.cpuset;
}
if (service.cap_add !== undefined) {
output.CapAdd = service.cap_add;
}
if (service.cap_drop !== undefined) {
output.CapDrop = service.cap_drop;
}
if (service.cgroup_parent !== undefined) {
output.CgroupParent = service.cgroup_parent;
}
if (service.device_cgroup_rules !== undefined) {
output.DeviceCgroupRules = service.device_cgroup_rules;
}
if (service.dns !== undefined) {
output.Dns = service.dns;
}
if (service.dns_opt !== undefined) {
output.DnsOptions = service.dns_opt;
}
if (service.dns_search !== undefined) {
output.DnsSearch = service.dns_search;
}
if (service.extra_hosts !== undefined) {
output.ExtraHosts = service.extra_hosts;
}
if (service.group_add !== undefined) {
output.GroupAdd = service.group_add;
}
if (service.init !== undefined) {
output.Init = service.init;
}
if (service.ipc !== undefined) {
output.IpcMode = service.ipc;
}
if (service.isolation !== undefined) {
output.Isolation = service.isolation;
}
if (service.mem_swappiness !== undefined) {
output.MemorySwappiness = service.mem_swappiness;
}
if (service.oom_kill_disable !== undefined) {
output.OomKillDisable = service.oom_kill_disable;
}
if (service.oom_score_adj !== undefined) {
output.OomScoreAdj = service.oom_score_adj;
}
if (service.pid !== undefined) {
output.PidMode = service.pid;
}
if (service.pids_limit !== undefined) {
output.PidsLimit = service.pids_limit;
}
if (service.privileged !== undefined) {
output.Privileged = service.privileged;
}
if (service.read_only !== undefined) {
output.ReadonlyRootfs = service.read_only;
}
if (service.runtime !== undefined) {
output.Runtime = service.runtime;
}
if (service.security_opt !== undefined) {
output.SecurityOpt = service.security_opt;
}
if (service.shm_size !== undefined) {
output.ShmSize = service.shm_size;
}
if (service.storage_opt !== undefined) {
output.StorageOpt = service.storage_opt;
}
if (service.sysctls !== undefined) {
if (service.sysctls.length > 0) {
var sysctls = {};
for (var sysctlsb of service.sysctls) {
var p = sysctlsb.split('=');
sysctls[p[0]] = p[1];
}
output.Sysctls = sysctls;
} else {
let sysctlKeys = Object.keys(service.sysctls);
let newSysctls = {};
for (var key of sysctlKeys) {
newSysctls[key] = service.sysctls[key].toString();
}
output.Sysctls = newSysctls;
}
}
if (service.userns_mode !== undefined) {
output.UsernsMode = service.userns_mode;
}
if (service.tmpfs !== undefined) {
if (Array.isArray(service.tmpfs)) {
var tmpfs = {};
for (var tmpfsb of service.tmpfs) {
var p = tmpfsb.split(':');
if (p[1] === undefined)
p[1] = '';
tmpfs[p[0]] = p[1];
}
output.Tmpfs = tmpfs;
} else {
var tmpfs = {};
var p = service.tmpfs.split(':');
if (p[1] === undefined)
p[1] = '';
tmpfs[p[0]] = p[1];
output.Tmpfs = tmpfs;
}
}
if (service.ulimits !== undefined) {
let ulimitsKeys = Object.keys(service.ulimits);
let ulimitsArray = [];
for (var key of ulimitsKeys) {
let ulimitsObject = {};
if (typeof service.ulimits[key] === 'object') {
ulimitsObject.Name = key;
ulimitsObject.Soft = service.ulimits[key].soft;
ulimitsObject.Hard = service.ulimits[key].hard;
ulimitsArray.push(ulimitsObject);
} else {
ulimitsObject.Name = key;
ulimitsObject.Soft = service.ulimits[key];
ulimitsObject.Hard = service.ulimits[key];
ulimitsArray.push(ulimitsObject);
}
}
output.Ulimits = ulimitsArray;
}
if (service.blkio_config !== undefined) {
if (service.blkio_config.weight !== undefined) {
output.BlkioWeight = service.blkio_config.weight;
}
if (service.blkio_config.weight_device !== undefined) {
let weight_device = [{}];
weight_device[0]['Path'] = service.blkio_config.weight_device[0].path;
weight_device[0]['Weight'] = service.blkio_config.weight_device[0].weight;
output.BlkioWeightDevice = weight_device;
}
if (service.blkio_config.device_read_bps !== undefined) {
output.BlkioDeviceReadBps = convertSizeStringToByteValue(service.blkio_config.device_read_bps);
}
if (service.blkio_config.device_read_iops !== undefined) {
let device_read_iops = [{}];
device_read_iops[0]['Path'] = service.blkio_config.device_read_iops[0].path;
device_read_iops[0]['Rate'] = service.blkio_config.device_read_iops[0].rate;
output.BlkioDeviceReadIOps = device_read_iops;
}
if (service.blkio_config.device_write_bps !== undefined) {
output.BlkioDeviceWriteBps = convertSizeStringToByteValue(service.blkio_config.device_write_bps);
}
if (service.blkio_config.device_write_iops !== undefined) {
let device_write_iops = [{}];
device_write_iops[0]['Path'] = service.blkio_config.device_write_iops[0].path;
device_write_iops[0]['Rate'] = service.blkio_config.device_write_iops[0].rate;
output.BlkioDeviceWriteIOps = device_write_iops;
}
}
if (service.logging !== undefined) {
let logging = {};
logging.Type = service.logging.driver;
logging.Config = service.logging.options;
output.LogConfig = logging;
}
return output;
}
var buildVolumesHostconfig = function (volumes, output, type) {
if (output['Binds'] === undefined) {
output['Binds'] = [];
}
for (var volume of volumes) {
if (typeof volume === 'string' || volume instanceof String) {
var aux = volume;
if (type == 'ro') {
aux += ':ro';
}
output['Binds'].push(aux);
} else {
var volumestr = '';
if (volume.source && volume.target) {
volumestr += volume.source + ':' + volume.target + ':';
}
if (volume.read_only || type == 'ro') {
volumestr += 'ro,';
}
if (volume.volume && volume.volume.nocopy) {
volumestr += 'nocopy,';
}
if (volume.bind && volume.bind.propagation) {
volumestr += volume.bind.propagation + ',';
}
volumestr = volumestr.slice(0, -1);
output['Binds'].push(volumestr);
}
}
}
var buildVolumes = function (volumes, opts) {
if (opts['Volumes'] === undefined) {
opts['Volumes'] = {};
}
for (var volume of volumes) {
if (typeof volume === 'string' || volume instanceof String) {
var v = volume.split(':');
opts['Volumes'][v[1]] = {};
} else {
if (volume.target) {
opts['Volumes'][volume.target] = {};
}
}
}
}
var buildEnvVars = function (service) {
var output = [];
if (service.env_file !== undefined) {
if (Array.isArray(service.env_file)) {
for (let env_file_path of service.env_file) {
buildEnvVarsFromFile(env_file_path, output);
}
} else {
buildEnvVarsFromFile(service.env_file, output);
}
}
if (service.environment !== undefined) {
if (Array.isArray(service.environment)) {
for (let environment_line of service.environment) {
output.push(environment_line);
}
} else {
var envsNames = Object.keys(service.environment);
for (var envName of envsNames) {
output.push(envName + '=' + service.environment[envName]);
}
}
}
return output;
}
var findNetwork = function (output, name) {
@@ -702,55 +266,2 @@ for (var network of output.networks) {
// TODO: OPTIMIZE!
var convertSizeStringToByteValue = function (obj) {
let rate = obj[0].rate.toLowerCase();
let new_obj = [{}];
if (rate.includes('k')) {
if (rate.indexOf('k') == rate.length - 1) {
rate = rate.replace('k', '');
} else if (rate.indexOf('k') == rate.length - 2) {
rate = rate.replace('kb', '');
}
new_obj[0]['Path'] = obj[0].path;
new_obj[0]['Rate'] = rate * 1024;
return new_obj;
} else if (rate.includes('m')) {
if (rate.indexOf('m') == rate.length - 1) {
rate = rate.replace('m', '');
} else if (rate.indexOf('m') == rate.length - 2) {
rate = rate.replace('mb', '');
}
new_obj[0]['Path'] = obj[0].path;
new_obj[0]['Rate'] = rate * 1024 * 1024;
return new_obj;
} else if (rate.includes('g')) {
if (rate.indexOf('g') == rate.length - 1) {
rate = rate.replace('g', '');
} else if (rate.indexOf('g') == rate.length - 2) {
rate = rate.replace('gb', '');
}
new_obj[0]['Path'] = obj[0].path;
new_obj[0]['Rate'] = rate * 1024 * 1024 * 1024;
return new_obj;
}
}
var buildEnvVarsFromFile = function (env_file_path, output) {
// Each line in an env file MUST be in `VAR=VAL` format.
try {
let env_file = fs.readFileSync(env_file_path, 'utf8').toString().split('\n');
for (let env_line of env_file) {
// Lines beginning with `#` MUST be ignored. Blank lines MUST also be ignored.
if (env_line != '' && env_line.indexOf('#') != 0) {
let env_line_split = env_line.split('=');
// `VAL` MAY be omitted, sin such cases the variable value is empty string. `=VAL` MAY be omitted, in such cases the variable is **unset**.
if (env_line_split[0] != '' && env_line_split[1] != '') {
output.push(env_line);
}
}
}
} catch (e) {
throw e;
}
}
var convertFancyDurationToMs = function (value) {
@@ -769,5 +280,2 @@ let interval = parseInt(value);
var fillPortArray = function (start, end) {
return Array(end - start + 1).fill().map((_, idx) => start + idx);
}
@@ -782,9 +290,5 @@ // https://github.com/compose-spec/compose-spec/blob/master/spec.md#extends
// EXTENDS OF ANOTHER RECIPE
try {
var absolutePath = path.dirname(pathScope.file);
var extendsRecipe = yaml.load(fs.readFileSync(path.resolve(path.join(absolutePath, service.extends.file)), 'utf8'));
return buildExtendsService(service, service.extends.service, extendsRecipe, pathScope);
} catch (e) {
throw e;
}
var absolutePath = path.dirname(pathScope.file);
var extendsRecipe = yaml.load(fs.readFileSync(path.resolve(path.join(absolutePath, service.extends.file)), 'utf8'));
return buildExtendsService(service, service.extends.service, extendsRecipe, pathScope);
}
@@ -804,3 +308,3 @@ }
var serviceKeys = Object.keys(service);
for (var key of serviceKeys) {
for (let key of serviceKeys) {
verifyRestrictions(key);
@@ -812,3 +316,3 @@ if (key == 'extends') {
var oldServiceKeys = Object.keys(oldService);
for (var key of oldServiceKeys) {
for (let key of oldServiceKeys) {
if (key != 'extends') {
@@ -828,3 +332,3 @@ mergingService(key, service, oldService)
}
throw 'Extends service not found';
throw new Error('Extends service not found');
}
@@ -843,4 +347,5 @@
];
if (restrictions.includes(key))
throw 'This extends service cannot be used as a base';
if (restrictions.includes(key)) {
throw new Error('This extends service cannot be used as a base');
}
}
@@ -886,3 +391,3 @@
var objectMappingsKeys = Object.keys(objectMappings[key]);
for (objectMappingsKey of objectMappingsKeys) {
for (let objectMappingsKey of objectMappingsKeys) {
if (oldService[key][objectMappingsKey] !== undefined) {
@@ -896,5 +401,5 @@ service[key][objectMappingsKey] = oldService[key][objectMappingsKey];
if (!Array.isArray(service[key])) {
var tempService = [];
var envsNames = Object.keys(service[key]);
for (var envName of envsNames) {
let tempService = [];
let envsNames = Object.keys(service[key]);
for (let envName of envsNames) {
tempService.push(envName + '=' + service[key][envName]);
@@ -906,4 +411,4 @@ }
var tempOldService = [];
var envsNames = Object.keys(oldService[key]);
for (var envName of envsNames) {
let envsNames = Object.keys(oldService[key]);
for (let envName of envsNames) {
tempOldService.push(envName + '=' + oldService[key][envName]);
@@ -930,17 +435,15 @@ }
if (!Array.isArray(service[key])) {
var tempService = [];
if (service[key] !== undefined)
let tempService = [];
if (service[key] !== undefined) {
tempService.push(service[key]);
}
service[key] = tempService;
}
if (!Array.isArray(oldService[key])) {
var tempService = [];
tempService.push(oldService[key]);
oldService[key] = tempService;
oldService[key] = [oldService[key]];
}
service[key] = service[key].concat(oldService[key]);
} else {
var tempService = service[key];
service[key] = [];
service[key].push(tempService)
service[key].push(service[key])
service[key].push(oldService[key]);
@@ -950,3 +453,3 @@ }
var objectSequencesKeys = Object.keys(objectSequences[key]);
for (objectSequencesKey of objectSequencesKeys) {
for (let objectSequencesKey of objectSequencesKeys) {
if (oldService[key][objectSequencesKey] !== undefined) {
@@ -969,26 +472,1 @@ service[key][objectSequencesKey] = oldService[key][objectSequencesKey];
}
var buildDockerImage = async function (docker, buildPath, obj, dockerfile) {
if (dockerfile !== null) {
obj['dockerfile'] = path.basename(dockerfile);
try {
var stream = await docker.buildImage({
context: buildPath,
src: [dockerfile]
}, obj);
stream.pipe(process.stdout);
await new Promise(fulfill => stream.once('end', fulfill));
} catch (e) {
throw e;
}
} else {
var tarStream = tar.pack(buildPath);
try {
var stream = await docker.buildImage(tarStream, obj);
stream.pipe(process.stdout);
await new Promise(fulfill => stream.once('end', fulfill));
} catch (e) {
throw e;
}
}
}
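Most of the lines removed above are helpers (buildHostConfig, buildVolumes, buildEnvVars, buildDockerImage, convertSizeStringToByteValue, the port parsing) that were extracted into the new servicesTools module, with buildDockerImage now also receiving the options argument threaded down from up(). servicesTools.js itself is not part of this comparison, so the following is only a hedged sketch of how the extracted buildDockerImage might gate its output stream on options.verbose, reusing the removed implementation above; the actual module may differ:

const path = require('path');
const tar = require('tar-fs');

// Sketch only: in 1.2.1 the build stream was always piped to stdout;
// a plausible reading of the new options parameter is that piping becomes opt-in.
async function buildDockerImage(docker, buildPath, obj, dockerfile, options) {
  let stream;
  if (dockerfile !== null) {
    obj['dockerfile'] = path.basename(dockerfile);
    stream = await docker.buildImage({ context: buildPath, src: [dockerfile] }, obj);
  } else {
    stream = await docker.buildImage(tar.pack(buildPath), obj);
  }
  if (options && options.verbose) {
    stream.pipe(process.stdout); // only surface build output when verbose is requested
  }
  await new Promise((fulfill) => stream.once('end', fulfill));
}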
@@ -17,9 +17,5 @@ module.exports = async function (docker, projectName, recipe, output) {
}
try {
volumes.push(await docker.createVolume(opts));
} catch (err) {
throw err;
}
volumes.push(await docker.createVolume(opts));
}
return volumes;
}
{
"name": "dockerode-compose",
"version": "1.2.1",
"version": "1.2.2",
"description": "docker-compose in nodejs using dockerode",
@@ -5,0 +5,0 @@ "main": "./compose.js",
@@ -44,2 +44,12 @@ const expect = require('chai').expect,
expect(report.services).to.be.ok;
await new Promise(r => setTimeout(r, 2000));
let listContainers = await docker.listContainers({ 'all': true });
for (var containerInfo of listContainers) {
if (containerInfo.Names[0].includes("dockerodec_wordpress")) {
let container = docker.getContainer(containerInfo.Id);
if (containerInfo.State == 'running')
await container.stop();
await container.remove();
}
}
done();
@@ -52,6 +62,16 @@ })();
it("should do compose up complex example with extends and build", function (done) {
this.timeout(60000);
this.timeout(300000);
(async () => {
var report = await compose_complex.up();
expect(report.services).to.be.ok;
await new Promise(r => setTimeout(r, 5000));
let listContainers = await docker.listContainers({ 'all': true });
for (var containerInfo of listContainers) {
if (containerInfo.Names[0].includes("dockerodec_complex")) {
let container = docker.getContainer(containerInfo.Id);
if (containerInfo.State == 'running')
await container.stop();
await container.remove();
}
}
done();
@@ -64,11 +84,39 @@ })();
it("should do compose up example with build", function (done) {
this.timeout(60000);
this.timeout(300000);
(async () => {
var report = await compose_build.up();
expect(report.services).to.be.ok;
await new Promise(r => setTimeout(r, 5000));
let listContainers = await docker.listContainers({ 'all': true });
for (var containerInfo of listContainers) {
if (containerInfo.Names[0].includes("dockerodec_build")) {
let container = docker.getContainer(containerInfo.Id);
if (containerInfo.State == 'running')
await container.stop();
await container.remove();
}
}
done();
})();
});
it("should do compose up example with build(verbose)", function (done) {
this.timeout(300000);
(async () => {
var report = await compose_build.up({ 'verbose': true });
expect(report.services).to.be.ok;
await new Promise(r => setTimeout(r, 5000));
let listContainers = await docker.listContainers({ 'all': true });
for (var containerInfo of listContainers) {
if (containerInfo.Names[0].includes("dockerodec_build")) {
let container = docker.getContainer(containerInfo.Id);
if (containerInfo.State == 'running')
await container.stop();
await container.remove();
}
}
done();
})();
});
});
});
@@ -5,5 +5,5 @@ var Dockerode = require('dockerode');
var docker = new Dockerode();
var compose = new DockerodeCompose(docker, './test/assets/wordpress_original.yml', 'wordpress');
var compose_complex = new DockerodeCompose(docker, './test/assets/complex_example/docker-compose.yml', 'complex');
var compose_build = new DockerodeCompose(docker, './test/assets/test_build/docker-compose.yml', 'build');
var compose = new DockerodeCompose(docker, './test/assets/wordpress_original.yml', 'dockerodec_wordpress');
var compose_complex = new DockerodeCompose(docker, './test/assets/complex_example/docker-compose.yml', 'dockerodec_complex');
var compose_build = new DockerodeCompose(docker, './test/assets/test_build/docker-compose.yml', 'dockerodec_build');
@@ -10,0 +10,0 @@ module.exports = {