Last active
August 29, 2015 14:16
-
-
Save rgulewich/c7ab74059e5ffcf7aac4 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| /* | |
| * This Source Code Form is subject to the terms of the Mozilla Public | |
| * License, v. 2.0. If a copy of the MPL was not distributed with this | |
| * file, You can obtain one at http://mozilla.org/MPL/2.0/. | |
| */ | |
| /* | |
| * Copyright (c) 2015, Joyent, Inc. | |
| */ | |
| /* | |
| * Shared workflow tasks for dealing with fabrics | |
| */ | |
| var async = require('async'); | |
/*
 * Acquire a CNAPI waitlist ticket for each fabric network that needs a NAT
 * zone provisioned. Expects job.params.fabricNatNics (set by provisionNics);
 * pushes { nic: nic, ticket: ticket } entries onto job.params.fabricNatTickets
 * for provisionFabricNats to consume. Requires the sdcClients and cnapiUrl
 * workflow globals.
 */
function acquireFabricTickets(job, cb) {
    if (!job.params.fabricNatNics || job.params.fabricNatNics.length === 0) {
        return cb(null, 'No fabric NATs to provision');
    }

    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    var nics = [];
    var netuuids = [];

    job.params.fabricNatTickets = [];

    // Uniquify by network_uuid, just in case: one ticket per fabric network.
    // (forEach rather than for..in - iterating an array with for..in is
    // error-prone and visits indices as strings.)
    job.params.fabricNatNics.forEach(function (fabricNic) {
        if (netuuids.indexOf(fabricNic.network_uuid) === -1) {
            nics.push(fabricNic);
            netuuids.push(fabricNic.network_uuid);
        }
    });

    cnapi.listServers({ headnode: true }, function (listErr, cns) {
        if (listErr) {
            cb(listErr);
            return;
        }

        if (!cns || cns.length === 0) {
            cb(new Error('Headnode not found in CNAPI'));
            return;
        }

        if (cns.length > 1) {
            // Stash the ambiguous list on the job for debugging, then fail.
            job.params.headnodes = cns;
            cb(new Error('More than 1 headnode found in CNAPI'));
            return;
        }

        // Create tickets serially so fabricNatTickets has a stable order.
        async.mapSeries(nics, function (nic, next) {
            var newTicket = {
                scope: 'fabric_nat',
                id: nic.network_uuid,
                // 10 minute expiry
                expires_at: (new Date(
                    Date.now() + 600 * 1000).toISOString())
            };

            cnapi.waitlistTicketCreate(cns[0].uuid, newTicket, onCreate);

            function onCreate(err, ticket) {
                if (err) {
                    next(err);
                    return;
                }

                // Fetch the ticket back to get its current status (create
                // only returns minimal data); ensure it's not expired or
                // invalid.
                cnapi.waitlistTicketGet(ticket.uuid,
                        function (geterr, getticket) {
                    if (geterr) {
                        next(geterr);
                        return;
                    }

                    job.params.fabricNatTickets.push({
                        nic: nic,
                        ticket: getticket
                    });
                    job.log.info(
                        { nic: nic, ticket: getticket },
                        'ticket status after create');
                    next();
                });
            }
        }, function (sErr) {
            if (sErr) {
                cb(sErr);
            } else {
                cb(null, 'Fabric nat tickets acquired');
            }
        });
    });
}
/*
 * Provision a NAT zone for each entry in job.params.fabricNatTickets (set by
 * acquireFabricTickets). Each entry's CNAPI waitlist ticket serializes NAT
 * provisioning per fabric network: we either provision immediately (ticket
 * active), or wait on the ticket; if a competing job provisioned the gateway
 * in the meantime, we skip the provision and release the ticket. Requires
 * the sdcClients, cnapiUrl, napiUrl and sapiUrl workflow globals, plus
 * job.params.sdc_nat_pool (the network pool NAT zones are provisioned on).
 */
function provisionFabricNats(job, cb) {
    if (!job.params.fabricNatTickets ||
        job.params.fabricNatTickets.length === 0) {
        return cb(null, 'No fabric NATs to provision');
    }

    if (!job.params.sdc_nat_pool) {
        // Not an error: NAT provisioning is simply not configured here.
        return cb(null, 'No fabric NAT pool configured for provisioning');
    }

    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    var napi = new sdcClients.NAPI({
        url: napiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    // UUID of the SAPI "nat" service; filled in by listServices below.
    var natSvc;
    var sapi = new sdcClients.SAPI({
        log: job.log.child({ component: 'sapi' }),
        url: sapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Release a waitlist ticket, then invoke tCb. A release failure is
    // logged, but an earlier error (tErr) always takes precedence so the
    // original failure is what the task reports.
    function releaseTicket(tErr, ticket, tCb) {
        cnapi.waitlistTicketRelease(ticket.uuid, function (relErr) {
            if (relErr) {
                job.log.error({ ticket: ticket, err: relErr },
                    'Error releasing ticket');
            }

            if (tErr) {
                tCb(tErr);
                return;
            }

            tCb(relErr);
            return;
        });
    }

    /*
     * Provision a new NAT zone through SAPI on two networks:
     * - the configured NAT network pool
     * - the fabric network that needs a NAT zone
     */
    function provisionNatZone(tick, done) {
        var fabricNic = tick.nic;

        // If we were waiting on a ticket because another NAT zone was being
        // provisioned and it succeeded, we don't need to provision another.
        napi.getNetwork(fabricNic.network_uuid, function (netErr, fNet) {
            if (netErr) {
                return done(netErr);
            }

            if (fNet.gateway_provisioned) {
                job.log.debug({ ticket: tick.ticket.uuid, net: fNet },
                    'Network already has gateway provisioned');
                // Mark the entry so waitForFabricNatProvisions skips it.
                tick.gateway_provisioned = true;
                return releaseTicket(null, tick.ticket, done);
            }

            var instParams = {
                metadata: {
                    'com.joyent:ipnat_subnet': fNet.subnet
                },
                params: {
                    alias: 'nat-' + fabricNic.network_uuid,
                    internal_metadata: {
                        'com.joyent:ipnat_owner': job.params.owner_uuid
                    },
                    networks: [
                        {
                            // External NIC on the NAT pool; primary, and
                            // allowed to spoof since it rewrites traffic.
                            uuid: job.params.sdc_nat_pool,
                            primary: true,
                            allow_ip_spoofing: true
                        },
                        {
                            // The NAT zone takes the fabric network's
                            // gateway address.
                            uuid: fabricNic.network_uuid,
                            ip: fabricNic.gateway,
                            allow_ip_spoofing: true
                        }
                    ],
                    // Hand the waitlist ticket to the provision job.
                    ticket: tick.ticket.uuid
                }
            };

            sapi.createInstanceAsync(natSvc, instParams,
                    function _afterSapiProv(createErr, inst) {
                if (createErr) {
                    return releaseTicket(createErr, tick.ticket, done);
                }

                job.log.info({ instance: inst, natSvc: natSvc },
                    'Created NAT instance');
                // Recorded for waitForFabricNatProvisions to poll on.
                tick.job_uuid = inst.job_uuid;
                tick.vm_uuid = inst.uuid;
                return done();
            });
        });
    }

    sapi.listServices({ name: 'nat' }, function (sapiErr, svcs) {
        if (sapiErr) {
            return cb(sapiErr);
        }

        if (!svcs || svcs.length === 0) {
            return cb(new Error('No "nat" service found in SAPI'));
        }

        if (svcs.length > 1) {
            return cb(new Error('More than one "nat" service found in SAPI'));
        }

        natSvc = svcs[0].uuid;
        job.log.info({ svc: natSvc, svcs: svcs }, 'svcs');

        async.forEach(job.params.fabricNatTickets, function (tick, next) {
            // An active ticket means it's our turn: provision immediately.
            if (tick.ticket.status === 'active') {
                return provisionNatZone(tick, next);
            }

            // Otherwise block until the ticket becomes active.
            cnapi.waitlistTicketWait(tick.ticket.uuid,
                    function _afterWait(tErr) {
                if (tErr) {
                    next(tErr);
                } else {
                    provisionNatZone(tick, next);
                }
            });
        }, function (aErr) {
            if (aErr) {
                cb(aErr);
            } else {
                cb(null, 'Provisioned fabric NATs');
            }
        });
    });
}
/*
 * Release the CNAPI waitlist ticket named by job.params.ticket. A missing
 * ticket parameter and a ResourceNotFound from CNAPI are both treated as
 * success, so this is safe to run as a cleanup/onerror task. Requires the
 * sdcClients and cnapiUrl workflow globals.
 */
function releaseFabricTicket(job, cb) {
    var ticket = job.params.ticket;

    if (!ticket) {
        return cb(null, 'No ticket to release');
    }

    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    cnapi.waitlistTicketRelease(ticket, function onRelease(err) {
        if (!err) {
            return cb(null, 'Released ticket ' + job.params.ticket);
        }

        // Someone already released (or expired) it: that's fine.
        if (err.code === 'ResourceNotFound') {
            return cb(null, 'Ticket released');
        }

        return cb(err);
    });
}
/*
 * Poll VMAPI until every NAT zone provisioned by provisionFabricNats is
 * "running". Entries whose gateway was provisioned by another job
 * (tick.gateway_provisioned) are skipped. Fails the task if any NAT zone
 * reaches the "failed" state. Requires the sdcClients and vmapiUrl
 * workflow globals.
 */
function waitForFabricNatProvisions(job, cb) {
    if (!job.params.fabricNatTickets ||
        job.params.fabricNatTickets.length === 0) {
        return cb(null, 'No fabric NATs provisioned');
    }

    // Filter out tickets that didn't end up needing a gateway provisioned
    var toWaitFor = job.params.fabricNatTickets.filter(function (t) {
        return !t.gateway_provisioned;
    });

    if (toWaitFor.length === 0) {
        return cb(null, 'No fabric NAT provisions left to wait for');
    }

    var vmapi = new sdcClients.VMAPI({
        url: vmapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Poll one NAT zone until it's running or failed.
    function checkVm(tick, done) {
        var uuid = tick.vm_uuid;
        vmapi.getVm({ uuid: uuid }, onVmapi);

        function onVmapi(err, vm, req, res) {
            if (err) {
                // BUG FIX: this previously called the task-level cb(err)
                // directly, completing the task while async.forEach still
                // had iterations outstanding - which could invoke cb more
                // than once. Report through done() instead.
                done(err);
            } else if (vm.state === 'running') {
                done();
            } else if (vm.state === 'failed') {
                done(new Error(
                    'NAT zone "' + vm.uuid + '" failed to provision'));
            } else {
                // Not in a terminal state yet: check again in a second.
                setTimeout(checkVm, 1000, tick, done);
            }
        }
    }

    async.forEach(toWaitFor, checkVm, function (aErr) {
        if (aErr) {
            cb(aErr);
        } else {
            cb(null, 'Fabric NATs running');
        }
    });
}
/*
 * Exported tasks plus prebuilt task definitions for embedding in provision
 * workflows. Each task's "modules" entry lists the globals the serialized
 * task body expects the workflow runner to inject.
 */
module.exports = {
    acquireFabricTickets: acquireFabricTickets,
    provisionFabricNats: provisionFabricNats,
    releaseFabricTicket: releaseFabricTicket,
    waitForFabricNatProvisions: waitForFabricNatProvisions,

    // Run in order during a machine provision: acquire per-network waitlist
    // tickets, then provision the NAT zones.
    provisionChain: [
        {
            name: 'cnapi.acquire_fabric_nat_tickets',
            timeout: 10,
            retry: 1,
            body: acquireFabricTickets,
            modules: { sdcClients: 'sdc-clients', async: 'async' }
        },
        {
            name: 'napi.provision_fabric_nats',
            timeout: 120,
            retry: 1,
            body: provisionFabricNats,
            modules: { sdcClients: 'sdc-clients', async: 'async' }
        }
    ],

    // Wait for the NAT zones to come up; run after provisionChain.
    provisionWaitTask: {
        name: 'cnapi.wait_for_fabric_nat_provisions',
        timeout: 600,
        retry: 1,
        body: waitForFabricNatProvisions,
        modules: { sdcClients: 'sdc-clients', async: 'async' }
    },

    // Release a single waitlist ticket (job.params.ticket); typically used
    // as a cleanup/onerror task.
    releaseTicketTask: {
        name: 'cnapi.release_fabric_nat_ticket',
        timeout: 60,
        retry: 1,
        body: releaseFabricTicket,
        modules: { sdcClients: 'sdc-clients' }
    }
};
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| /* | |
| * This Source Code Form is subject to the terms of the Mozilla Public | |
| * License, v. 2.0. If a copy of the MPL was not distributed with this | |
| * file, You can obtain one at http://mozilla.org/MPL/2.0/. | |
| */ | |
| /* | |
| * Copyright (c) 2015, Joyent, Inc. | |
| */ | |
| /* | |
| * A brief overview of this source file: what is its purpose. | |
| */ | |
| var restify = require('restify'); | |
| var async = require('async'); | |
| var papiUrl; | |
| /* | |
| * Validates that the request can be used to call CNAPI, i.e. that the cnapiUrl | |
| * and vm_uuid parameters are present | |
| */ | |
/*
 * Validates that the request can be used to call CNAPI: the cnapiUrl
 * workflow global and the vm_uuid job parameter must both be present.
 */
function validateForZoneAction(job, cb) {
    if (!cnapiUrl) {
        return cb('No CNAPI URL provided');
    }

    if (!job.params['vm_uuid']) {
        return cb('VM UUID is required');
    }

    return cb(null, 'All parameters OK!');
}
| /* | |
| * General purpose function to call a CNAPI endpoint. endpoint and requestMethod | |
| * are required. This function will post the job params object as params for the | |
| * CNAPI request. Additionally, this function will set a taskId property in the | |
| * job object so you can optionally poll the status of the task with pollTask | |
| */ | |
/*
 * General purpose function to call a CNAPI endpoint. job.endpoint and
 * job.requestMethod ('post', 'put' or 'del') are required. Posts the job
 * params (or job.params.payload when present) as the request body, and
 * stores the resulting CNAPI task id in job.taskId so completion can be
 * polled with pollTask. Requires the restify and cnapiUrl workflow globals.
 */
function zoneAction(job, cb) {
    if (job.params['skip_zone_action']) {
        cb(null, 'Skipping zoneAction');
        return;
    }

    if (!job.endpoint) {
        cb('No CNAPI endpoint provided');
        return;
    }

    if (!job.requestMethod) {
        cb('No HTTP request method provided');
        return;
    }

    // Not using sdc-clients to allow calling generic POST actions without
    // explicitly saying: startVm, stopVm, etc
    var cnapi = restify.createJsonClient({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Use payload when available
    var payload = job.params.payload || job.params;

    function callback(err, req, res, task) {
        if (err) {
            cb(err);
        } else {
            job.taskId = task.id;
            cb(null, 'Task id: ' + task.id + ' queued to CNAPI!');
        }
    }

    // Strict comparisons: requestMethod is always a plain string.
    if (job.requestMethod === 'post') {
        cnapi.post(job.endpoint, payload, callback);
    } else if (job.requestMethod === 'put') {
        cnapi.put(job.endpoint, payload, callback);
    } else if (job.requestMethod === 'del') {
        // DELETE requests carry no body.
        cnapi.del(job.endpoint, callback);
    } else {
        cb('Unsupported requestMethod: "' + job.requestMethod + '"');
    }
}
| /* | |
| * Polls the status of a CNAPI task. The two possible final states of a task | |
| * are failure and completed. taskId is required for this task, so this function | |
| * is commonly used in conjunction with zoneAction | |
| */ | |
/*
 * Polls the status of a CNAPI task. The two possible final states of a task
 * are 'failure' and 'complete'. job.taskId is required, so this function is
 * commonly used in conjunction with zoneAction. Requires the sdcClients and
 * cnapiUrl workflow globals.
 */
function pollTask(job, cb) {
    if (job.params['skip_zone_action']) {
        cb(null, 'Skipping pollTask');
        return;
    }

    if (!job.taskId) {
        cb('No taskId provided');
        return;
    }

    if (!cnapiUrl) {
        cb('No CNAPI URL provided');
        return;
    }

    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Repeat checkTask until task has finished
    checkTask();

    function checkTask() {
        cnapi.getTask(job.taskId, onCnapi);

        function onCnapi(err, task) {
            if (err) {
                cb(err);
            } else if (task.status === 'failure') {
                cb(new Error(getErrorMessage(task)));
            } else if (task.status === 'complete') {
                cb(null, 'Job succeeded!');
            } else {
                // Still running: poll again in a second.
                setTimeout(checkTask, 1000);
            }
        }
    }

    // Dig a human-readable message out of a failed CNAPI task's history:
    // prefer an explicit 'error' event; collect error-level log entries
    // from the 'finish' event as extra detail.
    // (Renamed from the misspelled "getErrorMesage"; internal only.)
    function getErrorMessage(task) {
        var message;
        var details = [];

        if (task.history !== undefined && task.history.length) {
            for (var i = 0; i < task.history.length; i++) {
                var event = task.history[i];

                if (event.name && event.name === 'error' && event.event &&
                        event.event.error) {
                    var err = event.event.error;

                    if (typeof (err) === 'string') {
                        message = err;
                        if (event.event.details && event.event.details.error) {
                            message += ', ' + event.event.details.error;
                        }
                    } else {
                        message = err.message;
                    }
                } else if (event.name && event.name === 'finish' &&
                        event.event && event.event.log &&
                        event.event.log.length) {
                    for (var j = 0; j < event.event.log.length; j++) {
                        var logEvent = event.event.log[j];

                        if (logEvent.level && logEvent.level === 'error') {
                            details.push(logEvent.message);
                        }
                    }
                }
            }
        }

        // Apparently the task doesn't have any message for us...
        // (also fixes the "occured" typo in the fallback message)
        if (message === undefined) {
            message = 'Unexpected error occurred';
        } else if (details.length) {
            message += ': ' + details.join(', ');
        }

        return message;
    }
}
| /* | |
| * Checks (polls) the state of a machine in VMAPI. It is used for provisions and | |
| * VM actions such as reboot and shutdown. This task will only succeed if the VM | |
| * reaches the expected state, which can be passed as job.expects on a previous | |
| * task. vm_uuid and vmapiUrl are also required | |
| */ | |
/*
 * Checks (polls) the state of a machine in VMAPI. Used for provisions and
 * VM actions such as reboot and shutdown. Only succeeds once the VM reaches
 * the state named by job.expects (set by a previous task). vm_uuid and the
 * vmapiUrl/sdcClients workflow globals are also required.
 */
function checkState(job, cb) {
    if (job.params['skip_zone_action']) {
        return cb(null, 'Skipping checkState');
    }

    // For now don't fail the job if this parameter is not present
    if (!job.expects) {
        return cb(null, 'No \'expects\' state parameter provided');
    }

    if (!job.params['vm_uuid']) {
        return cb('No VM UUID provided');
    }

    if (!vmapiUrl) {
        return cb('No VMAPI URL provided');
    }

    var vmapi = new sdcClients.VMAPI({
        url: vmapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Poll once a second until the VM reports the expected state.
    pollVm();

    function pollVm() {
        vmapi.getVm({ uuid: job.params['vm_uuid'] }, function (err, vm) {
            if (err) {
                cb(err);
            } else if (vm.state == job.expects) {
                cb(null, 'VM is now ' + job.expects);
            } else {
                setTimeout(pollVm, 1000);
            }
        });
    }
}
| /* | |
| * Checks (polls) that the machine has been updated at all. When any of the VM | |
| * properties are updated (such as ram or metadata), the last_modified timestamp | |
| * of the VM changes. This let us poll the VM endpoint until the changes on its | |
| * properties have been propagated | |
| */ | |
/*
 * Checks (polls) that the machine has been updated at all. When any of the
 * VM properties are updated (such as ram or metadata), the last_modified
 * timestamp of the VM changes. This lets us poll the VM endpoint until the
 * changes on its properties have been propagated. Requires the vm_uuid and
 * last_modified job params plus the vmapiUrl/sdcClients workflow globals.
 */
function checkUpdated(job, cb) {
    if (!job.params['vm_uuid']) {
        cb('No VM UUID provided');
        return;
    }

    if (!vmapiUrl) {
        cb('No VMAPI URL provided');
        return;
    }

    if (!job.params['last_modified']) {
        cb('No VM last_modified timestamp provided');
        return;
    }

    var oldDate = new Date(job.params['last_modified']);
    var vmapi = new sdcClients.VMAPI({
        url: vmapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Repeat checkVm until VM data is updated
    checkVm();

    function checkVm() {
        vmapi.listVms({ uuid: job.params['vm_uuid'] }, onVmapi);

        function onVmapi(err, vms, req, res) {
            if (err) {
                cb(err);
            } else if (vms.length && (vms[0].uuid === job.params['vm_uuid'])) {
                var newDate = new Date(vms[0]['last_modified']);

                if (newDate > oldDate) {
                    cb(null, 'VM data has been updated');
                } else {
                    setTimeout(checkVm, 1000);
                }
            } else {
                // BUG FIX: an empty or mismatched listVms result previously
                // fell through without rescheduling, hanging the task until
                // the workflow timeout. Retry instead.
                setTimeout(checkVm, 1000);
            }
        }
    }
}
| /* | |
| * Posts back the job parameters to the list of URLs that were passed to the | |
| * job. Note that this is a task itself, so the job execution state might not | |
| * be exactly that. We use this task at the end of a job (or as an onError | |
| * callback) to let anybody know if machine provision was successful and get | |
| * access to the parameters of the job. We might want to generalize this task | |
| * as an operation that can be optionally executed as a 'final' task for the job | |
| * but it's separate from the job tasks themselves | |
| */ | |
/*
 * Posts the job parameters back to the URLs passed to the job, always
 * including VMAPI's /job_results endpoint, with an "execution" field of
 * job.postBackState (default 'succeeded'). Best-effort: POST failures are
 * logged but never fail the task. Requires the restify, async, urlModule
 * and vmapiUrl workflow globals.
 */
function postBack(job, cb) {
    if (job.markAsFailedOnError === false) {
        return cb(null, 'markAsFailedOnError was set to false, ' +
            'won\'t postBack provision failure to VMAPI');
    }

    var urls = job.params['post_back_urls'];
    var vmapiPath = vmapiUrl + '/job_results';

    // By default, post back to VMAPI
    if (urls === undefined || !Array.isArray(urls)) {
        urls = [ vmapiPath ];
    } else {
        // NOTE(review): this push mutates job.params.post_back_urls in
        // place, so re-running this task would append the VMAPI path again.
        urls.push(vmapiPath);
    }

    var obj = clone(job.params);
    obj.execution = job.postBackState || 'succeeded';

    async.mapSeries(urls, function (url, next) {
        var p = urlModule.parse(url);
        var api = restify.createJsonClient({
            url: p.protocol + '//' + p.host,
            headers: { 'x-request-id': job.params['x-request-id'] }
        });

        api.post(p.pathname, obj, onResponse);

        function onResponse(err, req, res) {
            return next(err);
        }
    }, function (err2) {
        if (err2) {
            // Best-effort: log the failure but report task success anyway.
            var errObject = { err: err2, urls: urls };
            job.log.info(errObject, 'Error posting back to URLs');
            cb(null, 'Could not post back job results. See /info object');
        } else {
            cb(null, 'Posted job results back to specified URLs');
        }
    });

    // Shallow clone for the job.params object
    // NOTE(review): uses theObj.constructor() to build the copy, which
    // assumes a plain object with the default constructor - confirm
    // job.params is never created with Object.create(null).
    function clone(theObj) {
        if (null === theObj || 'object' != typeof (theObj)) {
            return theObj;
        }

        var copy = theObj.constructor();

        for (var attr in theObj) {
            if (theObj.hasOwnProperty(attr)) {
                copy[attr] = theObj[attr];
            }
        }

        return copy;
    }
}
| /* | |
| * Gets a list of NIC tags after validating every network provided | |
| */ | |
/*
 * Gets a list of NIC tags after validating every network provided.
 *
 * Resolves each entry in job.params.networks (by uuid or by name, as either
 * a network or a network pool) against NAPI, repopulates job.params.networks
 * with uuid/ipv4_uuid filled in, and collects the matching NIC tags in
 * job.nicTags. Requires the sdcClients, async and napiUrl workflow globals.
 */
function validateNetworks(job, cb) {
    var networks = job.params.networks;

    // add-nics also calls this function, but if macs are provided we don't
    // necessarily need to progress further
    if (job.params.macs && !networks) {
        return cb();
    }

    var newNetworks = [];

    if (!networks) {
        return cb('Networks are required');
    }

    var napi = new sdcClients.NAPI({
        url: napiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    job.nicTags = [];

    // - Resolve network names to uuids when applicable
    // - Repopulate job.params.networks
    // - Returns cb(err, uuid). uuid is present when name was resolved to uuid
    function getNetwork(netId, callback) {
        /*JSSTYLED*/
        var UUID_RE = /^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$/;
        var netFn, poolFn;
        var params;

        // Network can be either by name or uuid; pick the matching pair of
        // NAPI lookup functions (get* for uuids, list* for names).
        if (UUID_RE.test(netId)) {
            params = netId;
            netFn = napi.getNetwork;
            poolFn = napi.getNetworkPool;
        } else {
            params = { name: netId };
            netFn = napi.listNetworks;
            poolFn = napi.listNetworkPools;
        }

        netFn.call(napi, params, function (err, nets) {
            // If network is not found then it might be a network pool
            if (err && err.name !== 'ResourceNotFoundError') {
                return callback(err);
            }

            // Did we get the network from list or get?
            var net = (Array.isArray(nets) ? nets[0] : nets);

            // No net if NAPI returns an empty array or if we got a 404
            if (net) {
                job.nicTags.push(net.nic_tag);
                return callback(null, net.uuid);
            }

            // We might be trying to provision on a network pool, so
            // try that instead
            poolFn.call(napi, params, function (err2, pools) {
                if (err2) {
                    return callback(err2);
                }

                // NAPI-121: listNetworkPools should accept a name, but until
                // then filter the full pool list by name ourselves.
                if (Array.isArray(pools)) {
                    pools = pools.filter(function (pool) {
                        return pool.name === netId;
                    });

                    if (pools.length) {
                        job.nicTags.push(pools[0].nic_tag);
                        callback(null, pools[0].uuid);
                    } else {
                        callback(new Error('No such Network Pool with ' +
                            'name: ' + netId));
                    }
                } else {
                    // getNetworkPool path: a single pool object.
                    job.nicTags.push(pools.nic_tag);
                    callback(null, pools.uuid);
                }
            });
        });
    }

    async.mapSeries(networks, function (network, next) {
        var netId;

        // Prefer an explicit uuid over a name.
        if (network.ipv4_uuid !== undefined) {
            netId = network.ipv4_uuid;
        } else if (network.name !== undefined) {
            netId = network.name;
        }

        getNetwork(netId, function (err, uuid) {
            if (err) {
                next(err);
            } else {
                // Normalize: both uuid and ipv4_uuid carry the resolved
                // network (or pool) uuid from here on.
                network.uuid = uuid;
                network.ipv4_uuid = uuid;
                newNetworks.push(network);
                next();
            }
        });
    }, function (err2) {
        if (err2) {
            cb(err2);
        } else {
            job.params.networks = newNetworks;
            job.log.info({ nicTags: job.nicTags }, 'NIC Tags retrieved');
            cb(null, 'Networks are valid');
        }
    });
}
| /* | |
| * Provisions a list of NICs for the soon to be provisioned machine. | |
| * | |
| * The networks list can contain a not null ip attribute on each object, which | |
| * denotes that we want to allocate that given IP for the correspondant network. | |
| * This task should be executed after DAPI has allocated a server. | |
| * | |
| * If there's at least one NIC with "belongs_to_uuid" set to this machine, then | |
| * don't provision any new NICs. | |
| */ | |
/*
 * Provisions a list of NICs for the soon to be provisioned machine.
 *
 * Skips networks that already have a NIC belonging to this machine (they may
 * have been provisioned ahead of time). Also records fabric NICs that still
 * need a NAT gateway in job.params.fabricNatNics for acquireFabricTickets /
 * provisionFabricNats to consume. Should be executed after DAPI has
 * allocated a server. Requires the sdcClients, async and napiUrl workflow
 * globals.
 */
function provisionNics(job, cb) {
    var networks = job.params.networks;

    if (networks === undefined) {
        cb('Networks are required');
        return;
    }

    var napi = new sdcClients.NAPI({
        url: napiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Every NIC we provision is added to this array
    var nics = [];
    var primaryFound = false;

    job.params.fabricNatNics = [];

    networks.forEach(function (net) {
        if (net.primary)
            primaryFound = true;

        // Make absolutely sure we're never overriding NAPI's network
        // owner checks:
        delete net.check_owner;
    });

    // Default the first network to primary when the caller didn't pick one.
    if (!primaryFound && networks.length > 0)
        networks[0].primary = true;

    // Return a new copy for every time we provision a new NIC and avoid
    // accidentally reusing an object
    function nicParams() {
        return {
            owner_uuid: job.params.owner_uuid,
            belongs_to_uuid: job.params.uuid || job.params.vm_uuid,
            belongs_to_type: 'zone',
            cn_uuid: job.params.server_uuid
        };
    }

    // If this is a nic on a fabric and has no gateway provisioned, add it
    // (the NIC holding the gateway IP itself is the NAT zone's own NIC,
    // hence the ip !== gateway check).
    function addFabricNatNic(fNic) {
        if (fNic && fNic.fabric && fNic.gateway && !fNic.gateway_provisioned &&
            fNic.ip !== fNic.gateway) {
            job.params.fabricNatNics.push(fNic);
        }
    }

    // Get current list of NICs that might have been provisioned ahead of time
    napi.listNics(nicParams(), function (err, res) {
        if (err) {
            cb(err);
            return;
        }

        return asyncProvisionNics(res);
    });

    function asyncProvisionNics(currentNics) {
        async.mapSeries(networks, function (network, next) {
            // If there is at least one provisioned NIC in one of the networks
            // provided, skip napi.provisionNic for this network
            var netNics = currentNics.filter(function (nic) {
                return (nic.network_uuid && nic.network_uuid ===
                    network.ipv4_uuid);
            });

            if (netNics.length > 0) {
                nics = nics.concat(netNics);
                next();
                return;
            }

            var antiSpoofParams = ['allow_dhcp_spoofing', 'allow_ip_spoofing',
                'allow_mac_spoofing', 'allow_restricted_traffic'];
            var params = nicParams();

            // An explicit IP request takes the first entry of ipv4_ips.
            if (network.ipv4_ips !== undefined)
                params.ip = network.ipv4_ips[0];
            if (network.primary !== undefined)
                params.primary = network.primary;

            // Pass through any anti-spoof overrides from the network entry.
            antiSpoofParams.forEach(function (spoofParam) {
                if (network.hasOwnProperty(spoofParam)) {
                    params[spoofParam] = network[spoofParam];
                }
            });

            napi.provisionNic(network.ipv4_uuid, params,
                    function (suberr, nic) {
                if (suberr) {
                    next(suberr);
                } else {
                    nics.push(nic);
                    // Track fabric NICs that still need a NAT gateway.
                    addFabricNatNic(nic);
                    next();
                }
            });
        }, function (err2) {
            if (err2) {
                cb(err2);
            } else {
                job.params.nics = nics;
                job.log.info({ nics: job.params.nics }, 'NICs allocated');
                cb(null, 'NICs allocated');
            }
        });
    }
}
| /* | |
| * Provisions additional NICs for a zone in NAPI if networks were provided to | |
| * the job. If macs were provided, load those from NAPI instead. | |
| * | |
| * The networks list can contain a not null ip attribute on each object, which | |
| * denotes that we want to allocate that given IP for the correspondent network. | |
| */ | |
/*
 * Provisions additional NICs for a zone in NAPI if networks were provided to
 * the job; if macs were provided, loads those NICs from NAPI instead. The
 * resulting NIC objects are stored in job.params['add_nics']. Each networks
 * entry may carry a non-null ip attribute requesting that specific address.
 * Requires the sdcClients, async and napiUrl workflow globals.
 */
function addNics(job, cb) {
    var networks = job.params.networks;
    var macs = job.params.macs;

    if (networks === undefined && macs === undefined) {
        cb('Networks or mac are required');
        return;
    }

    var napi = new sdcClients.NAPI({
        url: napiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    var nics = [];

    // Shared completion handler for both the macs and networks paths.
    function done(err) {
        if (err) {
            cb(err);
        } else {
            job.log.info({ nics: nics }, 'NICs allocated');
            job.params['add_nics'] = nics;
            cb(null, 'NICs looked up or allocated');
        }
    }

    if (macs) {
        // NICs were pre-provisioned: just look them up by MAC address.
        async.mapSeries(macs, function (mac, next) {
            napi.getNic(mac, function (err, nic) {
                if (err) {
                    return next(err);
                }

                nics.push(nic);
                next();
            });
        }, done);
    } else {
        async.mapSeries(networks, function (network, next) {
            var params = {
                owner_uuid: job.params.owner_uuid,
                belongs_to_uuid: job.params.uuid || job.params.vm_uuid,
                belongs_to_type: 'zone',
                state: 'provisioning'
            };
            var antiSpoofParams = ['allow_dhcp_spoofing', 'allow_ip_spoofing',
                'allow_mac_spoofing', 'allow_restricted_traffic'];

            // BUG FIX: ip and primary are independent settings (cf. the same
            // logic in provisionNics), but were previously chained with
            // "else if", which silently dropped "primary" whenever an
            // explicit IP was requested.
            if (network.ip !== undefined) {
                params.ip = network.ip;
            }
            if (network.primary !== undefined) {
                params.primary = network.primary;
            }

            // Pass through any anti-spoof overrides from the network entry.
            antiSpoofParams.forEach(function (spoofParam) {
                if (network.hasOwnProperty(spoofParam)) {
                    params[spoofParam] = network[spoofParam];
                }
            });

            napi.provisionNic(network.uuid, params, function (err, nic) {
                if (err) {
                    next(err);
                } else {
                    nics.push(nic);
                    next();
                }
            });
        }, done);
    }
}
| /* | |
| * Exactly the same as removeNics but used as a fallback task for provision and | |
| * add-nics. Those tasks set either a nics or add-nics object to the params. | |
| * In addition we don't throw an error if the NICs were not added at all | |
| */ | |
/*
 * Exactly the same as removeNics but used as a fallback task for provision
 * and add-nics (those tasks set either a nics or add_nics object on the
 * params). Unlike removeNics, it is not an error if no NICs were added at
 * all. Requires the sdcClients, async and napiUrl workflow globals.
 */
function cleanupNics(job, cb) {
    // If this is false, cnapi.pollTask succeeded, so the VM exists
    // physically whether or not its provision failed: leave its NICs alone.
    if (job.markAsFailedOnError === false) {
        return cb(null, 'markAsFailedOnError was set to false, ' +
            'won\'t cleanup VM NICs');
    }

    var nics = job.params['add_nics'] || job.params['nics'];

    if (nics === undefined) {
        return cb(null, 'No NICs were provisioned');
    }

    var napi = new sdcClients.NAPI({
        url: napiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    async.mapSeries(nics, function (nic, next) {
        napi.deleteNic(nic.mac, function (delErr) {
            next(delErr || null);
        });
    }, function (delErr2) {
        if (delErr2) {
            return cb(delErr2);
        }

        return cb(null, 'NICs removed');
    });
}
| /* | |
| * Lists the nics that already exist for a VM, and uses that list to | |
| * update the routes. | |
| */ | |
/*
 * Lists the NICs that already exist for a VM and combines them with the
 * newly added ones (job.params.add_nics) to refresh the job's network
 * parameters: merges unique resolvers into job.params.resolvers (when
 * job.params.wantResolvers is set) and collects per-NIC routes into
 * job.params.set_routes. Requires the sdcClients and napiUrl workflow
 * globals.
 */
function updateNetworkParams(job, cb) {
    var toAdd = job.params.add_nics;

    if (toAdd === undefined) {
        return cb('add_nics are required');
    }

    // Start from the old resolver list and append any new, unseen ones
    // contributed by the added NICs.
    var resolvers = job.params.oldResolvers || [];
    job.log.info(job.params.oldResolvers, 'oldResolvers');

    toAdd.forEach(function (addedNic) {
        var nicResolvers = addedNic['resolvers'];

        if (nicResolvers !== undefined && Array.isArray(nicResolvers)) {
            nicResolvers.forEach(function (resolver) {
                if (resolvers.indexOf(resolver) === -1) {
                    resolvers.push(resolver);
                }
            });
        }
    });

    if (job.params.wantResolvers && resolvers.length !== 0) {
        job.params.resolvers = resolvers;
    }

    var napi = new sdcClients.NAPI({
        url: napiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    napi.listNics({
        belongs_to_uuid: job.params.uuid || job.params.vm_uuid,
        belongs_to_type: 'zone'
    }, function (err, existingNics) {
        if (err) {
            return cb(err);
        }

        // Merge routes across all NICs (existing plus added); the first NIC
        // to define a destination wins.
        var routes = {};

        existingNics.concat(toAdd).forEach(function (nic) {
            if (nic['routes'] !== undefined &&
                typeof (nic['routes']) === 'object') {
                for (var r in nic['routes']) {
                    if (!routes.hasOwnProperty(r)) {
                        routes[r] = nic['routes'][r];
                    }
                }
            }
        });

        if (Object.keys(routes).length !== 0) {
            job.params.set_routes = routes;
        }

        return cb(null, 'Added network parameters to payload');
    });
}
| /* | |
| * Updates FWAPI with the current VM's parameters | |
| */ | |
/*
 * Updates FWAPI with the current VM's parameters. Sends a vm.add /
 * vm.update / vm.delete update depending on job.params.task, copying only
 * the firewall-relevant VM properties; skips the call entirely when none of
 * those properties are present (except on destroy). FWAPI errors are logged
 * but never fail the task. Requires the sdcClients and fwapiUrl workflow
 * globals.
 */
function updateFwapi(job, cb) {
    var fwapi = new sdcClients.FWAPI({
        url: fwapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    var jobParams = job.params.payload || job.params;
    var update = {};
    var vmProps = ['add_nics', 'firewall_enabled', 'nics', 'remove_ips',
        'remove_nics', 'remove_tags', 'set_tags', 'tags'];

    // Map the workflow task to an FWAPI update type.
    var type;
    if (job.params.task === 'provision') {
        type = 'vm.add';
    } else if (job.params.task === 'destroy') {
        type = 'vm.delete';
    } else {
        type = 'vm.update';
    }

    // Copy over only the properties FWAPI cares about.
    vmProps.forEach(function (prop) {
        if (jobParams.hasOwnProperty(prop)) {
            update[prop] = jobParams[prop];
        }
    });

    job.log.info({ jobParams: jobParams, update: update }, 'update params');

    if (Object.keys(update).length === 0 && job.params.task !== 'destroy') {
        return cb(null, 'No properties affecting FWAPI found: not updating');
    }

    update.owner_uuid = jobParams.owner_uuid;
    update.server_uuid = jobParams.server_uuid;
    update.type = type;
    update.uuid = jobParams.uuid || jobParams.vm_uuid || job.params.vm_uuid;

    return fwapi.createUpdate(update, function (err, obj) {
        if (err) {
            // Firewall updates are best-effort: log and report success.
            job.log.warn(err, 'Error sending update to FWAPI');
            return cb(null, 'Error updating FWAPI');
        }

        return cb(null, 'Updated FWAPI with update UUID: ' + obj.update_uuid);
    });
}
/*
 * Lists the nics that already exist for a VM, and uses that list to
 * delete the routes for that network.
 *
 * Required inputs on job.params:
 *   - remove_nics: array of MAC addresses of the nics being removed
 *   - oldMacs: complete list of the VM's MAC addresses, in nic order
 *
 * Results set on job.params:
 *   - remove_routes: routes belonging to networks that will have no
 *     remaining nic after the removal
 *   - resolvers: de-duplicated resolvers of the kept nics, ordered by
 *     oldMacs (or [] when wantResolvers is falsy or none remain)
 *   - remove_ips: IPs of the nics being removed
 */
function removeNetworkParams(job, cb) {
    var macs = job.params['remove_nics'];
    if (macs === undefined) {
        cb('MAC addresses are required');
        return;
    }
    var oldMacs = job.params.oldMacs;
    if (oldMacs === undefined) {
        cb('Complete list of VM MAC addresses is required');
        return;
    }
    var napi = new sdcClients.NAPI({
        url: napiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    // Fetch every nic currently owned by this VM (zone)
    var params = {
        belongs_to_uuid: job.params.uuid || job.params.vm_uuid,
        belongs_to_type: 'zone'
    };
    napi.listNics(params, function (err, res) {
        if (err) {
            cb(err);
            return;
        }
        var i, j, nic;
        var del = [];
        var keepNetworks = [];
        var routes = [];
        var resolversHash = {};
        // Partition nics into removed (del) and kept; remember the kept
        // nics' networks and resolvers (keyed by MAC) for later.
        for (i = 0; i < res.length; i++) {
            nic = res[i];
            if (macs.indexOf(nic.mac) !== -1) {
                del.push(nic);
            } else {
                keepNetworks.push(nic.network_uuid);
                if (nic.resolvers !== undefined &&
                    Array.isArray(nic.resolvers)) {
                    resolversHash[nic.mac] = nic.resolvers;
                }
            }
        }
        job.log.info(res, 'res');
        job.log.info(del, 'del');
        job.log.info(keepNetworks, 'keepNets');
        for (i = 0; i < del.length; i++) {
            nic = del[i];
            // Only delete the routes if there are no other nics on the
            // same network (which therefore have the same routes)
            if (nic['routes'] !== undefined &&
                typeof (nic['routes']) === 'object' &&
                keepNetworks.indexOf(nic.network_uuid) === -1) {
                for (var r in nic['routes']) {
                    if (routes.indexOf(r) === -1) {
                        routes.push(r);
                    }
                }
            }
        }
        if (routes.length !== 0) {
            job.params.remove_routes = routes;
        }
        // We iterate over oldMacs since it has the correct order for the NICs
        // If the MAC is not in the resolversHash then we don't add its resolver
        var mac, resolver;
        var resolvers = [];
        for (i = 0; i < oldMacs.length; i++) {
            mac = oldMacs[i];
            if (resolversHash[mac] !== undefined &&
                Array.isArray(resolversHash[mac])) {
                for (j = 0; j < resolversHash[mac].length; j++) {
                    resolver = resolversHash[mac][j];
                    if (resolvers.indexOf(resolver) === -1) {
                        resolvers.push(resolver);
                    }
                }
            }
        }
        if (job.params.wantResolvers && resolvers.length !== 0) {
            job.params.resolvers = resolvers;
        } else {
            job.params.resolvers = [];
        }
        job.params.remove_ips = del.map(function (n) { return n.ip; });
        return cb(null, 'Added network parameters to payload');
    });
}
/*
 * Calls VMAPI with ?sync=true so we force a cache refresh of the VM. Only being
 * used by the snapshot workflows until the last_modified timestamp change is
 * checked in
 */
function refreshVm(job, cb) {
    if (!job.params['vm_uuid']) {
        return cb('No VM UUID provided');
    }
    if (!vmapiUrl) {
        return cb('No VMAPI URL provided');
    }

    var client = restify.createJsonClient({
        url: vmapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // sync=true makes VMAPI refresh its cached copy before responding
    client.get('/vms/' + job.params['vm_uuid'] + '?sync=true',
            function (err, req, res, vm) {
        if (err) {
            return cb(err);
        }
        return cb(null, 'VM data refreshed, new VM state is ' + vm.state);
    });
}
/*
 * Used by start/stop/reboot actions to ensure the VM is in a required state
 * before calling the correspondent action.
 *
 * Fails the task when the VM's current state (from VMAPI) is not the one
 * required for job.params.task ('start' needs 'stopped'; 'stop' and
 * 'reboot' need 'running').
 */
function ensureVmState(job, cb) {
    var vmapi = new sdcClients.VMAPI({
        url: vmapiUrl,
        // Fix: pass the request id along like every other client in this
        // file so VMAPI calls can be correlated with this job in the logs
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    // State the VM must currently be in for each action to make sense
    var desiredStates = {
        'start': 'stopped',
        'stop': 'running',
        'reboot': 'running'
    };
    vmapi.getVm({ uuid: job.params['vm_uuid'] }, function (err, vm, req, res) {
        if (err) {
            cb(err);
        } else if (vm.state !== desiredStates[job.params.task]) {
            cb(new Error('Cannot ' + job.params.task + ' a VM from a \'' +
                vm.state + '\' state'));
        } else {
            cb(null, 'VM is ' + vm.state);
        }
        return;
    });
}
/*
 * Creates a CNAPI waitlist ticket scoped to this VM so that concurrent
 * actions on the same VM are serialized. The fetched ticket (with its
 * current status) is stored on job.ticket for waitOnVMTicket.
 */
function acquireVMTicket(job, cb) {
    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    var ticket = {
        scope: 'vm',
        id: job.params.vm_uuid,
        // Auto-expire after 10 minutes so a crashed job cannot wedge the
        // queue forever
        expires_at: (new Date(Date.now() + 600 * 1000).toISOString()),
        action: job.action
    };

    if (job.action === 'provision') {
        // Extra data DAPI uses to account for in-flight provisions when
        // placing subsequent VMs
        ticket.extra = {
            workflow_job_uuid: job.uuid,
            owner_uuid: job.params.owner_uuid,
            max_physical_memory: job.params.max_physical_memory,
            cpu_cap: job.params.cpu_cap,
            quota: job.params.quota,
            brand: job.params.brand
        };

        if (job.params.brand === 'kvm' && job.params.image) {
            ticket.extra.image_size = job.params.image.image_size;
        }
    }

    cnapi.waitlistTicketCreate(job.params.server_uuid, ticket,
            function (err, created) {
        if (err) {
            return cb(err);
        }

        // look up ticket, ensure it's not expired etc
        cnapi.waitlistTicketGet(created.uuid, function (getErr, fetched) {
            if (getErr) {
                return cb(getErr);
            }
            job.ticket = fetched;
            job.log.info({ ticket: fetched }, 'ticket status after wait');
            cb();
        });
    });
}
/*
 * Blocks until the VM waitlist ticket acquired by acquireVMTicket becomes
 * active (i.e. it is this job's turn to act on the VM).
 */
function waitOnVMTicket(job, cb) {
    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Already at the head of the queue: nothing to wait for
    if (job.ticket.status === 'active') {
        return cb();
    }
    cnapi.waitlistTicketWait(job.ticket.uuid, cb);
}
/*
 * Releases the VM waitlist ticket (if any) so the next queued action on
 * this VM can proceed. A release failure is logged and propagated to cb.
 */
function releaseVMTicket(job, cb) {
    // Nothing was acquired (e.g. an earlier task failed first)
    if (!job.ticket) {
        return cb();
    }

    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    cnapi.waitlistTicketRelease(job.ticket.uuid, function (err) {
        if (err) {
            job.log.warn({ err: err, ticket: job.ticket },
                'error releasing CNAPI waitlist VM ticket');
        }
        cb(err);
    });
}
/*
 * Creates the global 'vm-allocate' waitlist ticket (held on the headnode)
 * used to serialize DAPI server allocations. The fetched ticket is stored
 * on job.allocationTicket for waitOnAllocationTicket.
 */
function acquireAllocationTicket(job, cb) {
    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // Allocation tickets are serialized globally through the headnode
    cnapi.listServers({ headnode: true }, function (listErr, headnodes) {
        if (listErr) {
            return cb(listErr);
        }
        if (!headnodes || !headnodes.length) {
            return cb(new Error('no headnodes returned'));
        }

        var ticket = {
            scope: 'vm-allocate',
            id: 'global',
            // Short expiry: allocation is quick, and a stale ticket here
            // would block every other provision
            expires_at: (new Date(Date.now() + 60 * 1000).toISOString()),
            action: 'allocate',
            workflow_job_uuid: job.uuid
        };

        cnapi.waitlistTicketCreate(headnodes[0].uuid, ticket,
                function (createErr, created) {
            if (createErr) {
                return cb(createErr);
            }

            cnapi.waitlistTicketGet(created.uuid,
                    function (getErr, fetched) {
                if (getErr) {
                    return cb(getErr);
                }
                job.allocationTicket = fetched;
                cb();
            });
        });
    });
}
/*
 * Blocks until the global allocation ticket acquired by
 * acquireAllocationTicket becomes active.
 */
function waitOnAllocationTicket(job, cb) {
    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });
    var ticket = job.allocationTicket;

    // Ticket already active: no queueing necessary
    if (ticket.status === 'active') {
        return cb();
    }
    cnapi.waitlistTicketWait(ticket.uuid, cb);
}
/*
 * Releases the global allocation ticket (if any). Release is best-effort:
 * a failure is logged but does not fail the job, since the ticket has a
 * short expiry and will clear itself.
 */
function releaseAllocationTicket(job, cb) {
    // Nothing was acquired, so there is nothing to release
    if (!job.allocationTicket) {
        return cb();
    }

    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    cnapi.waitlistTicketRelease(job.allocationTicket.uuid, function (err) {
        if (err) {
            // Fix: log the *allocation* ticket (the old code logged
            // job.ticket, the VM ticket, by copy-paste mistake).
            job.log.warn({ err: err, ticket: job.allocationTicket },
                'error releasing CNAPI waitlist allocation ticket');
        }
        // Fix: always invoke cb. The old code returned without calling cb
        // on error, leaving the workflow task hanging until its timeout.
        cb();
    });
}
/*
 * Every shared task is exported so the individual workflow definitions
 * (provision, start, stop, add-nics, ...) can assemble them into their
 * task chains.
 */
module.exports = {
    validateForZoneAction: validateForZoneAction,
    zoneAction: zoneAction,
    pollTask: pollTask,
    checkState: checkState,
    checkUpdated: checkUpdated,
    postBack: postBack,
    provisionNics: provisionNics,
    addNics: addNics,
    cleanupNics: cleanupNics,
    validateNetworks: validateNetworks,
    updateNetworkParams: updateNetworkParams,
    updateFwapi: updateFwapi,
    removeNetworkParams: removeNetworkParams,
    refreshVm: refreshVm,
    ensureVmState: ensureVmState,
    acquireVMTicket: acquireVMTicket,
    waitOnVMTicket: waitOnVMTicket,
    releaseVMTicket: releaseVMTicket,
    acquireAllocationTicket: acquireAllocationTicket,
    waitOnAllocationTicket: waitOnAllocationTicket,
    releaseAllocationTicket: releaseAllocationTicket
};
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # | |
| # Configuration File for JavaScript Lint | |
| # | |
| # This configuration file can be used to lint a collection of scripts, or to enable | |
| # or disable warnings for scripts that are linted via the command line. | |
| # | |
| ### Warnings | |
| # Enable or disable warnings based on requirements. | |
| # Use "+WarningName" to display or "-WarningName" to suppress. | |
| # | |
| +ambiguous_else_stmt          # the else statement could be matched with one of multiple if statements (use curly braces to indicate intent) | |
| +ambiguous_nested_stmt # block statements containing block statements should use curly braces to resolve ambiguity | |
| +ambiguous_newline # unexpected end of line; it is ambiguous whether these lines are part of the same statement | |
| -anon_no_return_value # anonymous function does not always return value | |
| +assign_to_function_call # assignment to a function call | |
| -block_without_braces # block statement without curly braces | |
| +comma_separated_stmts # multiple statements separated by commas (use semicolons?) | |
| -comparison_type_conv # comparisons against null, 0, true, false, or an empty string allowing implicit type conversion (use === or !==) | |
| +default_not_at_end # the default case is not at the end of the switch statement | |
| +dup_option_explicit # duplicate "option explicit" control comment | |
| +duplicate_case_in_switch # duplicate case in switch statement | |
| +duplicate_formal # duplicate formal argument {name} | |
| +empty_statement # empty statement or extra semicolon | |
| +identifier_hides_another     # identifier {name} hides an identifier in a parent scope | |
| -inc_dec_within_stmt # increment (++) and decrement (--) operators used as part of greater statement | |
| +incorrect_version # Expected /*jsl:content-type*/ control comment. The script was parsed with the wrong version. | |
| +invalid_fallthru # unexpected "fallthru" control comment | |
| +invalid_pass # unexpected "pass" control comment | |
| +jsl_cc_not_understood # couldn't understand control comment using /*jsl:keyword*/ syntax | |
| +leading_decimal_point # leading decimal point may indicate a number or an object member | |
| +legacy_cc_not_understood # couldn't understand control comment using /*@keyword@*/ syntax | |
| +meaningless_block # meaningless block; curly braces have no impact | |
| +mismatch_ctrl_comments # mismatched control comment; "ignore" and "end" control comments must have a one-to-one correspondence | |
| +misplaced_regex # regular expressions should be preceded by a left parenthesis, assignment, colon, or comma | |
| +missing_break # missing break statement | |
| +missing_break_for_last_case # missing break statement for last case in switch | |
| +missing_default_case # missing default case in switch statement | |
| +missing_option_explicit # the "option explicit" control comment is missing | |
| +missing_semicolon # missing semicolon | |
| +missing_semicolon_for_lambda # missing semicolon for lambda assignment | |
| +multiple_plus_minus # unknown order of operations for successive plus (e.g. x+++y) or minus (e.g. x---y) signs | |
| +nested_comment # nested comment | |
| -no_return_value # function {name} does not always return a value | |
| +octal_number # leading zeros make an octal number | |
| -parseint_missing_radix # parseInt missing radix parameter | |
| +partial_option_explicit # the "option explicit" control comment, if used, must be in the first script tag | |
| +redeclared_var # redeclaration of {name} | |
| +trailing_comma_in_array # extra comma is not recommended in array initializers | |
| +trailing_decimal_point # trailing decimal point may indicate a number or an object member | |
| +undeclared_identifier # undeclared identifier: {name} | |
| +unreachable_code # unreachable code | |
| -unreferenced_argument # argument declared but never referenced: {name} | |
| -unreferenced_function # function is declared but never referenced: {name} | |
| +unreferenced_variable # variable is declared but never referenced: {name} | |
| +unsupported_version # JavaScript {version} is not supported | |
| +use_of_label # use of label | |
| +useless_assign # useless assignment | |
| +useless_comparison # useless comparison; comparing identical expressions | |
| -useless_quotes # the quotation marks are unnecessary | |
| +useless_void # use of the void type may be unnecessary (void is always undefined) | |
| +var_hides_arg # variable {name} hides argument | |
| +want_assign_or_call # expected an assignment or function call | |
| +with_statement # with statement hides undeclared variables; use temporary variable instead | |
| ### Output format | |
| # Customize the format of the error message. | |
| # __FILE__ indicates current file path | |
| # __FILENAME__ indicates current file name | |
| # __LINE__ indicates current line | |
| # __COL__ indicates current column | |
| # __ERROR__ indicates error message (__ERROR_PREFIX__: __ERROR_MSG__) | |
| # __ERROR_NAME__ indicates error name (used in configuration file) | |
| # __ERROR_PREFIX__ indicates error prefix | |
| # __ERROR_MSG__ indicates error message | |
| # | |
| # For machine-friendly output, the output format can be prefixed with | |
| # "encode:". If specified, all items will be encoded with C-slashes. | |
| # | |
| # Visual Studio syntax (default): | |
| +output-format __FILE__(__LINE__): __ERROR__ | |
| # Alternative syntax: | |
| #+output-format __FILE__:__LINE__: __ERROR__ | |
| ### Context | |
| # Show the in-line position of the error. | |
| # Use "+context" to display or "-context" to suppress. | |
| # | |
| +context | |
| ### Control Comments | |
| # Both JavaScript Lint and the JScript interpreter confuse each other with the syntax for | |
| # the /*@keyword@*/ control comments and JScript conditional comments. (The latter is | |
| # enabled in JScript with @cc_on@). The /*jsl:keyword*/ syntax is preferred for this reason, | |
| # although legacy control comments are enabled by default for backward compatibility. | |
| # | |
| -legacy_control_comments | |
| ### Defining identifiers | |
| # By default, "option explicit" is enabled on a per-file basis. | |
| # To enable this for all files, use "+always_use_option_explicit" | |
| -always_use_option_explicit | |
| # Define certain identifiers of which the lint is not aware. | |
| # (Use this in conjunction with the "undeclared identifier" warning.) | |
| # | |
| # Common uses for webpages might be: | |
| +define __dirname | |
| +define clearInterval | |
| +define clearTimeout | |
| +define console | |
| +define exports | |
| +define global | |
| +define process | |
| +define require | |
| +define setInterval | |
| +define setTimeout | |
| +define Buffer | |
| +define JSON | |
| +define Math | |
| +define module | |
| +define sdcClients | |
| +define dapiUrl | |
| +define cnapiUrl | |
| +define fwapiUrl | |
| +define napiUrl | |
| +define napiUsername | |
| +define napiPassword | |
| +define ufdsUrl | |
| +define ufdsDn | |
| +define ufdsPassword | |
| +define imgapiUrl | |
| +define sapiUrl | |
| +define vmapiUrl | |
| +define urlModule | |
| ### JavaScript Version | |
| # To change the default JavaScript version: | |
| #+default-type text/javascript;version=1.5 | |
| #+default-type text/javascript;e4x=1 | |
| ### Files | |
| # Specify which files to lint | |
| # Use "+recurse" to enable recursion (disabled by default). | |
| # To add a set of files, use "+process FileName", "+process Folder\Path\*.js", | |
| # or "+process Folder\Path\*.htm". | |
| # | |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| /* | |
| * This Source Code Form is subject to the terms of the Mozilla Public | |
| * License, v. 2.0. If a copy of the MPL was not distributed with this | |
| * file, You can obtain one at http://mozilla.org/MPL/2.0/. | |
| */ | |
| /* | |
| * Copyright (c) 2015, Joyent, Inc. | |
| */ | |
| /* | |
| * Provision workflow: validates the provision parameters, selects a | |
| * compute node (via DAPI, serialized through CNAPI waitlist tickets), | |
| * provisions NICs, prepares the CNAPI payload, and creates the VM on | |
| * the chosen server. | |
| */ | |
| var async = require('async'); | |
| var fabricCommon = require('./fabric-common'); | |
| var restify = require('restify'); | |
| var common = require('./job-common'); | |
| var childProcess = require('child_process'); | |
| var wfapiUrl; | |
| var VERSION = '7.2.1'; | |
/*
 * Validates that the needed provision parameters are present.
 *
 * Checks the sandbox-provided API URLs/credentials first, then the
 * required job parameters, reporting the first missing item.
 */
function validateParams(job, cb) {
    if (napiUrl === undefined) {
        return cb('No NAPI parameters provided');
    }

    var haveUfds = ufdsUrl !== undefined && ufdsDn !== undefined &&
        ufdsPassword !== undefined;
    if (!haveUfds) {
        return cb('No UFDS parameters provided');
    }

    // Remaining service URLs, checked in a fixed order
    var required = [
        [cnapiUrl, 'No CNAPI URL provided'],
        [fwapiUrl, 'No FWAPI URL provided'],
        [imgapiUrl, 'No IMGAPI URL provided'],
        [sapiUrl, 'No SAPI URL provided']
    ];
    for (var i = 0; i < required.length; i++) {
        if (required[i][0] === undefined) {
            return cb(required[i][1]);
        }
    }

    if (job.params['owner_uuid'] === undefined) {
        return cb('\'owner_uuid\' is required');
    }
    if (job.params.brand === undefined) {
        return cb('VM \'brand\' is required');
    }

    return cb(null, 'All parameters OK!');
}
/*
 * Generates passwords when the image requires it.
 *
 * For every user listed on the image, generates a random password with
 * apg and stores it under internal_metadata['<user>_pw'], unless the
 * caller already supplied one. Skipped entirely when the image sets
 * generate_passwords=false.
 */
function generatePasswords(job, cb) {
    var log = job.log;
    var execFile = childProcess.execFile;

    // apg: 12-char passwords from the Special/Capital/Numeric/Lower sets,
    // excluding characters that are awkward in shells and JSON
    var PWD_LENGTH = 12;
    var APG_COMMAND = '/opt/local/bin/apg';
    var APG_ARGS = [
        '-m', PWD_LENGTH,
        '-M', 'SCNL',
        '-n', 1,
        '-E', '"\'@$%&*/.:[]\\'
    ];

    if (job.params.image['generate_passwords'] === false) {
        return cb(null, 'No need to generate passwords for image');
    }

    if (job.params.image.users === undefined ||
        !Array.isArray(job.params.image.users)) {
        return cb(null, 'Image has generate_passwords=true but no users found');
    }

    if (job.params['internal_metadata'] === undefined) {
        job.params['internal_metadata'] = {};
    }

    async.mapSeries(job.params.image.users, function (user, next) {
        var key = user.name + '_pw';

        // Don't overwrite a password the caller already supplied
        if (job.params['internal_metadata'][key] !== undefined) {
            return next();
        }

        execFile(APG_COMMAND, APG_ARGS, function (err, stdout, stderr) {
            if (err) {
                log.info({ err: err }, 'Error generating random password');
                return next(err);
            }
            // apg writes the password followed by a newline
            job.params['internal_metadata'][key] =
                stdout.toString().replace(/\n|\r/g, '');
            return next();
        });
    }, function (err) {
        if (err) {
            cb(err, 'Could not generate passwords');
        } else {
            cb(null, 'Passwords generated for Image');
        }
    });
}
/*
 * If a server_uuid was already provided (thus skipping DAPI's checks above),
 * we'd still like to at least ensure that the manually-selected server has the
 * sufficient matching nic-tags.
 *
 * This function only applies if params['server_uuid'] was provided by the
 * entity (e.g. person) invoking the provision.
 */
function checkManualServerNics(job, cb) {
    var serverUuid = job.params['server_uuid'];
    if (!serverUuid) {
        // No manual server selection: DAPI enforces nic tags itself
        return cb();
    }

    var headers = { 'x-request-id': job.params['x-request-id'] };
    var cnapi = new sdcClients.CNAPI({ url: cnapiUrl, headers: headers });

    return cnapi.getServer(serverUuid, function (err, server) {
        if (err) {
            return cb(err);
        }

        var nicTags = job.nicTags;
        var interfaces = server.sysinfo['Network Interfaces'];
        var vnics = server.sysinfo['Virtual Network Interfaces'] || {};

        // Fix: track which required tags have been *seen* rather than
        // counting matches. The old counter incremented once per
        // interface carrying a tag, so a tag present on two interfaces
        // was counted twice -- which could mask a different missing tag
        // (false pass) or overshoot the expected total (false fail).
        var seen = {};

        // Mark every required tag that appears in serverTags
        function noteTags(serverTags) {
            for (var i = 0; i < nicTags.length; i++) {
                if (serverTags.indexOf(nicTags[i]) !== -1) {
                    seen[nicTags[i]] = true;
                }
            }
        }

        Object.keys(interfaces).forEach(function (iname) {
            noteTags(interfaces[iname]['NIC Names']);
        });

        Object.keys(vnics).forEach(function (iname) {
            noteTags(vnics[iname]['Overlay Nic Tags'] || []);
        });

        if (Object.keys(seen).length === nicTags.length) {
            return cb(null, 'Manual server meets NIC Tag requirements');
        } else {
            return cb('Manual server does not meet NIC Tag requirements');
        }
    });
}
/*
 * Selects a server for the VM. This function will send VM, image, package and
 * nic-tag requirements to DAPI, and let it figure out which server best fits
 * the requirements.
 *
 * Note that if you pass params['server_uuid'], this function will terminate
 * early, because you have already specified the server you want to provision.
 */
function getAllocation(job, cb) {
    if (!job.nicTags) {
        return cb('NIC tags are required');
    }
    if (!job.params.image) {
        return cb('Image is required');
    }
    if (job.params['server_uuid']) {
        return cb(null,
            'Server UUID present, no need to get allocation from DAPI');
    }

    // There is no sdc-client for CNAPI's DAPI yet
    var cnapi = restify.createJsonClient({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    var payload = {
        vm: job.params,
        image: job.params.image,
        package: job.params.package,
        nic_tags: job.nicTags
    };
    job.log.info({ dapiPayload: payload }, 'Payload sent to DAPI');

    return cnapi.post('/allocate', payload, function (err, req, res, body) {
        if (err) {
            return cb(err);
        }
        // Record the chosen server both where CNAPI tasks expect it and
        // on the job itself
        job.params['server_uuid'] = body.server.uuid;
        job.server_uuid = body.server.uuid;
        return cb(null, 'VM allocated to Server ' + body.server.uuid);
    });
}
/**
 * Set up the payload that will be sent to CNAPI and will be used to provision
 * the virtual machine.
 *
 * Builds job.params.payload from a whitelist of VM properties plus derived
 * values: kernel_version from the image tags, brand 'lx' for lx-dataset
 * images, merged resolvers/routes from the nics (in nic order), and the
 * kvm-vs-zone specific fields (disks/drivers vs image_uuid/filesystems).
 */
function preparePayload(job, cb) {
    job.params.jobid = job.uuid;

    var params = job.params;
    var i, j, nic;
    var payload = { uuid: params['vm_uuid'], image: job.params.image };
    var wantResolvers = true;

    // Inherit the kernel version from the image tags unless the caller
    // supplied one explicitly
    if (payload.image.hasOwnProperty('tags') &&
        payload.image.tags.hasOwnProperty('kernel_version') &&
        !params.hasOwnProperty('kernel_version')) {
        params['kernel_version'] = payload.image.tags.kernel_version;
    }

    // lx-dataset images always provision as the 'lx' brand
    if (payload.image.type === 'lx-dataset') {
        params['brand'] = 'lx';
    }

    // Whitelist of job params copied verbatim into the CNAPI payload
    var keys = [ 'alias', 'autoboot', 'billing_id', 'brand', 'cpu_cap',
        'cpu_shares', 'customer_metadata', 'delegate_dataset', 'dns_domain',
        'docker', 'firewall_enabled', 'fs_allowed', 'hostname',
        'indestructible_zoneroot', 'indestructible_delegated', 'init_name',
        'internal_metadata', 'kernel_version', 'limit_priv',
        'maintain_resolvers', 'max_locked_memory', 'max_lwps', 'max_msg_ids',
        'max_physical_memory', 'max_shm_memory', 'max_sem_ids', 'max_shm_ids',
        'max_swap', 'mdata_exec_timeout', 'nics',
        'owner_uuid', 'package_name', 'package_version', 'quota', 'ram',
        'resolvers', 'vcpus', 'zfs_data_compression', 'zfs_io_priority',
        'tags', 'tmpfs'
    ];
    for (i = 0; i < keys.length; i++) {
        var key = keys[i];
        if (params[key] !== undefined) {
            payload[key] = params[key];
        }
    }

    // Per OS-2520 we always want to be setting archive_on_delete in SDC
    payload['archive_on_delete'] = true;

    // If internal_metadata.set_resolvers === false, we always want
    // to leave the resolvers as empty
    if (params.internal_metadata !== undefined &&
        typeof (params.internal_metadata) === 'object' &&
        params.internal_metadata.set_resolvers === false) {
        wantResolvers = false;
    }

    // Add resolvers and routes in the order of the networks
    var resolver;
    var resolvers = [];
    var routes = {};
    for (i = 0; i < params.nics.length; i++) {
        nic = params.nics[i];
        if (nic['resolvers'] !== undefined &&
            Array.isArray(nic['resolvers'])) {
            for (j = 0; j < nic['resolvers'].length; j++) {
                resolver = nic['resolvers'][j];
                if (resolvers.indexOf(resolver) === -1) {
                    resolvers.push(resolver);
                }
            }
        }
        // First nic to define a route for a destination wins
        if (nic['routes'] !== undefined &&
            typeof (nic['routes']) === 'object') {
            for (var r in nic['routes']) {
                if (!routes.hasOwnProperty(r)) {
                    routes[r] = nic['routes'][r];
                }
            }
        }
    }

    if (wantResolvers) {
        payload['resolvers'] = resolvers;
    }
    if (Object.keys(routes).length !== 0) {
        payload['routes'] = routes;
    }

    if (params['brand'] === 'kvm') {
        // KVM VMs boot from disks; driver settings fall back to the image
        payload.disks = params.disks;
        ['disk_driver', 'nic_driver', 'cpu_type'].forEach(function (field) {
            if (params[field]) {
                payload[field] = params[field];
            } else {
                payload[field] = job.params.image[field];
            }
        });
    } else {
        // Zones use the image dataset directly
        payload['image_uuid'] = params['image_uuid'];
        if (params['filesystems'] !== undefined) {
            payload['filesystems'] = params['filesystems'];
        }
    }

    job.params.payload = payload;
    cb(null, 'Payload prepared successfully');
}
/*
 * Checks if the VM image is present on the compute node and installs it if it
 * is not.
 */
function ensureImage(job, cb) {
    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    // KVM VMs boot from their first disk's image; everything else uses
    // the zone dataset image directly
    var imageUuid = (job.params['brand'] === 'kvm') ?
        job.params.payload.disks[0].image_uuid :
        job.params.image_uuid;

    cnapi.ensureImage(job.params['server_uuid'], imageUuid,
        function (error, task) {
            if (error) {
                return cb(error);
            }
            // pollTask (a later task in the chain) polls this task id
            job.taskId = task.id;
            return cb(null, 'Ensure image task queued!');
        });
}
/*
 * Calls the provision endpoint on CNAPI. This function is very similar to
 * common.zoneAction. Here, we also make sure if we are trying to provision an
 * autoboot machine, in which case the job.expects attribute value should be
 * stopped (and then common.checkState waits for the machine to be stopped)
 */
function provision(job, cb) {
    delete job.params.skip_zone_action;

    var cnapi = new sdcClients.CNAPI({
        url: cnapiUrl,
        headers: { 'x-request-id': job.params['x-request-id'] }
    });

    job.params.jobid = job.uuid;

    // autoboot=false means the machine should not boot after provision,
    // so checkState must wait for 'stopped' instead of 'running'
    var autoboot = job.params.autoboot;
    job.expects = (autoboot === false || autoboot === 'false') ?
        'stopped' : 'running';

    return cnapi.createVm(job.params['server_uuid'], job.params.payload,
        function (err, task) {
            if (err) {
                return cb(err);
            }
            job.taskId = task.id;
            // As soon as we reach this point, we don't want to clean up
            // NICs when a provision fails
            job.markAsFailedOnError = false;
            return cb(null, 'Provision task: ' + task.id + ' queued!');
        });
}
/*
 * Sets the post back execution state as failed
 */
function setPostBackFailed(job, cb) {
    // markAsFailedOnError === false means cnapi.pollTask succeeded, so
    // the VM exists physically whether its provision failed or not; in
    // that case leave the post back state alone.
    if (job.markAsFailedOnError === false) {
        return cb(null, 'markAsFailedOnError was set to false, ' +
            'won\'t set postBackState for VM');
    }

    job.postBackState = 'failed';
    return cb(null, 'Set post back state as failed');
}
/**
 * Records the type of workflow for debugging/informational purposes. For
 * example when creating a waitlist ticket.
 *
 * acquireVMTicket reads job.action when building the waitlist ticket.
 */
function setJobAction(job, cb) {
    job.action = 'provision';
    return cb(null, 'Action set');
}
| var workflow = module.exports = { | |
| name: 'provision-' + VERSION, | |
| version: VERSION, | |
| chain: [ { | |
| name: 'common.validate_params', | |
| timeout: 10, | |
| retry: 1, | |
| body: validateParams | |
| }, | |
| { | |
| name: 'workflow.set_job_action', | |
| timeout: 10, | |
| retry: 1, | |
| body: setJobAction, | |
| modules: {} | |
| }, | |
| { | |
| name: 'imgapi.generate_passwords', | |
| timeout: 10, | |
| retry: 1, | |
| body: generatePasswords, | |
| modules: { childProcess: 'child_process', async: 'async' } | |
| }, { | |
| name: 'napi.validate_networks', | |
| timeout: 10, | |
| retry: 1, | |
| body: common.validateNetworks, | |
| modules: { sdcClients: 'sdc-clients', async: 'async' } | |
| }, { | |
| name: 'cnapi.check_manual_server_nics', | |
| timeout: 10, | |
| retry: 1, | |
| body: checkManualServerNics, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, | |
| /** | |
| * Here we serialize the compute node designation api (DAPI) portion of the | |
| * workflow via the CNAPI waitlist to ensure that only one allocation | |
| * happens at a time. In addition to serializing all the designations via | |
| * the waitlist, we will create waitlist tickets for the provision itself. | |
| * DAPI will use the provision tickets to know whether there are in-flight | |
| * provisions and prevent multiple concurrent provisions from inadvertently | |
| * all ending up on the same compute node. | |
| */ | |
| { | |
| name: 'dapi.get_allocation_ticket', | |
| timeout: 6, | |
| retry: 1, | |
| body: common.acquireAllocationTicket, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'dapi.wait_allocation_ticket', | |
| timeout: 120, | |
| retry: 1, | |
| body: common.waitOnAllocationTicket, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'dapi.get_allocation', | |
| timeout: 10, | |
| retry: 1, | |
| body: getAllocation, | |
| modules: { restify: 'restify' } | |
| }, { | |
| name: 'cnapi.acquire_vm_ticket', | |
| timeout: 10, | |
| retry: 1, | |
| body: common.acquireVMTicket, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'dapi.release_allocation_ticket', | |
| timeout: 6, | |
| retry: 1, | |
| body: common.releaseAllocationTicket, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'cnapi.wait_on_vm_ticket', | |
| timeout: 120, | |
| retry: 1, | |
| body: common.waitOnVMTicket, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, | |
| /** | |
| * End of DAPI serialization section. | |
| */ | |
| { | |
| name: 'napi.provision_nics', | |
| timeout: 20, | |
| retry: 1, | |
| body: common.provisionNics, | |
| modules: { sdcClients: 'sdc-clients', async: 'async' } | |
| } | |
| /** | |
| * Fabric NAT provisioning | |
| */ | |
| ].concat( | |
| fabricCommon.provisionChain).concat([ | |
| { | |
| name: 'prepare_payload', | |
| timeout: 10, | |
| retry: 1, | |
| body: preparePayload, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'cnapi.ensure_image', | |
| timeout: 300, | |
| retry: 1, | |
| body: ensureImage, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'cnapi.poll_task_ensure_image', | |
| timeout: 3600, | |
| retry: 1, | |
| body: common.pollTask, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, | |
| /* | |
| * If we've provisioned fabric NAT zones for this VM, wait until | |
| * they've finished before sending off the provision. | |
| */ | |
| fabricCommon.provisionWaitTask, | |
| { | |
| name: 'cnapi.provision_vm', | |
| timeout: 10, | |
| retry: 1, | |
| body: provision, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'cnapi.poll_task', | |
| timeout: 3600, | |
| retry: 1, | |
| body: common.pollTask, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'vmapi.check_state', | |
| timeout: 120, | |
| retry: 1, | |
| body: common.checkState, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'fwapi.update', | |
| timeout: 10, | |
| retry: 1, | |
| body: common.updateFwapi, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, { | |
| name: 'cnapi.release_vm_ticket', | |
| timeout: 60, | |
| retry: 1, | |
| body: common.releaseVMTicket, | |
| modules: { sdcClients: 'sdc-clients' } | |
| }, | |
| // If this was a fabric nat provision, clean up the ticket | |
| fabricCommon.releaseTicketTask | |
| ]), | |
| timeout: 3810, | |
| onerror: [ { | |
| name: 'napi.cleanup_nics', | |
| timeout: 10, | |
| retry: 1, | |
| body: common.cleanupNics, | |
| modules: { sdcClients: 'sdc-clients', async: 'async' } | |
| }, { | |
| name: 'set_post_back_failed', | |
| body: setPostBackFailed, | |
| modules: {} | |
| }, { | |
| name: 'common.post_back', | |
| body: common.postBack, | |
| modules: { async: 'async', restify: 'restify', urlModule: 'url' } | |
| }, | |
| { | |
| name: 'cnapi.cleanup_allocation_ticket', | |
| modules: { sdcClients: 'sdc-clients' }, | |
| body: common.releaseAllocationTicket | |
| }, | |
| { | |
| name: 'cnapi.cleanup_vm_ticket', | |
| modules: { sdcClients: 'sdc-clients' }, | |
| body: common.releaseVMTicket | |
| }, | |
| // If this was a fabric nat provision, clean up the ticket | |
| fabricCommon.releaseTicketTask, | |
| { | |
| name: 'On error', | |
| body: function (job, cb) { | |
| return cb('Error executing job'); | |
| } | |
| }], | |
| oncancel: [ { | |
| name: 'vmapi.refresh_vm', | |
| modules: { restify: 'restify' }, | |
| body: common.refreshVm | |
| }, { | |
| name: 'cnapi.cleanup_vm_ticket', | |
| modules: { sdcClients: 'sdc-clients' }, | |
| body: common.releaseVMTicket | |
| }, | |
| { | |
| name: 'cnapi.cleanup_allocation_ticket', | |
| modules: { sdcClients: 'sdc-clients' }, | |
| body: common.releaseAllocationTicket | |
| }, | |
| // If this was a fabric nat provision, clean up the ticket | |
| fabricCommon.releaseTicketTask | |
| ] | |
| }; |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| { | |
| "logLevel": "debug", | |
| "maxSockets": 100, | |
| "useVmAgent": true, | |
| "ufdsAdminUuid": "{{{ufds_admin_uuid}}}", | |
| "reserveKvmStorage": "{{reserveKvmStorage}}", | |
| "api": { | |
| "port": 80 | |
| }, | |
| "overlay": { | |
| {{#fabric_cfg}} | |
| "natPool": "{{{sdc_nat_pool}}}" | |
| {{/fabric_cfg}} | |
| }, | |
| "heartbeater": { | |
| "host": "{{{RABBITMQ_SERVICE}}}", | |
| "queue": "heartbeat.vmapi", | |
| "reconnect": 5, | |
| "concurrency": 50, | |
| "retry": { | |
| "initialDelay": 4000 | |
| } | |
| }, | |
| "cache": { | |
| "type": "redis", | |
| "host": "{{{REDIS_SERVICE}}}" | |
| }, | |
| "wfapi": { | |
| "forceMd5Check": true, | |
| "workflows": [ | |
| "provision", | |
| "start", | |
| "stop", | |
| "kill", | |
| "reboot", | |
| "reprovision", | |
| "update", | |
| "destroy", | |
| "snapshot", | |
| "rollback", | |
| "delete-snapshot", | |
| "add-nics", | |
| "update-nics", | |
| "remove-nics" | |
| ], | |
| "url": "http://{{{WORKFLOW_SERVICE}}}" | |
| }, | |
| "cnapi": { | |
| "maxSockets": 10, | |
| "url": "http://{{{CNAPI_SERVICE}}}" | |
| }, | |
| "imgapi": { | |
| "url": "http://{{{IMGAPI_SERVICE}}}" | |
| }, | |
| "napi": { | |
| "url": "http://{{{NAPI_SERVICE}}}" | |
| }, | |
| "papi": { | |
| "url": "http://{{{PAPI_SERVICE}}}" | |
| }, | |
| "moray": { | |
| "host": "{{{MORAY_SERVICE}}}", | |
| "port": 2020, | |
| "connectTimeout": 200, | |
| "retry": { | |
| "retries": 2, | |
| "minTimeout": 500 | |
| } | |
| } | |
| } |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| /* | |
| * This Source Code Form is subject to the terms of the Mozilla Public | |
| * License, v. 2.0. If a copy of the MPL was not distributed with this | |
| * file, You can obtain one at http://mozilla.org/MPL/2.0/. | |
| */ | |
| /* | |
| * Copyright (c) 2015, Joyent, Inc. | |
| */ | |
| /* | |
| * All validation related functions. They mostly apply to CreateVm and UpdateVm | |
| */ | |
| var assert = require('assert'); | |
| var restify = require('restify'); | |
| var async = require('async'); | |
| var format = require('util').format; | |
| var libuuid = require('libuuid'); | |
| var net = require('net'); | |
| var errors = require('../errors'); | |
| var common = require('./vm-common'); | |
// Canonical lowercase-hex UUID (8-4-4-4-12).
var UUID_RE = /^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$/;
// VM alias: leading alphanumeric, then alphanumerics, '_', '.' or '-'.
var ALIAS_RE = /^[a-zA-Z0-9][a-zA-Z0-9\_\.\-]*$/;
// Dotted-quad IPv4 address, each octet 0-255.
/*JSSTYLED*/
var IP_RE = /^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$/;
// Matches keys ending in '_pw' (presumably password metadata -- the
// consumer of this pattern is not in this file; confirm before relying
// on it).
var PW_SUFFIX = /^(.*)_pw$/;
// Brands accepted by the 'brand' validator below.
var VALID_VM_BRANDS = [
    'joyent-minimal',
    'joyent',
    'lx',
    'kvm',
    'sngl'
];
var DEFAULT_QUOTA = 10; // GiB
var MIN_SWAP = 256; // MiB
/*
 * Every VM field that validation knows about. Each entry names a field and
 * whether it is mutable: validateUpdateVmParams only processes fields with
 * 'mutable: true' (it filters this list); validateCreateVmParams runs the
 * validator for every field listed here.
 */
var VM_FIELDS = [
    { name: 'alias', mutable: true },
    { name: 'autoboot', mutable: true },
    { name: 'billing_id', mutable: true },
    { name: 'brand', mutable: false },
    { name: 'cpu_cap', mutable: true },
    { name: 'cpu_shares', mutable: true },
    { name: 'cpu_type', mutable: false },
    { name: 'customer_metadata', mutable: false },
    { name: 'delegate_dataset', mutable: false },
    { name: 'disk_driver', mutable: false },
    { name: 'dns_domain', mutable: false },
    { name: 'docker', mutable: false },
    { name: 'do_not_inventory', mutable: true },
    { name: 'firewall_enabled', mutable: true },
    { name: 'fs_allowed', mutable: true },
    { name: 'hostname', mutable: false },
    { name: 'indestructible_delegated', mutable: true },
    { name: 'indestructible_zoneroot', mutable: true },
    { name: 'init_name', mutable: false },
    { name: 'internal_metadata', mutable: false },
    { name: 'kernel_version', mutable: false },
    { name: 'limit_priv', mutable: true },
    { name: 'locality', mutable: false },
    { name: 'maintain_resolvers', mutable: true },
    { name: 'max_locked_memory', mutable: true },
    { name: 'max_lwps', mutable: true },
    { name: 'max_physical_memory', mutable: true },
    { name: 'max_swap', mutable: true },
    { name: 'mdata_exec_timeout', mutable: false },
    { name: 'networks', mutable: false },
    { name: 'nic_driver', mutable: false },
    { name: 'overprovision_cpu', mutable: false },
    { name: 'overprovision_memory', mutable: false },
    { name: 'owner_uuid', mutable: false },
    { name: 'package_name', mutable: true },
    { name: 'package_version', mutable: true },
    { name: 'quota', mutable: true },
    { name: 'ram', mutable: true },
    { name: 'resolvers', mutable: true },
    { name: 'server_uuid', mutable: false },
    { name: 'tags', mutable: false },
    { name: 'tmpfs', mutable: true },
    { name: 'uuid', mutable: false },
    { name: 'vcpus', mutable: false },
    { name: 'zfs_data_compression', mutable: true },
    { name: 'zfs_io_priority', mutable: true }
];
/*
 * Per-field validators. Each validator takes the request params object and
 * returns an array of error objects (empty when the field is valid).
 * Several validators also normalize values in place: numbers are coerced
 * via Number(), metadata given as a JSON string is parsed, and 'networks'
 * is rewritten to its normalized form by validNetworks().
 */
var validators = {

    alias: function (params) {
        var errs = [];
        if (params.alias !== undefined && !ALIAS_RE.test(params.alias)) {
            errs.push(errors.invalidParamErr('alias'));
        }
        return errs;
    },

    autoboot: createValidateBooleanFn('autoboot'),
    billing_id: createValidateUUIDFn('billing_id', false),

    // brand is required and must be one of VALID_VM_BRANDS.
    brand: function (params) {
        var errs = [];
        if (params.brand === undefined) {
            errs.push(errors.missingParamErr('brand'));
        } else if (!validBrand(params.brand)) {
            var message = 'Must be one of: ' + VALID_VM_BRANDS.join(', ');
            errs.push(errors.invalidParamErr('brand', message));
        }
        return errs;
    },

    cpu_cap: createValidateNumberFn('cpu_cap', true),
    cpu_shares: createValidateNumberFn('cpu_shares', true),
    cpu_type: createValidateStringFn('cpu_type'),
    customer_metadata: createValidateMetadataFn('customer_metadata'),
    delegate_dataset: createValidateBooleanFn('delegate_dataset'),
    disk_driver: createValidateStringFn('disk_driver'),
    dns_domain: createValidateStringFn('dns_domain'),
    docker: createValidateBooleanFn('docker'),
    do_not_inventory: createValidateBooleanFn('do_not_inventory'),
    firewall_enabled: createValidateBooleanFn('firewall_enabled'),
    fs_allowed: createValidateStringFn('fs_allowed'),
    hostname: createValidateStringFn('hostname'),
    indestructible_delegated:
        createValidateBooleanFn('indestructible_delegated'),
    indestructible_zoneroot: createValidateBooleanFn('indestructible_zoneroot'),
    init_name: createValidateStringFn('init_name'),
    internal_metadata: createValidateMetadataFn('internal_metadata'),
    kernel_version: createValidateStringFn('kernel_version'),
    limit_priv: createValidateStringFn('limit_priv'),

    locality: function (params) {
        var errs = [];
        if (params.locality) {
            try {
                validLocality(params.locality);
            } catch (e) {
                errs.push(errors.invalidParamErr('locality', e.message));
            }
        }
        return errs;
    },

    maintain_resolvers: createValidateBooleanFn('maintain_resolvers'),
    max_locked_memory: createValidateNumberFn('max_locked_memory', true),
    max_lwps: createValidateNumberFn('max_lwps', true),
    max_physical_memory: createValidateNumberFn('max_physical_memory', true),
    max_swap: createValidateNumberFn('max_swap', true),
    mdata_exec_timeout: createValidateNumberFn('mdata_exec_timeout', false),

    // networks is required; validNetworks() both validates and normalizes
    // the value (see its comment further down in this file).
    networks: function (params) {
        var errs = [];
        if (params.networks) {
            try {
                params.networks = validNetworks(params.networks, true);
            } catch (e) {
                errs.push(errors.invalidParamErr('networks', e.message));
            }
        } else {
            errs.push(errors.missingParamErr('networks'));
        }
        return errs;
    },

    nic_driver: createValidateStringFn('nic_driver'),
    overprovision_cpu: createValidateNumberFn('overprovision_cpu', false),
    overprovision_memory: createValidateNumberFn('overprovision_memory', false),
    owner_uuid: createValidateUUIDFn('owner_uuid', true),
    package_name: createValidateStringFn('package_name'),
    package_version: createValidateStringFn('package_version'),
    quota: createValidateNumberFn('quota', true),
    ram: createValidateNumberFn('ram', false),
    resolvers: createValidateArrayFn('resolvers'),
    server_uuid: createValidateUUIDFn('server_uuid', false),
    tags: createValidateMetadataFn('tags'),
    // Bug fix: this validator previously checked the 'uuid' field (an
    // apparent copy/paste of the 'uuid' entry below); the ticket validator
    // must look at the 'ticket' field.
    ticket: createValidateUUIDFn('ticket', false),
    tmpfs: createValidateNumberFn('tmpfs', true),
    uuid: createValidateUUIDFn('uuid', false),
    vcpus: createValidateNumberFn('vcpus', false),
    zfs_data_compression: createValidateStringFn('zfs_data_compression'),
    zfs_io_priority: createValidateNumberFn('zfs_io_priority', true)
};
/*
 * Builds a validator for a metadata-style field (customer_metadata,
 * internal_metadata, tags). A value supplied as a JSON string is parsed in
 * place; the resulting object is then checked with validMetadata(). Errors
 * carried by a thrown restify-style error body are surfaced directly.
 */
function createValidateMetadataFn(field) {
    return function (params) {
        var found = [];
        var value = params[field];

        if (value === undefined) {
            return found;
        }

        try {
            if (typeof (value) === 'string') {
                value = JSON.parse(value);
                params[field] = value;
            }
            validMetadata(field, value);
        } catch (e) {
            var bodyErrors = e.body && e.body.errors;
            found.push(bodyErrors ? bodyErrors[0] :
                errors.invalidParamErr(field));
        }

        return found;
    };
}
/*
 * Builds a validator that accepts a field only when it is absent or an
 * array.
 */
function createValidateArrayFn(field) {
    return function (params) {
        var found = [];
        var value = params[field];

        if (value !== undefined && !Array.isArray(value)) {
            found.push(errors.invalidParamErr(field, 'Not an array'));
        }

        return found;
    };
}
/*
 * Builds a validator that accepts a field only when it is absent or a
 * string.
 */
function createValidateStringFn(field) {
    return function (params) {
        var found = [];
        var value = params[field];

        if (value !== undefined && typeof (value) !== 'string') {
            found.push(errors.invalidParamErr(field, 'Not a valid string'));
        }

        return found;
    };
}
/*
 * Builds a validator for numeric fields. A value that passes validNumber()
 * is normalized in place to a Number; anything else yields an
 * invalid-parameter error. 'gezero' is forwarded to validNumber()
 * (presumably "greater or equal to zero" -- validNumber is defined
 * elsewhere; confirm there).
 */
function createValidateNumberFn(field, gezero) {
    return function (params) {
        var found = [];
        var raw = params[field];

        if (raw === undefined) {
            return found;
        }

        if (!validNumber(raw, gezero)) {
            found.push(errors.invalidParamErr(field, 'Not a valid number'));
            return found;
        }

        params[field] = Number(raw);
        return found;
    };
}
/*
 * Builds a validator that accepts a field only when it is absent or a
 * strict boolean (string values like 'true' are rejected).
 */
function createValidateBooleanFn(field) {
    return function (params) {
        var found = [];
        var value = params[field];

        if (value !== undefined && typeof (value) !== 'boolean') {
            found.push(errors.invalidParamErr(field));
        }

        return found;
    };
}
/*
 * Builds a UUID validator. When 'required' is true (default false) a
 * missing value is an error; a present value must satisfy validUUID().
 */
function createValidateUUIDFn(field, required) {
    var mustExist = (required === undefined) ? false : required;

    return function (params) {
        var found = [];
        var value = params[field];

        if (value === undefined) {
            if (mustExist) {
                found.push(errors.missingParamErr(field));
            }
        } else if (!validUUID(value)) {
            found.push(errors.invalidUuidErr(field));
        }

        return found;
    };
}
/*
 * Reused by Create/Update for checking package values and populating the
 * request params when some values are not present. This function should
 * only be called when a request contains billing_id: it looks the package
 * up in PAPI, copies any package values the caller did not explicitly
 * override into params, fills in memory and cpu_shares defaults, and
 * stashes the full package object at params['package'].
 */
function validatePackageValues(papi, params, errs, callback) {
    var PKG_FIELDS = ['cpu_cap', 'max_lwps', 'max_physical_memory',
        'max_swap', 'quota', 'vcpus', 'zfs_io_priority'];

    papi.getPackage(params.billing_id, function (err, pkg) {
        if (err) {
            // An unknown billing_id is a validation error; any other PAPI
            // failure aborts validation entirely.
            if (err.name !== 'ResourceNotFoundError') {
                return callback(err);
            }
            errs.push({
                field: 'billing_id',
                code: 'Invalid',
                message: err.message
            });
            return callback();
        }

        // Package values only apply where the original provision params
        // didn't already override them.
        PKG_FIELDS.forEach(function (field) {
            if (params[field] !== undefined || pkg[field] === undefined) {
                return;
            }
            if (field !== 'quota') {
                params[field] = Number(pkg[field]);
            } else if (params.brand === 'kvm') {
                // KVM VMs always get a quota of 10 (GiB, matching
                // DEFAULT_QUOTA) regardless of the package value.
                params.quota = 10;
            } else {
                // Package quota divided by 1024 (MiB -> GiB, presumably).
                params.quota = Number(pkg.quota) / 1024;
            }
        });

        // Default max_physical_memory (or, failing that, ram) from the
        // package when the caller left it out.
        var pkgRam = pkg.max_physical_memory;
        if (pkgRam !== undefined) {
            if (params.max_physical_memory === undefined) {
                params.max_physical_memory = Number(pkgRam);
            } else if (params.ram === undefined) {
                params.ram = Number(pkgRam);
            }
        }
        pkgRam = params.max_physical_memory || params.ram;

        if (params.cpu_shares === undefined) {
            if (pkg.fss === undefined) {
                // Last resort default: one share per 128M of RAM, minimum 1.
                params.cpu_shares = (pkgRam > 128) ?
                    Math.floor(pkgRam / 128) : 1;
            } else {
                params.cpu_shares = Math.floor(Number(pkg.fss));
                if (params.cpu_shares < 1) {
                    params.cpu_shares = 1;
                }
            }
        }

        params['package'] = pkg;
        return callback();
    });
}
| /* | |
| * Validates CreateVm parameters | |
| */ | |
| exports.validateCreateVmParams = function (vmapi, params, callback) { | |
| var errs = []; | |
| VM_FIELDS.forEach(function (field) { | |
| var fieldErrs = validators[field.name](params); | |
| errs = errs.concat(fieldErrs); | |
| }); | |
| // when no package is passed, we want to validate presence of ram, | |
| // max_physical_memory and disks (when kvm) at least | |
| if (!params.billing_id) { | |
| if (params.brand === 'kvm' && !params.ram) { | |
| errs.push(errors.missingParamErr('ram', 'Is required for KVM')); | |
| } else if (!params.max_physical_memory && !params.ram) { | |
| errs.push(errors.missingParamErr('ram')); | |
| } | |
| } | |
| // max_swap | |
| if (params.max_swap !== undefined && params.max_swap < MIN_SWAP) { | |
| errs.push(errors.invalidParamErr('max_swap', | |
| 'Cannot be less than ' + MIN_SWAP)); | |
| } | |
| validateBrandParams(params, errs); | |
| // Async validations | |
| var asyncFns = []; | |
| if (params.uuid) { | |
| asyncFns.push(validateUniqueUuid); | |
| } | |
| if (params.alias) { | |
| asyncFns.push(validateAlias); | |
| } | |
| if (params.server_uuid) { | |
| asyncFns.push(validateServer); | |
| } | |
| if (params.billing_id && | |
| params.billing_id !== '00000000-0000-0000-0000-000000000000') { | |
| asyncFns.push(validatePackage); | |
| } | |
| if (params.image_uuid || (params.disks && params.disks[0].image_uuid)) { | |
| asyncFns.push(validateImage); | |
| } | |
| if (params.image_uuid && params.brand === 'lx') { | |
| asyncFns.push(validateLxBrand); | |
| } | |
| function validateUniqueUuid(next) { | |
| vmapi.moray.getVm({ uuid: params.uuid }, function onGetVm(err, vm) { | |
| if (err) { | |
| return next(err); | |
| } | |
| if (vm) { | |
| errs.push(errors.duplicateParamErr('uuid')); | |
| } | |
| return next(); | |
| }); | |
| } | |
| function validateAlias(next) { | |
| validateUniqueAlias(vmapi.moray, params, function (err, errorObj) { | |
| if (err) { | |
| return next(err); | |
| } else if (errorObj) { | |
| errs.push(errorObj); | |
| } | |
| return next(); | |
| }); | |
| } | |
| function validateServer(next) { | |
| vmapi.cnapi.getServer(params.server_uuid, function (err) { | |
| if (err) { | |
| if (err.name === 'ResourceNotFoundError') { | |
| errs.push({ | |
| field: 'server_uuid', | |
| code: 'Invalid', | |
| message: err.message | |
| }); | |
| } else { | |
| return next(err); | |
| } | |
| } | |
| return next(); | |
| }); | |
| } | |
| function validatePackage(next) { | |
| validatePackageValues(vmapi.papi, params, errs, next); | |
| } | |
| function validateImage(next) { | |
| var img_uuid = params.image_uuid || params.disks[0].image_uuid; | |
| vmapi.imgapi.getImage(img_uuid, function (err, image) { | |
| if (err) { | |
| if (err.name === 'ResourceNotFoundError') { | |
| errs.push({ | |
| field: 'image_uuid', | |
| code: 'Invalid', | |
| message: err.message | |
| }); | |
| return next(); | |
| } else { | |
| return next(err); | |
| } | |
| } | |
| if (image.state !== 'active' || image.disabled !== false) { | |
| errs.push({ | |
| field: 'image_uuid', | |
| code: 'Invalid', | |
| message: 'Image must be active and not disabled' | |
| }); | |
| } else { | |
| params.image = image; | |
| } | |
| return next(); | |
| }); | |
| } | |
| function validateLxBrand(next) { | |
| var DOCKER_TYPES = ['lx-dataset', 'docker']; | |
| if (DOCKER_TYPES.indexOf(params.image.type) === -1) { | |
| errs.push(errors.invalidParamErr( | |
| 'image_uuid', 'Image type is "' + params.image.type + '\" ' + | |
| 'must be one of: ' + JSON.stringify(DOCKER_TYPES))); | |
| } | |
| if (params.kernel_version === undefined) { | |
| if (params.image.tags && params.image.tags.kernel_version) { | |
| params.kernel_version = params.image.tags.kernel_version; | |
| } else { | |
| errs.push(errors.missingParamErr( | |
| 'kernel_version', 'Required for LX zones')); | |
| } | |
| } | |
| return next(); | |
| } | |
| async.series(asyncFns, function (err) { | |
| if (err) { | |
| return callback(err); | |
| } | |
| if (errs.length) { | |
| return callback( | |
| new errors.ValidationFailedError('Invalid VM parameters', | |
| errs)); | |
| } | |
| return callback(null); | |
| }); | |
| }; | |
/*
 * Validates UpdateVm params. Only fields flagged 'mutable' in VM_FIELDS
 * are considered: valid values are copied from 'obj' (the raw request
 * params) into a fresh 'params' object, a subtask (resize/change_owner/
 * rename) is derived, async checks run, and the function calls back with
 * (null, params) on success or a ValidationFailedError otherwise.
 */
exports.validateUpdateVmParams = function (vmapi, vm, obj, callback) {
    var errs = [];
    var params = {};

    VM_FIELDS.filter(function (field) {
        return field.mutable;
    }).forEach(function (field) {
        var fieldErrs = validators[field.name](obj);
        errs = errs.concat(fieldErrs);
        if (obj[field.name] !== undefined) {
            params[field.name] = obj[field.name];
        }
    });

    // special case for change_owner
    if (obj.new_owner_uuid) {
        if (typeof (obj.new_owner_uuid) === 'string' &&
            validUUID(obj.new_owner_uuid)) {
            params.new_owner_uuid = obj.new_owner_uuid;
        } else {
            errs.push(errors.invalidUuidErr('new_owner_uuid'));
        }
    }

    // Metadata updates are merged against the VM's existing metadata by
    // createMetadataObject (defined elsewhere in this file); errors thrown
    // by it are assumed to carry a restify-style e.body.errors array.
    if (obj.customer_metadata) {
        try {
            createMetadataObject(vm,
                'customer_metadata',
                params,
                obj.customer_metadata);
            validateMetadataNamespaces(vm, params);
        } catch (e) {
            errs.push(e.body.errors[0]);
        }
    }

    if (obj.internal_metadata) {
        try {
            createMetadataObject(vm,
                'internal_metadata',
                params,
                obj.internal_metadata);
        } catch (e) {
            errs.push(e.body.errors[0]);
        }
    }

    if (obj.tags) {
        try {
            createMetadataObject(vm,
                'tags',
                params,
                obj.tags);
        } catch (e) {
            errs.push(e.body.errors[0]);
        }
    }

    if (obj.update_disks) {
        if (Array.isArray(obj.update_disks)) {
            params.update_disks = obj.update_disks;
        } else {
            errs.push(errors.invalidParamErr('update_disks', 'Not an array'));
        }
    }

    // If there were no parameters passed, fail here before doing any async
    if (Object.keys(params).length === 0) {
        return callback(
            new errors.ValidationFailedError('No VM parameters provided', []));
    }

    // Derives which kind of update this is. Memory/package changes win over
    // owner changes, which win over renames.
    function getSubtask() {
        if (params.billing_id !== undefined ||
            params.ram !== undefined ||
            params.max_physical_memory !== undefined) {
            return 'resize';
        } else if (params.new_owner_uuid) {
            return 'change_owner';
        } else if (params.alias) {
            return 'rename';
        }
        return '';
    }

    // Ideally there is no simultaneous subtasks unless requests are
    // manually done
    params.subtask = getSubtask();

    // Validate resize. Not allowed for KVM at the moment
    if (params.subtask === 'resize' && vm.brand === 'kvm') {
        errs.push(errors.invalidParamErr('brand', 'Cannot resize a KVM VM'));
    }

    // Async validations
    var asyncFns = [];
    if (params.alias) {
        asyncFns.push(validateAlias);
    }
    if (params.billing_id) {
        asyncFns.push(validatePackage);
    }
    if (params.subtask === 'resize' && vm.brand !== 'kvm') {
        asyncFns.push(validateResize);
        asyncFns.push(validateCapacity);
    }

    // The new alias must be unique for this VM's owner.
    function validateAlias(next) {
        var vparams = { owner_uuid: vm.owner_uuid, alias: params.alias };
        validateUniqueAlias(vmapi.moray, vparams, function (err, errorObj) {
            if (err) {
                return next(err);
            } else if (errorObj) {
                errs.push(errorObj);
            }
            return next();
        });
    }

    // billing_id must name a real PAPI package; this also fills in param
    // defaults from the package (see validatePackageValues).
    function validatePackage(next) {
        validatePackageValues(vmapi.papi, params, errs, next);
    }

    // A resize must respect the image's min_ram/max_ram requirements.
    function validateResize(next) {
        // First get image from IMGAPI and resort to CNAPI if the image only
        // exists in the server. This can be the case for customer created
        // images that can be deleted from the IMGAPI repository
        vmapi.imgapi.getImage(vm.image_uuid, function (err, image) {
            if (err) {
                if (err.name === 'ResourceNotFoundError') {
                    return vmapi.cnapi.getImage(
                        vm.server_uuid,
                        vm.image_uuid,
                        onImage);
                } else {
                    return next(err);
                }
            }
            return onImage(null, image);
        });

        function onImage(err, image) {
            if (err) {
                if (err.name === 'ResourceNotFoundError') {
                    errs.push(errors.invalidParamErr(
                        'image_uuid',
                        err.message));
                    return next();
                } else {
                    return next(err);
                }
            }

            var newRam = params.ram || params.max_physical_memory;
            var reqs = image.requirements;
            var maxRam = reqs && reqs.max_ram;
            var minRam = reqs && reqs.min_ram;

            if (maxRam && newRam > maxRam) {
                errs.push(errors.invalidParamErr(
                    'ram',
                    'Specified RAM (' + newRam + ') does not meet the maximum' +
                    ' RAM requirement (' + maxRam + ')'));
            } else if (minRam && newRam < minRam) {
                errs.push(errors.invalidParamErr(
                    'ram',
                    'Specified RAM (' + newRam + ') does not meet the minimum' +
                    ' RAM requirement (' + minRam + ')'));
            }

            return next();
        }
    }

    // The target server must have capacity for the RAM/disk delta, unless
    // the caller forces the resize.
    function validateCapacity(next) {
        // obj == req.params
        if (obj.force === true || obj.force === 'true') {
            vmapi.log.info('Forced resize operation called for %s', obj.uuid);
            return next();
        }

        var currentRam = vm.ram || vm.max_physical_memory;
        var requiredRam = params.ram || params.max_physical_memory;
        var neededRam = requiredRam - currentRam;
        var currentDisk = vm.quota;
        var requiredDisk = params.quota;

        vmapi.cnapi.capacity([ vm.server_uuid ], function (err, cap) {
            if (err) {
                return next(err);
            }

            var sram = cap.capacities[vm.server_uuid].ram;
            var sdisk = cap.capacities[vm.server_uuid].disk / 1024;

            if (sram < neededRam) {
                errs.push(errors.insufficientCapacityErr(
                    'ram',
                    'Required RAM (' + neededRam + ') exceeds the Server\'s ' +
                    'available RAM (' + sram + ')'));
            }

            if (requiredDisk && (sdisk < requiredDisk - currentDisk)) {
                var neededDisk = requiredDisk - currentDisk;
                errs.push(errors.insufficientCapacityErr(
                    'quota',
                    'Required disk (' + neededDisk + ') exceeds the Server\'s' +
                    ' available disk (' + sdisk + ')'));
            }

            return next();
        });
    }

    async.series(asyncFns, function (err) {
        if (err) {
            return callback(err);
        }
        if (errs.length) {
            return callback(
                new errors.ValidationFailedError('Invalid VM update parameters',
                    errs));
        }
        return callback(null, params);
    });
};
/*
 * Returns true when the given value matches the canonical lowercase-hex
 * UUID format (see UUID_RE).
 */
function validUUID(uuid) {
    var matches = UUID_RE.test(uuid);
    return matches;
}
| exports.validUUID = validUUID; | |
/*
 * Returns true when the input is an array of strings, or any string (a
 * string is treated as a comma-separated list, whose elements are strings
 * by construction). Any other type is invalid.
 */
function validStrings(object) {
    var list;

    if (typeof (object) === 'string') {
        list = object.split(',');
    } else if (Array.isArray(object)) {
        list = object;
    } else {
        return false;
    }

    return list.every(function (item) {
        return (typeof (item) === 'string');
    });
}
| exports.validStrings = validStrings; | |
/*
 * Returns true when the input is an array of UUIDs, or a comma-separated
 * string of UUIDs. Any other type is invalid.
 */
function validUUIDs(object) {
    var list;

    if (typeof (object) === 'string') {
        list = object.split(',');
    } else if (Array.isArray(object)) {
        list = object;
    } else {
        return false;
    }

    return list.every(function (uuid) {
        return UUID_RE.test(uuid);
    });
}
| exports.validUUIDs = validUUIDs; | |
// Treat only boolean true or the string 'true' as primary -- just to make
// sure it works in case someone decides to send 'false'.
function isPrimary(prm) {
    if (prm === true || prm === 'true') {
        return true;
    }
    return false;
}
| /* | |
| * Validates if a comma separated string contains UUIDs | |
| * If isProvision is true then it will assume a new VM that has no NICs yet, | |
| * therefore marking the first NIC as primary if not explicitly done for others. | |
 * If isProvision is false it means that we can't default a NIC as primary
| * | |
| * The history of this format has changed over some time. The original 'legacy' | |
| * way is the form of: | |
| * | |
| * [ uuid_0, uuid_1, ... ] | |
| * | |
| * The next iteration of this looks like: | |
| * | |
| * [ { uuid: uuid_0, primary: true }, { uuid: uuid_1, ip: ip_1 }, ... ] | |
| * | |
| * Importantly that form allowed us to request an IP address and to set which | |
| * nic is the primary. However, we want to allow the API to evolve into | |
| * something that's more IPv6 friendly and allows us to specify multiple IPv6 | |
| * IPs. | |
| * | |
| * The new form of this is going to look like: | |
| * | |
| * [ | |
| * { | |
| * ipv4_uuid: uuid_0, ipv4_count: <number>, ipv4_ips: [ ip0, ip1, ... ], | |
| * ipv6_uuid: uuid_1, ipv6_count: <number>, ipv6_ips: [ ip0, ip1, ... ], | |
| * primary: true | |
| * }, ... | |
| * ] | |
| * | |
| * The idea here is that each object is an interface. Interfaces can be IPv4 and | |
| * IPv6 uuids. That said, we don't quite support everything here yet. We only | |
| * support a count of 1 or a single IP in the array. We don't support both of | |
| * those at this time, though we'll go through and support it later on, the same | |
| * is true for IPv6. The goal is just to future proof ourselves at the various | |
| * layers of the stack. And of course, if this never does come to pass, I'll be | |
| * quite sad, and say I'm sorry. | |
| */ | |
| function validNetworks(object, isProvision) { | |
| var networks = []; | |
| var primaries = 0; | |
| var array, obj, uuid; | |
| if (Array.isArray(object)) { | |
| array = object; | |
| } else if (typeof (object) === 'string') { | |
| array = object.split(','); | |
| } else { | |
| throw new Error('Malformed networks object'); | |
| } | |
| for (var i = 0; i < array.length; i++) { | |
| obj = array[i]; | |
| // See history of types up above | |
| if (typeof (obj) == 'string') { | |
| uuid = obj; | |
| obj = { ipv4_uuid: uuid, ipv4_count: 1 }; | |
| } else { | |
| if (isPrimary(obj.primary)) { | |
| primaries++; | |
| } | |
| if ('uuid' in obj && ('ipv4_uuid' in obj || 'ipv6_uuid' in obj)) { | |
| throw new Error('Network object uses both old uuid form and ' + | |
| 'new ipvX_uuid form'); | |
| } | |
| if ('uuid' in obj) { | |
| obj['ipv4_uuid'] = obj['uuid']; | |
| delete obj['uuid']; | |
| if ('ip' in obj) { | |
| obj['ipv4_ips'] = [ obj['ip'] ]; | |
| delete obj['ip']; | |
| } else { | |
| obj['ipv4_count'] = 1; | |
| } | |
| } else { | |
| if ('ipv6_uuid' in obj || 'ipv6_count' in obj || | |
| 'ipv6_ips' in obj) { | |
| throw new Error('IPv6 options are not currently ' + | |
| 'supported'); | |
| } | |
| if ('ipv4_count' in obj && 'ipv4_ips' in obj) { | |
| throw new Error('cannot specify both an IP count and ' + | |
| 'specific IPs'); | |
| } | |
| if ('ipv4_count' in obj) { | |
| if (typeof (obj['ipv4_count']) !== 'number') { | |
| throw new Error('ipv4_count must be a number'); | |
| } | |
| if (obj['ipv4_count'] !== 1) { | |
| throw new Error('ipv4_count must be set to one'); | |
| } | |
| } | |
| if ('ipv4_ips' in obj) { | |
| if (!Array.isArray(obj['ipv4_ips'])) { | |
| throw new Error('ipv4_ips must be an array'); | |
| } | |
| if (obj['ipv4_ips'].length !== 1) { | |
| throw new Error('ipv4_ips may only have a single ' + | |
| 'entry'); | |
| } | |
| for (var j = 0; j < obj['ipv4_ips'].length; j++) { | |
| if (net.isIPv4(obj['ipv4_ips'][j]) !== true) { | |
| throw new Error('ipv4_ips contains invalid IPv4 ' + | |
| 'addresses'); | |
| } | |
| } | |
| } | |
| if (!('ipv4_count' in obj) && !('ipv4_ips' in obj)) { | |
| obj['ipv4_count'] = 1; | |
| } | |
| } | |
| } | |
| if (obj.ipv4_uuid && !UUID_RE.test(obj.ipv4_uuid)) { | |
| throw new Error(format('Invalid uuid %s', obj.uuid)); | |
| } else if (!obj.ipv4_uuid && !obj.name) { | |
| throw new Error('Network object must specify a UUID or a name'); | |
| } | |
| networks.push(obj); | |
| } | |
| // Two primaries were specified | |
| if (primaries > 1) { | |
| throw new Error('Cannot specify more than one primary network'); | |
| } else if (isProvision === true && primaries === 0) { | |
| networks[0].primary = true; | |
| } // else just one primary which is fine | |
| return networks; | |
| } | |
| exports.validNetworks = validNetworks; | |
/*
 * Validates a NIC update list. Two validations at the moment:
 * - reordering: if you provide 'interface' for one nic, all others must have
 *   the same attribute as well
 * - reassigning primary: only one nic can be primary
 *
 * Each entry must carry a `mac` to identify the NIC it refers to, and only
 * whitelisted properties may be updated.
 */
function validNics(vm, object) {
    var allowedExtra = ['allow_dhcp_spoofing', 'allow_ip_spoofing',
        'allow_mac_spoofing', 'allow_restricted_traffic'];
    var allowedAll = allowedExtra.concat(['mac', 'interface', 'primary']);
    var entries;
    var numPrimaries = 0;
    var numInterfaces = 0;

    if (typeof (object) === 'string') {
        entries = object.split(',');
    } else if (Array.isArray(object)) {
        entries = object;
    } else {
        throw new Error('Malformed NICs object');
    }

    if (entries.length === 0) {
        throw new Error('At least one NIC must be updated');
    }

    var nics = entries.map(function (entry) {
        var nic = {};

        // Verify the list of fields passed through a whitelist to make sure
        // users are informed when providing incorrect properties
        for (var key in entry) {
            if (allowedAll.indexOf(key) === -1) {
                throw new Error(
                    format('\'%s\' is not a valid property to update', key));
            }
        }

        if (entry.mac === undefined) {
            throw new Error('All NICs must have a `mac` attribute');
        }
        nic.mac = entry.mac;

        if (isPrimary(entry.primary)) {
            nic.primary = entry.primary;
            numPrimaries++;
        }

        if (entry['interface'] !== undefined) {
            nic['interface'] = entry['interface'];
            numInterfaces++;
        }

        allowedExtra.forEach(function (param) {
            if (entry.hasOwnProperty(param)) {
                nic[param] = entry[param];
            }
        });

        return nic;
    });

    // Two primaries were specified
    if (numPrimaries > 1) {
        throw new Error('Cannot specify more than one primary NIC');
    } else if (numInterfaces > 0 && numInterfaces !== vm.nics.length) {
        throw new Error('If reordering, must specify a new `interface` ' +
            'for every NIC that the VM currently has');
    }

    return nics;
}
exports.validNics = validNics;
/*
 * Validates if a locality object is understandable by DAPI. Each of the
 * 'near' and 'far' hints, when present, must be a UUID string or an array
 * of UUID strings.
 */
function validLocality(object) {
    if (!object || typeof (object) !== 'object') {
        throw new Error('malformed locality object');
    }

    ['near', 'far'].forEach(function (hintName) {
        var hint = object[hintName];

        if (!hint) {
            return;
        }

        if (Array.isArray(hint)) {
            hint.forEach(function (elt) {
                if (!UUID_RE.test(elt)) {
                    throw new Error('locality contains malformed UUID');
                }
            });
        } else if (typeof (hint) === 'string') {
            if (!UUID_RE.test(hint)) {
                throw new Error('locality contains malformed UUID');
            }
        } else {
            throw new Error('locality entry is neither string nor array');
        }
    });

    return object;
}
exports.validLocality = validLocality;
/*
 * Returns true if the alias is URL-compatible (matches the module-level
 * ALIAS_RE).
 */
function validAlias(alias) {
    return (ALIAS_RE.test(alias) === true);
}
exports.validAlias = validAlias;
/*
 * Validates if a vm brand is one of the supported VALID_VM_BRANDS
 * (historically 'joyent' or 'kvm').
 */
function validBrand(brand) {
    // indexOf always returns a number, so compare strictly
    return VALID_VM_BRANDS.indexOf(brand) !== -1;
}
exports.validBrand = validBrand;
/*
 * Validates if a param is a valid number. If gezero (greater or equal than
 * zero) is true, the number may be zero; otherwise it must be strictly
 * positive. Non-numeric input parses to NaN and is rejected.
 */
function validNumber(param, gezero) {
    var number = parseInt(param, 10);
    var floor = (gezero === true) ? 0 : 1;
    return (number >= floor);
}
| exports.validNumber = validNumber; | |
/*
 * Validates that the vm is owned by the owner_uuid in the params, when one
 * is given.
 *
 * Throws ValidationFailedError for a malformed owner_uuid, and
 * ResourceNotFoundError when the VM belongs to somebody else (the VM's
 * existence is hidden from the wrong owner).
 */
function validOwner(vm, params) {
    var ownerUuid = params.owner_uuid;

    if (!ownerUuid) {
        return true;
    }

    if (!validUUID(ownerUuid)) {
        throw new errors.ValidationFailedError('Invalid Parameters',
            [ errors.invalidUuidErr('owner_uuid') ]);
    }

    if (ownerUuid != vm.owner_uuid) {
        throw new restify.ResourceNotFoundError('VM not found');
    }

    return true;
}
exports.validOwner = validOwner;
/*
 * Validates that a metadata object contains only strings, numbers or
 * booleans. Additionally, customer_metadata keys may not use the reserved
 * password suffix (PW_SUFFIX).
 */
function validMetadata(name, obj) {
    var allowedTypes = ['string', 'boolean', 'number'];
    var isCustomer = (name === 'customer_metadata');

    for (var key in obj) {
        if (allowedTypes.indexOf(typeof (obj[key])) === -1) {
            throw new errors.ValidationFailedError('Invalid Parameter',
                [ errors.invalidParamErr(name + '.' + key,
                    'Forbidden Data Type') ]);
        } else if (isCustomer && PW_SUFFIX.test(key)) {
            throw new errors.ValidationFailedError('Invalid Parameter',
                [ errors.invalidParamErr(name + '.' + key,
                    'Forbidden Metadata Key') ]);
        }
    }

    return true;
}
exports.validMetadata = validMetadata;
/*
 * Validates that the customer_metadata keys being set do not violate the
 * VM's internal_metadata_namespaces restrictions. A key of the form
 * '<namespace>:<rest>' is rejected when <namespace> is protected.
 *
 * Throws ValidationFailedError listing the offending keys; returns true
 * otherwise.
 */
function validateMetadataNamespaces(vm, params) {
    var namespaces = vm.internal_metadata_namespaces;
    if (!namespaces) {
        return true;
    }

    /*
     * Nothing to check when no customer_metadata is being set; without this
     * guard Object.keys(undefined) throws a TypeError.
     */
    if (!params.set_customer_metadata) {
        return true;
    }

    var invalid = [];
    var custMdataKeys = Object.keys(params.set_customer_metadata);

    for (var i = 0; i < custMdataKeys.length; i++) {
        // foo:id -> 'foo' # not valid
        // foo -> 'foo' # valid, not namespaced
        var splitted = custMdataKeys[i].split(':');
        if (splitted.length === 1) {
            continue;
        }

        if (namespaces.indexOf(splitted[0]) !== -1) {
            invalid.push(custMdataKeys[i]);
        }
    }

    if (invalid.length) {
        var formattedNs = namespaces.map(function (ns) {
            return '\'' + ns + ':*' + '\'';
        });

        var error = [ errors.invalidParamErr('customer_metadata',
            format('Invalid metadata keys: %s (protected namespaces: %s)',
                invalid.join(', '), formattedNs)) ];

        throw new errors.ValidationFailedError(
            'Invalid Parameter', error);
    }

    return true;
}
exports.validateMetadataNamespaces = validateMetadataNamespaces;
/*
 * Validates that the disks for the KVM VM have a valid format: disk 0 must
 * carry a valid image_uuid (and may not specify a size, which comes from
 * the image), while every subsequent disk must specify a size and must not
 * specify an image_uuid. Problems are appended to errs.
 */
function validDisks(disks, errs) {
    var bootDisk = disks[0];

    if (bootDisk.image_uuid === undefined) {
        errs.push(errors.missingParamErr('disks.0.image_uuid'));
    } else if (!validUUID(bootDisk.image_uuid)) {
        errs.push(errors.invalidUuidErr('disks.0.image_uuid'));
    }

    if (bootDisk.size !== undefined) {
        errs.push(errors.invalidParamErr('disks.0.size', 'Not Allowed'));
    }

    disks.slice(1).forEach(function (disk, idx) {
        var n = idx + 1;

        if (disk.image_uuid !== undefined) {
            errs.push(errors.invalidParamErr('disks.' + n + '.image_uuid',
                'Not Allowed'));
        }

        if (disk.size === undefined) {
            errs.push(errors.missingParamErr('disks.' + n + '.size'));
        }
    });

    return true;
}
/*
 * Does additional validation depending on the VM brand. This function only
 * populates the errors array that was passed in case some fields are not
 * valid, and later on the main validation code will throw an exception if
 * needed.
 *
 * - kvm VMs must provide a non-empty disks array (a JSON string is also
 *   accepted), whose entries are further checked by validDisks
 * - all other brands must provide a valid image_uuid instead
 */
function validateBrandParams(params, errs) {
    if (params.brand !== 'kvm') {
        // Only non-kvm vms require image_uuid
        if (params.image_uuid === undefined) {
            errs.push(errors.missingParamErr('image_uuid'));
        } else if (!validUUID(params.image_uuid)) {
            errs.push(errors.invalidUuidErr('image_uuid'));
        }
        return;
    }

    if (!params.disks) {
        errs.push(errors.missingParamErr('disks'));
        return;
    }

    if (typeof (params.disks) === 'string') {
        try {
            params.disks = JSON.parse(params.disks);
        } catch (e) {
            errs.push(errors.invalidParamErr('disks', 'Malformed JSON'));
            return;
        }
    }

    if (!Array.isArray(params.disks) || params.disks.length < 1) {
        errs.push(errors.invalidParamErr('disks'));
    } else {
        validDisks(params.disks, errs);
    }
}
/*
 * Validates and creates metadata object to be used for vmadm update
 *
 * - vm: VM to update
 * - mdataKey: any of customer_metadata, internal_metadata or tags
 * - params: params to be sent to WFAPI
 * - obj: input object from the request (an object, or a JSON string)
 *
 * Throws ValidationFailedError when obj is malformed; otherwise stores the
 * computed 'set_<key>'/'remove_<key>' entries on params and returns true.
 */
function createMetadataObject(vm, mdataKey, params, obj) {
    var metadata;
    var error;

    /*
     * typeof (null) === 'object', so reject null explicitly; otherwise a
     * null payload would slip past validation into common.setMetadata.
     */
    if (obj !== null && typeof (obj) === 'object') {
        metadata = obj;
    } else if (typeof (obj) === 'string') {
        try {
            metadata = JSON.parse(obj);
        } catch (e) {
            error = [ errors.invalidParamErr(mdataKey, 'Malformed JSON') ];
            throw new errors.ValidationFailedError('Invalid Parameters', error);
        }
    } else {
        error = [ errors.invalidParamErr(mdataKey, 'Invalid data type') ];
        throw new errors.ValidationFailedError('Invalid Parameters', error);
    }

    validMetadata(mdataKey, metadata);

    var updateObject = common.setMetadata(vm, mdataKey, metadata);
    if (updateObject['set_' + mdataKey]) {
        params['set_' + mdataKey] = updateObject['set_' + mdataKey];
    }
    if (updateObject['remove_' + mdataKey]) {
        params['remove_' + mdataKey] = updateObject['remove_' + mdataKey];
    }

    return true;
}
/*
 * Validates that the vm alias is unique per customer. Calls back with a
 * duplicate-param error object when an active VM with the same owner_uuid
 * and alias already exists, or with no error when the alias is free.
 */
function validateUniqueAlias(moray, params, callback) {
    var query = {
        owner_uuid: params.owner_uuid,
        alias: params.alias,
        state: 'active',
        _update: true
    };

    moray.listVms(query, function (err, vms) {
        if (err) {
            callback(err);
            return;
        }

        if (vms.length === 0) {
            callback(null);
            return;
        }

        /*JSSTYLED*/
        callback(null, errors.duplicateParamErr('alias',
            'Already exists for this owner_uuid'));
    });
}
| /* | |
| * Simple validator, just makes sure the networks parameter has the correct | |
| * format, it can be either a string (comma separated) or an array | |
| */ | |
| exports.validateNetworks = function (params) { | |
| var errs = []; | |
| if (params.networks) { | |
| try { | |
| params.networks = validNetworks(params.networks, false); | |
| } catch (e) { | |
| errs.push(errors.invalidParamErr('networks', e.message)); | |
| } | |
| } else { | |
| errs.push(errors.missingParamErr('networks')); | |
| } | |
| if (errs.length) { | |
| throw new errors.ValidationFailedError('Invalid VM update parameters', | |
| errs); | |
| } | |
| return true; | |
| }; | |
| /* | |
| * This validator makes sure that each nic object in the array only contains | |
| * the following attributes: | |
| * | |
| * - mac: neeeded to identify the NIC | |
| * - interface: needed if want to reorder nics | |
| * - primary: needed if want to reassign primary | |
| */ | |
| exports.validateNics = function (vm, params) { | |
| var errs = []; | |
| if (params.nics) { | |
| try { | |
| params.nics = validNics(vm, params.nics); | |
| } catch (e) { | |
| errs.push(errors.invalidParamErr('nics', e.message)); | |
| } | |
| } else { | |
| errs.push(errors.missingParamErr('nics')); | |
| } | |
| if (errs.length) { | |
| throw new errors.ValidationFailedError('Invalid VM update parameters', | |
| errs); | |
| } | |
| return true; | |
| }; | |
| /* | |
| * Simple validator, just makes sure the mac addresses parameter has the correct | |
| * format, it can be either a string (comma separated) or an array | |
| */ | |
| exports.validateMacs = function (params) { | |
| var errs = []; | |
| if (params.macs) { | |
| if (!validStrings(params.macs)) { | |
| errs.push(errors.invalidParamErr('macs', | |
| 'Invalid MAC addresses object')); | |
| } | |
| } else { | |
| errs.push(errors.missingParamErr('macs')); | |
| } | |
| if (errs.length) { | |
| throw new errors.ValidationFailedError('Invalid VM update parameters', | |
| errs); | |
| } | |
| if (typeof (params.macs) === 'string') { | |
| params.macs = params.macs.split(','); | |
| } | |
| return true; | |
| }; | |
| /* | |
| * Sets default attributes for a vm on things that depend on | |
| * RAM or disk for example | |
| */ | |
| exports.setDefaultValues = function (params, options) { | |
| var config = {}; | |
| var i; | |
| if (options && options.config) { | |
| config = options.config; | |
| if (config.overlay.natPool) { | |
| params.sdc_nat_pool = config.overlay.natPool; | |
| } | |
| } | |
| if (params.uuid === undefined) { | |
| params.uuid = libuuid.create(); | |
| } | |
| if (params.ram) { | |
| params.ram = parseInt(params.ram, 10); | |
| if (params.max_physical_memory === undefined) { | |
| params.max_physical_memory = params.ram; | |
| } | |
| } | |
| // Use these default values when a package was not specified | |
| if (params.billing_id === undefined) { | |
| if (params.max_swap === undefined) { | |
| var swap = params.ram * 2; | |
| if (swap < MIN_SWAP) swap = MIN_SWAP; | |
| params.max_swap = swap; | |
| } | |
| if (params.quota === undefined) { | |
| params.quota = DEFAULT_QUOTA; | |
| } | |
| if (params.cpu_shares === undefined) { | |
| if (params.ram > 128) | |
| params.cpu_shares = Math.floor(params.ram / 128); | |
| else | |
| params.cpu_shares = 1; | |
| } | |
| } | |
| if (params.post_back_urls && | |
| typeof (params.post_back_urls) === 'string') { | |
| params.post_back_urls = params.post_back_urls.split(','); | |
| } | |
| if (params.firewall_enabled === undefined) { | |
| params.firewall_enabled = false; | |
| } | |
| // Add additional values for KVM disks | |
| if (params.brand === 'kvm') { | |
| console.log(params); | |
| params.disks[0].image_name = params.image.name; | |
| params.disks[0].image_size = params.image.image_size; | |
| // Set a default refreservation for i > 0 disks | |
| for (i = 1; i < params.disks.length; i++) { | |
| if (params.disks[i].refreservation === undefined) { | |
| if (config && config.reserveKvmStorage === false) { | |
| params.disks[i].refreservation = 0; | |
| } else { | |
| params.disks[i].refreservation = params.disks[i].size; | |
| } | |
| } | |
| } | |
| } | |
| }; |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| diff --git a/lib/common/validation.js b/lib/common/validation.js | |
| index 142dac6..73fa6cd 100644 | |
| --- a/lib/common/validation.js | |
| +++ b/lib/common/validation.js | |
| @@ -356,6 +356,8 @@ var validators = { | |
| tags: createValidateMetadataFn('tags'), | |
| + ticket: createValidateUUIDFn('uuid', false), | |
| + | |
| tmpfs: createValidateNumberFn('tmpfs', true), | |
| uuid: createValidateUUIDFn('uuid', false), | |
| @@ -1674,6 +1676,10 @@ exports.setDefaultValues = function (params, options) { | |
| if (options && options.config) { | |
| config = options.config; | |
| + | |
| + if (config.overlay.natPool) { | |
| + params.sdc_nat_pool = config.overlay.natPool; | |
| + } | |
| } | |
| if (params.uuid === undefined) { | |
| diff --git a/lib/workflows/job-common.js b/lib/workflows/job-common.js | |
| index d9c122d..a55fddf 100644 | |
| --- a/lib/workflows/job-common.js | |
| +++ b/lib/workflows/job-common.js | |
| @@ -495,6 +495,7 @@ function provisionNics(job, cb) { | |
| // Every NIC we provision is added to this array | |
| var nics = []; | |
| var primaryFound = false; | |
| + job.params.fabricNatNics = []; | |
| networks.forEach(function (net) { | |
| if (net.primary) | |
| @@ -513,10 +514,19 @@ function provisionNics(job, cb) { | |
| return { | |
| owner_uuid: job.params.owner_uuid, | |
| belongs_to_uuid: job.params.uuid || job.params.vm_uuid, | |
| - belongs_to_type: 'zone' | |
| + belongs_to_type: 'zone', | |
| + cn_uuid: job.params.server_uuid | |
| }; | |
| } | |
| + // If this is a nic on a fabric and has no gateway provisioned, add it | |
| + function addFabricNatNic(fNic) { | |
| + if (fNic && fNic.fabric && fNic.gateway && !fNic.gateway_provisioned && | |
| + fNic.ip !== fNic.gateway) { | |
| + job.params.fabricNatNics.push(fNic); | |
| + } | |
| + } | |
| + | |
| // Get current list of NICs that might have been provisioned ahead of time | |
| napi.listNics(nicParams(), function (err, res) { | |
| if (err) { | |
| @@ -561,6 +571,7 @@ function provisionNics(job, cb) { | |
| next(suberr); | |
| } else { | |
| nics.push(nic); | |
| + addFabricNatNic(nic); | |
| next(); | |
| } | |
| }); | |
| diff --git a/lib/workflows/provision.js b/lib/workflows/provision.js | |
| index 63e8379..7ade96c 100644 | |
| --- a/lib/workflows/provision.js | |
| +++ b/lib/workflows/provision.js | |
| @@ -13,6 +13,7 @@ | |
| */ | |
| var async = require('async'); | |
| +var fabricCommon = require('./fabric-common'); | |
| var restify = require('restify'); | |
| var common = require('./job-common'); | |
| var childProcess = require('child_process'); | |
| @@ -46,6 +47,10 @@ function validateParams(job, cb) { | |
| return cb('No IMGAPI URL provided'); | |
| } | |
| + if (sapiUrl === undefined) { | |
| + return cb('No SAPI URL provided'); | |
| + } | |
| + | |
| if (job.params['owner_uuid'] === undefined) { | |
| return cb('\'owner_uuid\' is required'); | |
| } | |
| @@ -531,7 +536,15 @@ var workflow = module.exports = { | |
| retry: 1, | |
| body: common.provisionNics, | |
| modules: { sdcClients: 'sdc-clients', async: 'async' } | |
| - }, { | |
| + } | |
| + | |
| + /** | |
| + * Fabric NAT provisioning | |
| + */ | |
| + ].concat( | |
| + fabricCommon.provisionChain).concat([ | |
| + | |
| + { | |
| name: 'prepare_payload', | |
| timeout: 10, | |
| retry: 1, | |
| @@ -549,7 +562,15 @@ var workflow = module.exports = { | |
| retry: 1, | |
| body: common.pollTask, | |
| modules: { sdcClients: 'sdc-clients' } | |
| - }, { | |
| + | |
| + }, | |
| + | |
| + /* | |
| + * If we've provisioned fabric NAT zones for this VM, wait until | |
| + * they've finished before sending off the provision. | |
| + */ | |
| + fabricCommon.provisionWaitTask, | |
| + { | |
| name: 'cnapi.provision_vm', | |
| timeout: 10, | |
| retry: 1, | |
| @@ -579,7 +600,12 @@ var workflow = module.exports = { | |
| retry: 1, | |
| body: common.releaseVMTicket, | |
| modules: { sdcClients: 'sdc-clients' } | |
| - } ], | |
| + }, | |
| + | |
| + // If this was a fabric nat provision, clean up the ticket | |
| + fabricCommon.releaseTicketTask | |
| + | |
| + ]), | |
| timeout: 3810, | |
| onerror: [ { | |
| name: 'napi.cleanup_nics', | |
| @@ -606,6 +632,10 @@ var workflow = module.exports = { | |
| modules: { sdcClients: 'sdc-clients' }, | |
| body: common.releaseVMTicket | |
| }, | |
| + | |
| + // If this was a fabric nat provision, clean up the ticket | |
| + fabricCommon.releaseTicketTask, | |
| + | |
| { | |
| name: 'On error', | |
| body: function (job, cb) { | |
| @@ -625,5 +655,9 @@ var workflow = module.exports = { | |
| name: 'cnapi.cleanup_allocation_ticket', | |
| modules: { sdcClients: 'sdc-clients' }, | |
| body: common.releaseAllocationTicket | |
| - }] | |
| + }, | |
| + | |
| + // If this was a fabric nat provision, clean up the ticket | |
| + fabricCommon.releaseTicketTask | |
| + ] | |
| }; | |
| diff --git a/sapi_manifests/vmapi/template b/sapi_manifests/vmapi/template | |
| index a0cba58..ceeb8bd 100644 | |
| --- a/sapi_manifests/vmapi/template | |
| +++ b/sapi_manifests/vmapi/template | |
| @@ -7,6 +7,11 @@ | |
| "api": { | |
| "port": 80 | |
| }, | |
| + "overlay": { | |
| +{{#fabric_cfg}} | |
| + "natPool": "{{{sdc_nat_pool}}}" | |
| +{{/fabric_cfg}} | |
| + }, | |
| "heartbeater": { | |
| "host": "{{{RABBITMQ_SERVICE}}}", | |
| "queue": "heartbeat.vmapi", | |
| diff --git a/tools/jsl.node.conf b/tools/jsl.node.conf | |
| index a10db43..8bf9239 100644 | |
| --- a/tools/jsl.node.conf | |
| +++ b/tools/jsl.node.conf | |
| @@ -134,6 +134,7 @@ | |
| +define ufdsDn | |
| +define ufdsPassword | |
| +define imgapiUrl | |
| ++define sapiUrl | |
| +define vmapiUrl | |
| +define urlModule | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment