#. -*- coding: utf-8 - # required proxmox permissions: PVEAdmin # # afx 2015-2017 # site from proxmoxer import ProxmoxAPI import base64 import json import time import socket import random from unidecode import unidecode #local import grid import utils import ioconfig import novnc def auth(slave_name): """ return control object from config slave names """ adminuser = ioconfig.parser.get('general', 'adminuser') slaveip = ioconfig.parser.get(str(slave_name), 'ipv4') slavepass = ioconfig.parser.get(str(slave_name), 'password') slavetype = ioconfig.parser.get(str(slave_name), 'type') if slavetype == 'proxmox': proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False) return proxobject def create(json): """ create an unit. returns JSON with data """ try: region_id = ioconfig.parser.get(str(json['region']), 'regionid') region_fullname = ioconfig.parser.get(str(json['region']), 'fullname') except: ioconfig.logger.error('grid> region not found') return None vm_name_utf8 = json['hostname'] vm_name = unidecode(vm_name_utf8) try: vm_pass = json['rootpass'] except: vm_pass = '!%%^)@&&(K3B' #slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate route #slave_name = 'lexx' slave_name = 'warrior' unit_id = int(time.time() * 10000 * 10000) phy_id = grid.phyidgen(json['type']) proxobject = auth(slave_name) real_slave_name = proxobject.cluster.status.get()[0]['name'] description = vm_name + ' (' + str(unit_id) + '-' + str(phy_id) + ')\n' + 'owned by ' + json['clientemail'] + ' (' + json['clientid'] + ')\n' if json['type'] == 'deploy': #create partition image_name = 'vm-' + str(phy_id) + '-disk-1' try: local_storage = proxobject.nodes(real_slave_name).storage('lvm') storage_create_result = local_storage.content.post(vmid=phy_id, filename=image_name, size=json['hdd'] + 'G') ioconfig.logger.info('%s[%s]> allocated %s as %s' % (json['clientemail'], slave_name, json['hdd'], image_name)) except: ioconfig.logger.info('%s[%s]> unable to allocate %s' % (json['clientemail'], slave_name, image_name)) response = { 'status':'FAIL' } return response create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(phy_id), name=vm_name, onboot=1, sockets=1, cores=json['cpu'], memory=json['mem'], scsihw='virtio-scsi-pci', scsi0='file=lvm:' + image_name + ',discard=on', description=description) data = { 'unit_id': int(unit_id), 'type': 'kvm', 'clientid': json['clientid'], 'clientemail': json['clientemail'], 'hostname': vm_name, 'region': region_fullname, 'slave': slave_name, 'phyid': phy_id } response = { 'status': 'deploy_created', 'unit_id': unit_id, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name } if json['type'] == 'router': create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(phy_id), hostname=vm_name, onboot=1, unprivileged=1, password=vm_pass, cores=json['cpu'], memory=json['mem'], net0='name=eth0,bridge=' + json['bridge_id'] + ',gw=' + json['region_gw'] + ',hwaddr=' + json['macaddr'] + ',ip=' + json['ipv4addr'] + '/' + json['region_netmask'], ostemplate='backup:vztmpl/debian-9.0-standard_9.0-2_amd64.tar.gz', rootfs='volume=lvm:' + image_name, swap=32, description=description) data = { 'unit_id': int(unit_id), 'type': 'lxc', 'clientid': json['clientid'], 'clientemail': json['clientemail'], 'hostname': vm_name, 'region': region_fullname, 'slave': slave_name, 'phyid': phy_id } response = { 'status': 'router_created', 'unit_id': unit_id, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name } if json['type'] == 'bridge': data = { 'unit_id': int(unit_id), 'type': 'vmbr', 'clientid': json['clientid'], 'clientemail': json['clientemail'], 'region': region_fullname, 'slave': slave_name, 'phyid': phy_id } #TODO: CREATE BRIDGE response = { 'status': 'bridge_created', 'unit_id': unit_id, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name } time.sleep(7) #wait few seconds for the slave to prepare the machine for initial run grid.create(data) return response def remove(json): """ terminate an unit """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) if unit_type == 'kvm': result = proxobject.nodes(slave_name).qemu(phy_id).delete() grid.delete(json) if unit_type == 'lxc': result = proxobject.nodes(slave_name).lxc(phy_id).delete() grid.delete(json) response = { 'status':'{}_deleted'.format(unit_type) } return response def query(json): """ return the db info of an unit """ return grid.query(json) def status(json): """ returns the status of an unit """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) #slave_name = proxobject.cluster.status.get()[0]['name'] ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) if unit_type == 'kvm': result = proxobject.nodes(slave_name).qemu(phy_id).status.current.get() if unit_type == 'lxc': result = proxobject.nodes(slave_name).lxc(phy_id).status.current.get() return result def start(json): """ starts a machine """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) #slave_name = proxobject.cluster.status.get()[0]['name'] ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) if unit_type == 'kvm': result = proxobject.nodes(slave_name).qemu(phy_id).status.start.post() if unit_type == 'lxc': result = proxobject.nodes(slave_name).lxc(phy_id).status.start.post() #TODO: SET START AT BOOT FLAG response = { 'status':'START' } return response def shutdown(json): """ acpi shutdown the machine.. """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) #slave_name = proxobject.cluster.status.get()[0]['name'] ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) if unit_type == 'kvm': result = proxobject.nodes(slave_name).qemu(phy_id).status.shutdown.post() if unit_type == 'lxc': result = proxobject.nodes(slave_name).lxc(phy_id).status.shutdown.post() #TODO: REMOVE START AT BOOT FLAG #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result)) response = { 'status':'SHUTDOWN', 'vmid':phy_id } return response def stop(json): """ poweroff the machine.. """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) #slave_name = proxobject.cluster.status.get()[0]['name'] ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) if unit_type == 'kvm': result = proxobject.nodes(slave_name).qemu(phy_id).status.stop.post() if unit_type == 'lxc': result = proxobject.nodes(slave_name).lxc(phy_id).status.stop.post() #TODO: REMOVE START AT BOOT FLAG #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result)) response = { 'status':'STOP', 'vmid':phy_id } return response def suspend(json): """ suspend machine """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) #slave_name = proxobject.cluster.status.get()[0]['name'] ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) if unit_type == 'kvm': result = proxobject.nodes(slave_name).qemu(phy_id).status.suspend.post() if unit_type == 'lxc': result = proxobject.nodes(slave_name).lxc(phy_id).status.suspend.post() response = { 'status':'SUSPEND', 'vmid':phy_id } return response def resume(json): """ resume machine """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) #slave_name = proxobject.cluster.status.get()[0]['name'] ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) if unit_type == 'kvm': result = proxobject.nodes(slave_name).qemu(phy_id).status.resume.post() if unit_type == 'lxc': result = proxobject.nodes(slave_name).lxc(phy_id).status.resume.post() response = { 'status':'RESUME', 'vmid':phy_id } return response def vmrrd(json): """ retrieve rrd graphs (PNG) """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) proxobject.cluster.status.get()[0]['name'] result = {} if unit_type == 'kvm': statusquery = proxobject.nodes(slave_name).qemu(phy_id).status.current.get() rcpu = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu') rmem = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem') rnet = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout') rhdd = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite') status = str(statusquery['qmpstatus']) if unit_type == 'lxc': status = proxobject.nodes(slave_name).lxc(phy_id).status.current.get() rcpu = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu') rmem = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem') rnet = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout') rhdd = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite') status = str(statusquery['qmpstatus']) #TODO: maybe change this? #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, unit_type, phy_id, vm_host, status)) response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd } return response def vmvnc(json): """ invoke vnc ticket """ unit_type = json['type'] slave_name, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) #slave_name = proxobject.cluster.status.get()[0]['name'] ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) if unit_type == 'kvm': ticket = proxobject.nodes(slave_name).qemu(phy_id).vncproxy.post(websocket=1) #socket = proxobject.nodes(slave_name).qemu(phy_id).vncwebsocket.get(port=ticket['port'], # vncticket=ticket['ticket']) if unit_type == 'lxc': ticket = proxobject.nodes(slave_name).lxc(phy_id).vncproxy.post() #socket = proxobject.nodes(slave_name).lxc(phy_id).vncwebsocket.get(port=ticket['port'], # vncticket=ticket['ticket']) slaveip = ioconfig.parser.get(str(slave_name), 'ipv4') #slaveport = socket['port'] slaveport = ticket['port'] slave_id = 1 #TODO: fix this vnchost = ioconfig.parser.get('general', 'novnc_host') listenport = str(int(slaveport) + 1000 + (int(slave_id) * 100)) #TODO: max 100 parallel connections/slave. vnc_target = { 'target_host': slaveip, 'target_port': slaveport, 'listen_host': vnchost, 'listen_port': listenport } vnc_options = { 'idle-timeout': 20, 'verbose': True, 'cert': ioconfig.parser.get('general', 'ssl_cert'), 'key': ioconfig.parser.get('general', 'ssl_key'), 'ssl-only': True } novnc.spawn(vnc_target, vnc_options) external_url = ioconfig.parser.get('general', 'novnc_url') prefix = external_url + "?host=" + vnchost + "&port=" + listenport + "&view_only=false&encrypt=1&true_color=1&password=" vnc_url = prefix + ticket['ticket'] time.sleep(3) #wait few seconds for the parallel vncwebsocket ioconfig.logger.info('{}[{}]> vnc port {} ready'.format(vm_owner, slave_name, listenport)) #response = { 'status':'VNC', 'fqdn':external_url, 'host':myip, 'port':listenport, 'encrypt':'0', 'true_color':'1', 'ticket':ticket['ticket'] } response = { 'status':'VNC', 'url':vnc_url } #print(vnc_url) return response #DEPRECATED def vmlist(proxobject): """ get unit list """ #we keep a single node proxmoxes so node id = 0 slave_name = proxobject.cluster.status.get()[0]['name'] query_kvm = proxobject.nodes(slave_name).qemu.get() query_lxc = proxobject.nodes(slave_name).lxc.get() for kvm_dict in query_kvm: kvm_dict['vmtype'] = 'kvm' for lxc_dict in query_lxc: lxc_dict['vmtype'] = 'lxc' vmlist = query_kvm + query_lxc #merge machine list return vmlist if __name__ == '__main__': #internal module tests time.sleep(1) #vmvnc(656758)