# -*- coding: utf-8 -*-

# site
from proxmoxer import ProxmoxAPI
import base64
import json
import time
import socket
import random
from unidecode import unidecode

# local
import grid
import utils
import ioconfig
import novnc


def auth(slave_name):
    """ return a control object for a slave named in the config """
    adminuser = ioconfig.parser.get('general', 'adminuser')
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    slavepass = ioconfig.parser.get(str(slave_name), 'password')
    proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False)
    return proxobject


def create(json):
    """ create a unit. returns JSON with data """
    region_name = json['region']
    # just for the record. slaves are equal in the grid, as long as they are queried
    slave_name = json['slave']
    proxobject = auth(slave_name)
    real_slave_name = proxobject.cluster.status.get()[0]['name']
    unit_id = int(time.time() * 10000 * 10000)  # currently unit_id is just a timestamp
    phy_id = grid.phyidgen(slave_name, json['type'])
    if phy_id == 99999:
        response = { 'status': 'phy_id_alloc_failed' }
        return response

    description = ' (' + str(unit_id) + ' - ' + str(phy_id) + ')\n' + 'owned by ' + json['clientemail'] + ' (' + json['clientid'] + ')\n'

    if json['type'] == 'kvm':
        vm_name_utf8 = json['hostname']
        vm_name = unidecode(vm_name_utf8)
        image_name = 'vm-' + str(phy_id) + '-disk-1'
        try:
            local_storage = proxobject.nodes(real_slave_name).storage('lvm')
            storage_create_result = local_storage.content.post(vmid=phy_id, filename=image_name, size=json['hdd'] + 'G')
            ioconfig.logger.info('%s[%s]> allocated %s as %s' % (json['clientemail'], slave_name, json['hdd'], image_name))
        except Exception:
            ioconfig.logger.info('%s[%s]> unable to allocate %s' % (json['clientemail'], slave_name, image_name))
            response = { 'status': 'vol_alloc_failed' }
            return response

        try:
            create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(phy_id),
                                                                        name=vm_name,
                                                                        onboot=1,
                                                                        sockets=1,
                                                                        cores=json['cpu'],
                                                                        memory=json['mem'],
                                                                        scsihw='virtio-scsi-pci',
                                                                        scsi0='file=lvm:' + image_name + ',discard=on',
                                                                        net0='virtio=8A:32:CD:E4:EE:11,bridge=' + json['net0if'] + ',tag=' + str(phy_id),
                                                                        description=description)
        except Exception as e:
            print(e)
            response = { 'status': 'kvm_create_failed' }
            return response

        data = { 'unit_id': int(unit_id),
                 'type': 'kvm',
                 'clientid': json['clientid'],
                 'clientemail': json['clientemail'],
                 'hostname': vm_name,
                 'region': region_name,
                 'slave': real_slave_name,
                 'phy_id': phy_id,
                 'net0if': json['net0if'] }
        grid.create(data)
        response = { 'status': 'kvm_created', 'unit_id': unit_id, 'region': region_name, 'slave': real_slave_name, 'vlanid': phy_id }

    if json['type'] == 'lxc':
        vm_name_utf8 = json['hostname']
        vm_name = unidecode(vm_name_utf8)
        image_name = 'vm-' + str(phy_id) + '-disk-1'
        #try:
        #    local_storage = proxobject.nodes(real_slave_name).storage('lvm')
        #    storage_create_result = local_storage.content.post(vmid=phy_id, filename=image_name, size=json['hdd'] + 'G')
        #    ioconfig.logger.info('%s[%s]> allocated %s as %s' % (json['clientemail'], slave_name, json['hdd'], image_name))
        #except:
        #    ioconfig.logger.info('%s[%s]> unable to allocate %s' % (json['clientemail'], slave_name, image_name))
        #    response = { 'status':'vol_alloc_failed' }
        #    return response
        try:
            vm_pass = json['rootpass']
        except KeyError:
            vm_pass = '!%%^)@&&(K3B'  # fallback password when the request carries none
        try:
            create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(phy_id),
                                                                       hostname=vm_name,
                                                                       onboot=1,
                                                                       unprivileged=1,
                                                                       password=vm_pass,
                                                                       cores=json['cpu'],
                                                                       memory=json['mem'],
                                                                       net0='name=eth0,bridge=' + json['net0if'] + ',ip=' + json['net0ip'] + '/' + json['net0mask'],
                                                                       net1='name=eth1,bridge=' + json['net1if'] + ',ip=' + json['net1ip'] + '/' + json['net1mask'] + ',gw=' + json['net1gw'],
                                                                       ostemplate='backup:vztmpl/debian-9.0-standard_9.0-2_amd64.tar.gz',
                                                                       rootfs='volume=lvm:' + str(json['hdd']),
                                                                       swap=32,
                                                                       description=description)
        except Exception:
            return { 'status': 'lxc_create_failed' }

        data = { 'unit_id': int(unit_id),
                 'type': 'lxc',
                 'clientid': json['clientid'],
                 'clientemail': json['clientemail'],
                 'hostname': vm_name,
                 'region': region_name,
                 'slave': slave_name,
                 'phy_id': phy_id,
                 'net0if': json['net0if'] }
        grid.create(data)
        response = { 'status': 'lxc_created', 'unit_id': unit_id, 'hostname': vm_name, 'region': region_name, 'slave': real_slave_name }

    #if json['type'] == 'br':
    #    try:
    #        create_result = proxobject.nodes(real_slave_name).network.post(iface='vmbr' + str(phy_id),
    #                                                                       type='bridge',
    #                                                                       autostart=1)
    #    except Exception as e:
    #        print(e)
    #        return { 'status': 'br_create_failed' }
    #    data = { 'unit_id': int(unit_id),
    #             'type': 'br',
    #             'clientid': json['clientid'],
    #             'clientemail': json['clientemail'],
    #             'region': region_name,
    #             'slave': slave_name,
    #             'phy_id': phy_id }
    #    grid.create(data)
    #    response = { 'status': 'bridge_created', 'unit_id': unit_id, 'region': region_name, 'slave': real_slave_name, 'phy_id': phy_id }

    return response
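
# Illustrative create() payload, kept as a comment so nothing runs at import
# time. The keys mirror the json[...] lookups above; the values (node name,
# bridge, client data) are hypothetical and not taken from a real config:
#
# example_kvm_request = { 'type': 'kvm',
#                         'region': 'example-region',
#                         'slave': 'examplenode',
#                         'hostname': 'unit.example.com',
#                         'clientid': '42',
#                         'clientemail': 'client@example.com',
#                         'cpu': 2,
#                         'mem': 2048,
#                         'hdd': '20',
#                         'net0if': 'vmbr0' }
# create(example_kvm_request)
#
# An 'lxc' request additionally needs net0ip/net0mask, net1if/net1ip/net1mask/net1gw
# and, optionally, rootpass.
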

def remove(json):
    """ terminate a unit """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).delete()
        grid.delete(json)
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).delete()
        grid.delete(json)
    response = { 'status': '{}_deleted'.format(unit_type) }
    return response


def query(json):
    """ return the db info of a unit """
    query = grid.query(json)
    query['status'] = 'query_success'
    return query


def status(json):
    """ return the status of a unit """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        statusquery = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
        result = { 'status': str(statusquery['qmpstatus']) }
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
    return result


def start(json):
    """ start a machine """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.start.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.start.post()
    #TODO: SET START AT BOOT FLAG
    response = { 'status': 'START' }
    return response


def shutdown(json):
    """ ACPI shutdown of the machine """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.shutdown.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.shutdown.post()
    #TODO: REMOVE START AT BOOT FLAG
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status': 'SHUTDOWN', 'vmid': phy_id }
    return response


def stop(json):
    """ power off the machine """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.stop.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.stop.post()
    #TODO: REMOVE START AT BOOT FLAG
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status': 'STOP', 'vmid': phy_id }
    return response


def suspend(json):
    """ suspend the machine """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.suspend.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.suspend.post()
    response = { 'status': 'SUSPEND', 'vmid': phy_id }
    return response


def resume(json):
    """ resume the machine """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.resume.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.resume.post()
    response = { 'status': 'RESUME', 'vmid': phy_id }
    return response

def vmrrd(json):
    """ retrieve rrd graphs (PNG) """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    proxobject.cluster.status.get()[0]['name']
    result = {}
    if unit_type == 'kvm':
        statusquery = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
        rcpu = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
        status = str(statusquery['qmpstatus'])
    if unit_type == 'lxc':
        statusquery = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
        rcpu = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
        status = str(statusquery['status'])  # containers report 'status' rather than qemu's 'qmpstatus'
    #TODO: maybe change this?
    #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, unit_type, phy_id, vm_host, status))
    response = { 'status': status, 'cpu': rcpu, 'mem': rmem, 'net': rnet, 'hdd': rhdd }
    return response


def vmvnc(json):
    """ invoke vnc ticket """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        ticket = proxobject.nodes(slave_name).qemu(phy_id).vncproxy.post(websocket=1)
        #socket = proxobject.nodes(slave_name).qemu(phy_id).vncwebsocket.get(port=ticket['port'],
        #                                                                    vncticket=ticket['ticket'])
    if unit_type == 'lxc':
        ticket = proxobject.nodes(slave_name).lxc(phy_id).vncproxy.post()
        #socket = proxobject.nodes(slave_name).lxc(phy_id).vncwebsocket.get(port=ticket['port'],
        #                                                                   vncticket=ticket['ticket'])
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    #slaveport = socket['port']
    slaveport = ticket['port']
    vnchost = ioconfig.parser.get('general', 'novnc_host')
    listenport = random.randint(7000, 9999)
    #listenport = random.randint(7000, 7001)

    vnc_target = { 'target_host': slaveip,
                   'target_port': slaveport,
                   'listen_host': vnchost,
                   'listen_port': listenport }
    vnc_options = { 'idle-timeout': 20,
                    'verbose': True,
                    'cert': ioconfig.parser.get('general', 'ssl_cert'),
                    'key': ioconfig.parser.get('general', 'ssl_key'),
                    'ssl-only': True }

    novnc.spawn(vnc_target, vnc_options)

    external_url = ioconfig.parser.get('general', 'novnc_url')
    prefix = external_url + "?host=" + vnchost + "&port=" + str(listenport) + "&view_only=false&encrypt=1&true_color=1&password="
    vnc_url = prefix + ticket['ticket']

    time.sleep(3)  # wait a few seconds for the parallel vncwebsocket
    ioconfig.logger.info('{}[{}]> vnc port {} ready'.format(vm_owner, slave_name, str(listenport)))
    #response = { 'status':'VNC', 'fqdn':external_url, 'host':myip, 'port':listenport, 'encrypt':'0', 'true_color':'1', 'ticket':ticket['ticket'] }
    response = { 'status': 'VNC', 'url': vnc_url }
    #print(vnc_url)
    return response


#DEPRECATED
def vmlist(proxobject):
    """ get unit list """
    # we keep single-node proxmoxes, so node id = 0
    slave_name = proxobject.cluster.status.get()[0]['name']
    query_kvm = proxobject.nodes(slave_name).qemu.get()
    query_lxc = proxobject.nodes(slave_name).lxc.get()
    for kvm_dict in query_kvm:
        kvm_dict['vmtype'] = 'kvm'
    for lxc_dict in query_lxc:
        lxc_dict['vmtype'] = 'lxc'
    vmlist = query_kvm + query_lxc  # merge machine list
    return vmlist


if __name__ == '__main__':
    #internal module tests
    time.sleep(1)
    #vmvnc(656758)
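    # Hypothetical end-to-end check, kept commented out in the spirit of the
    # vmvnc() call above. The payload keys are assumptions here, since the
    # fields grid.query() expects are defined in grid.py:
    #test_query = { 'type': 'kvm', 'unit_id': 656758 }
    #start(test_query)
    #print(status(test_query))
    #print(vmrrd(test_query))
    #print(vmvnc(test_query))
    #shutdown(test_query)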