diff --git a/grid.py b/grid.py
index df09f4a..50311a1 100644
--- a/grid.py
+++ b/grid.py
@@ -19,51 +19,53 @@ import ioconfig
 logger = ioconfig.logger
 config = ioconfig.parser
 
+def query(json):
+    if json['type'] == 'deploy' or json['type'] == 'router':
+        data = read(json)
+        return data['slave'], data['type'], data['phyid'], data['hostname'], data['clientemail']
 
-def queryvm(req_cube_id):
-    """ returns slave_name, vm_id and vm_type for the requested cubeid """
-    data = querydb(req_cube_id)
-    #print(data)
-    return data['slave'], data['type'], data['vmid'], data['hostname'], data['clientemail']
+    if json['type'] == 'bridge':
+        data = read(json)
+        return data['slave'], data['type'], data['phyid'], data['clientemail']
 
+def create(data):
+    write(data)
 
-def querydb(cubeid):
+def read(data):
     """ open a metadata file """
     try:
-        dbfile = 'db/vm.{}.json'.format(cubeid)
+        dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id'])
         dbf = open(dbfile, 'r')
         data = json.load(dbf)
         dbf.close()
-        #logger.info('{}> --> {}'.format(dbfile, data))
+        logger.info('{}> --> {}'.format(dbfile, data))
         return data
     except Exception as e:
        logger.critical('{}> '.format(e))
        pass
     return None
 
-
-def writedb(src_data):
+def write(data):
    """ create new metadata file """
    try:
-        dbfile = 'db/vm.{}.json'.format(src_data['cube'])
+        dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id'])
        dbf = open(dbfile, 'w')
-        json.dump(src_data, dbf)
+        json.dump(data, dbf)
        dbf.close()
-        #TODO: send mail
-        logger.info('grid> {} --> {}'.format(src_data, dbfile))
+        logger.info('grid> {} --> {}'.format(data, dbfile))
    except Exception as e:
        logger.critical('grid> {}'.format(e))
        pass
    return None
 
-def deletedb(cubeid):
+def delete(unit_type, unit_id):
     """ remove metadata file """
-    dbfile = 'db/vm.{}.json'.format(cubeid)
+    dbfile = 'db/{}.{}.json'.format(unit_type, unit_id)
+    #TODO: perhaps just move the datafile to an archive directory
     os.remove(dbfile)
     return None
 
-
-def query_happiness(region_id):
+def analyze_happiness(region_id):
    """ analyzes grid data for the requested region and returns proposed slave_id, based on a "happiness" factor.
    happiness means alive and free :)
    """
    grid_data = readcache()
@@ -93,6 +95,7 @@ def query_happiness(region_id):
 
     return happy_slave
 
+### DEPRECATED
 def generate_vmid():
     """ analyzes cached grid data and return proposed vmid for new machines """
     grid_data = readcache()
@@ -144,29 +147,6 @@ def findDiff(d1, d2, path=""):
                 logger.warning('cache> ' + str(k) + ' ' + str(d1[k]) + ' [-]')
                 logger.warning('cache> ' + str(k) + ' ' + str(d2[k]) + ' [+]')
 
-
-
-
-### DEPRECATED
-def genmac(int_value):
-    """ convert kinda long enough int to MAC string """
-    prefix_sum = sum(int(digit) for digit in str(int_value))
-    if (prefix_sum > 255):
-        prefix_hex = 'ff'
-    else:
-        prefix_hex = format(prefix_sum, 'x')
-
-    suffix = int(str(int_value)[-12:])
-    suffix_hex = format(suffix, 'x')
-    length = len(suffix_hex)
-    suffix_hex = suffix_hex.zfill(length+length%2)
-
-    addr = prefix_hex + suffix_hex
-    #logger.info('grid> mac-string {} genrated from: {} ({}->{}) ({}->{}) '.format(addr, int_value, prefix, prefix_hex, suffix, suffix_hex))
-    print('grid> mac-string {} genrated from: {} (sum {}->{}) ({}->{}) '.format(addr, int_value, prefix_sum, prefix_hex, suffix, suffix_hex))
-
-    return ':'.join(addr[i:i+2] for i in range(0,len(addr),2))
-
 def generate_ipv4(region_name, how_many=1):
     """ this function should check the range, exclude deployed machines and return a list of available ips """
     ip_range_min = ioconfig.parser.get(str(region_name), 'ipv4_min')
@@ -221,7 +201,6 @@ def generate_ipv4(region_name, how_many=1):
     logger.info('region[{}]> ip addresses {} selected.'.format(str(region_name), str(requested_ips)))
     return requested_ips
 
-
 def readreal():
     """ read the current state and return its contents """
     try:
diff --git a/plugin.py b/plugin.py
index 4c00701..861b883 100644
--- a/plugin.py
+++ b/plugin.py
@@ -25,257 +25,248 @@ def auth(slave_name):
     slavepass = ioconfig.parser.get(str(slave_name), 'password')
     slavetype = ioconfig.parser.get(str(slave_name), 'type')
 
-    #vendor specific
-    #if slavetype == 'proxmoxia':
-    #    connection = lib_proxmoxia.Connector(slaveip)
-    #    auth_token = connection.get_auth_token(adminuser, slavepass)
-    #    proxobject = lib_proxmoxia.Proxmox(connection)
     if slavetype == 'proxmox':
         proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False)
     return proxobject
 
-def vmlist(proxobject):
-    """ get vmlist """
-    #slave_name = proxobject.get('cluster/status')#'name']
-    #we keep a single node proxmoxes so node id = 0
-    slave_name = proxobject.cluster.status.get()[0]['name']
-    #query_kvm = proxobject.get('nodes/%s/qemu' % slave_name)
-    query_kvm = proxobject.nodes(slave_name).qemu.get()
-    query_lxc = proxobject.nodes(slave_name).lxc.get()
-    for kvm_dict in query_kvm:
-        kvm_dict['vmtype'] = 'kvm'
-    for lxc_dict in query_lxc:
-        lxc_dict['vmtype'] = 'lxc'
-    vmlist = query_kvm + query_lxc #merge machine list
-    return vmlist
-
-def vmcreate(req):
-    """ create vm. returns JSON with data """
+def create(json):
+    """ create a unit.
+    returns JSON with data """
     try:
-        region_id = ioconfig.parser.get(str(req['region']), 'regionid')
-        region_fullname = ioconfig.parser.get(str(req['region']), 'fullname')
+        region_id = ioconfig.parser.get(str(json['region']), 'regionid')
+        region_fullname = ioconfig.parser.get(str(json['region']), 'fullname')
     except:
-        ioconfig.logger.error('grid> no region found')
+        ioconfig.logger.error('grid> region not found')
         return None
-    vm_name_utf8 = req['hostname']
+    vm_name_utf8 = json['hostname']
     vm_name = unidecode(vm_name_utf8)
     try:
-        vm_pass = req['rootpass']
+        vm_pass = json['rootpass']
     except:
-        vm_pass = 'datapoint'
+        vm_pass = '!%%^)@&&(K3B'
 
     #slave_name = str(grid.query_happiness(region_id, weight))
     #TODO: provide weight parameters here and calculate route
-    slave_name = 'lexx'
-    #slave_name = 'warrior'
-    vm_id = random.randint(1000, 9999)
-    cubeid = int(time.time() * 10000 * 10000)
-    deploy = { 'cube': int(cubeid),
-               'type': req['type'],
-               'clientid': req['clientid'],
-               'clientemail': req['clientemail'],
-               'hostname': vm_name,
-               'region': region_fullname,
-               'slave': slave_name,
-               'vmid': vm_id,
-               'cpu': req['cpu'],
-               'mem': req['mem'],
-               'hdd': req['hdd']
-             }
+    #slave_name = 'lexx'
+    slave_name = 'warrior'
+
+    unit_id = int(time.time() * 10000 * 10000)
+    phy_id = grid.phyidgen(json['type'])
 
     proxobject = auth(slave_name)
     real_slave_name = proxobject.cluster.status.get()[0]['name']
-    description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientemail'] + ' (' + req['clientid'] + ')\n'
+    description = vm_name + ' (' + str(unit_id) + '-' + str(phy_id) + ')\n' + 'owned by ' + json['clientemail'] + ' (' + json['clientid'] + ')\n'
 
-    if req['type'] == 'kvm':
+    if json['type'] == 'deploy':
         #create partition
-        image_name = 'vm-' + str(vm_id) + '-disk-1'
+        image_name = 'vm-' + str(phy_id) + '-disk-1'
         try:
             local_storage = proxobject.nodes(real_slave_name).storage('lvm')
-            storage_create_result = local_storage.content.post(vmid=vm_id, filename=image_name, size=req['hdd'] + 'G')
-            ioconfig.logger.info('slave[%s]> allocated %s as %s' % (slave_name, req['hdd'], image_name))
+            storage_create_result = local_storage.content.post(vmid=phy_id, filename=image_name, size=json['hdd'] + 'G')
+            ioconfig.logger.info('%s[%s]> allocated %s as %s' % (json['clientemail'], slave_name, json['hdd'], image_name))
         except:
-            ioconfig.logger.info('slave[%s]> unable to allocate %s' % (slave_name, image_name))
+            ioconfig.logger.info('%s[%s]> unable to allocate %s' % (json['clientemail'], slave_name, image_name))
             response = { 'status':'FAIL' }
             return response
 
-        create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(vm_id),
-                                                                    name=vm_name,
-                                                                    sockets=1,
-                                                                    cores=req['cpu'],
-                                                                    memory=req['mem'],
-                                                                    virtio0='file=lvm:' + image_name,
-                                                                    onboot=1,
-                                                                    description=description)
-
-    if req['type'] == 'lxc':
-        create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(vm_id),
-                                                                   cpus=req['cpu'],
-                                                                   memory=req['mem'],
-                                                                   swap=16,
-                                                                   ostemplate='backup:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz',
-                                                                   hostname=vm_name,
-                                                                   password=vm_pass,
-                                                                   rootfs='lvm:' + req['hdd'],
-                                                                   virtio0='file=lvm:' + image_name,
+        create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(phy_id),
+                                                                    name=vm_name, onboot=1,
+                                                                    sockets=1,
+                                                                    cores=json['cpu'],
+                                                                    memory=json['mem'],
+                                                                    scsihw='virtio-scsi-pci',
+                                                                    scsi0='file=lvm:' + image_name + ',discard=on',
                                                                     description=description)
+        data = { 'unit_id': int(unit_id),
+                 'type': 'kvm',
+                 'clientid': json['clientid'],
+                 'clientemail': json['clientemail'],
+                 'hostname': vm_name,
+                 'region': region_fullname,
+                 'slave': slave_name,
+                 'phyid': phy_id
+               }
 
-    print(str(create_result))
-    #start the machihe
+    if json['type'] == 'router':
+        create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(phy_id),
+                                                                   hostname=vm_name,
+                                                                   onboot=1,
+                                                                   unprivileged=1,
+                                                                   password=vm_pass,
+                                                                   cores=json['cpu'],
+                                                                   memory=json['mem'],
+                                                                   net0='name=eth0,bridge=' + json['bridge_id'] + ',gw=' + json['region_gw'] + ',hwaddr=' + json['macaddr'] + ',ip=' + json['ipv4addr'] + '/' + json['region_netmask'],
+                                                                   ostemplate='backup:vztmpl/debian-9.0-standard_9.0-2_amd64.tar.gz',
+                                                                   rootfs='volume=lvm:' + image_name,
+                                                                   swap=32,
+                                                                   description=description)
+        data = { 'unit_id': int(unit_id),
+                 'type': 'lxc',
+                 'clientid': json['clientid'],
+                 'clientemail': json['clientemail'],
+                 'hostname': vm_name,
+                 'region': region_fullname,
+                 'slave': slave_name,
+                 'phyid': phy_id
+               }
+
+    if json['type'] == 'bridge':
+        #TODO: CREATE BRIDGE
+        data = { 'unit_id': int(unit_id),
+                 'type': 'vmbr',
+                 'clientid': json['clientid'],
+                 'clientemail': json['clientemail'],
+                 'region': region_fullname,
+                 'slave': slave_name,
+                 'phyid': phy_id
+               }
+
     time.sleep(7) #wait few seconds for the slave to prepare the machine for initial run
-    response = { 'status': 'CREATE', 'cube': cubeid, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name }
-    grid.writedb(deploy)
+    grid.create(data)
+    response = { 'status': 'CREATED', 'unit_id': unit_id, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name }
     return response
 
-def vmremove(cubeid):
-    """ terminate a vm """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+def remove(json):
+    """ terminate a unit """
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).delete()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).delete()
-    grid.deletedb(cubeid)
-    response = { 'status':'DELETE', 'cube': cubeid, 'hostname': vm_host }
+    ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).delete()
+        grid.delete(json)
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).delete()
+        grid.delete(json)
+    response = { 'status':'DELETED'}
     return response
 
-
-def vmstatus(cubeid):
-    """ returns the status of the machine """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+def status(json):
+    """ returns the status of a unit """
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.current.get()
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
     return result
 
-
-def vmstart(cubeid):
+def start(json):
     """ starts a machine """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.start.post()
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.start.post()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.start.post()
     #TODO: SET START AT BOOT FLAG
     response = { 'status':'START' }
     return response
 
-
-def vmshutdown(cubeid):
+def shutdown(json):
     """ acpi shutdown the machine.. """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
 
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.shutdown.post()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.shutdown.post()
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.shutdown.post()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.shutdown.post()
     #TODO: REMOVE START AT BOOT FLAG
     #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
-    response = { 'status':'SHUTDOWN', 'vmid':vm_id }
+    response = { 'status':'SHUTDOWN', 'vmid':phy_id }
     return response
 
-
-def vmstop(cubeid):
+def stop(json):
     """ poweroff the machine..
""" - slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) + slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) - #slave_name = proxobject.c:luster.status.get()[0]['name'] - ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host)) + #slave_name = proxobject.cluster.status.get()[0]['name'] + ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) - if vm_type == 'kvm': - result = proxobject.nodes(slave_name).qemu(vm_id).status.stop.post() - if vm_type == 'lxc': - result = proxobject.nodes(slave_name).lxc(vm_id).status.stop.post() + if unit_type == 'kvm': + result = proxobject.nodes(slave_name).qemu(phy_id).status.stop.post() + if unit_type == 'lxc': + result = proxobject.nodes(slave_name).lxc(phy_id).status.stop.post() #TODO: REMOVE START AT BOOT FLAG #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result)) - response = { 'status':'STOP', 'vmid':vm_id } + response = { 'status':'STOP', 'vmid':phy_id } return response - -def vmsuspend(cubeid): +def suspend(json): """ suspend machine """ - slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) + slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) - #slave_name = proxobject.c:luster.status.get()[0]['name'] - ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host)) + #slave_name = proxobject.cluster.status.get()[0]['name'] + ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) - if vm_type == 'kvm': - result = proxobject.nodes(slave_name).qemu(vm_id).status.suspend.post() - if vm_type == 'lxc': - result = proxobject.nodes(slave_name).lxc(vm_id).status.suspend.post() - response = { 'status':'SUSPEND', 'vmid':vm_id } + if unit_type == 'kvm': + result = proxobject.nodes(slave_name).qemu(phy_id).status.suspend.post() + if unit_type == 'lxc': + result = proxobject.nodes(slave_name).lxc(phy_id).status.suspend.post() + response = { 'status':'SUSPEND', 'vmid':phy_id } return response - -def vmresume(cubeid): +def resume(json): """ resume machine """ - slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) + slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) - #slave_name = proxobject.c:luster.status.get()[0]['name'] - ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host)) + #slave_name = proxobject.cluster.status.get()[0]['name'] + ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host)) - if vm_type == 'kvm': - result = proxobject.nodes(slave_name).qemu(vm_id).status.resume.post() - if vm_type == 'lxc': - result = proxobject.nodes(slave_name).lxc(vm_id).status.resume.post() - response = { 'status':'RESUME', 'vmid':vm_id } + if unit_type == 'kvm': + result = proxobject.nodes(slave_name).qemu(phy_id).status.resume.post() + if unit_type == 'lxc': + result = proxobject.nodes(slave_name).lxc(phy_id).status.resume.post() + response = { 'status':'RESUME', 'vmid':phy_id } return response - -def vmrrd(cubeid): +def vmrrd(json): """ retrieve rrd graphs (PNG) """ - slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) + slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json) proxobject = auth(slave_name) proxobject.cluster.status.get()[0]['name'] result = {} - 
-        statusquery = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
-        rcpu = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
-        rmem = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
-        rnet = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
-        rhdd = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
+    if unit_type == 'kvm':
+        statusquery = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
+        rcpu = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
+        rmem = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
+        rnet = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
+        rhdd = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
         status = str(statusquery['qmpstatus'])
-    if vm_type == 'lxc':
-        status = proxobject.nodes(slave_name).lxc(vm_id).status.current.get()
-        rcpu = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
-        rmem = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
-        rnet = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
-        rhdd = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
+    if unit_type == 'lxc':
+        status = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
+        rcpu = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
+        rmem = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
+        rnet = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
+        rhdd = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
         status = str(statusquery['qmpstatus']) #TODO: maybe change this?
 
-    #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, vm_type, vm_id, vm_host, status))
+    #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, unit_type, phy_id, vm_host, status))
     response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
     return response
 
-
-def vmvnc(cubeid):
+def vmvnc(json):
     """ invoke vnc ticket """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
 
-    if vm_type == 'kvm':
-        ticket = proxobject.nodes(slave_name).qemu(vm_id).vncproxy.post(websocket=1)
-        #socket = proxobject.nodes(slave_name).qemu(vm_id).vncwebsocket.get(port=ticket['port'],
-        #                                                                   vncticket=ticket['ticket'])
+    if unit_type == 'kvm':
+        ticket = proxobject.nodes(slave_name).qemu(phy_id).vncproxy.post(websocket=1)
+        #socket = proxobject.nodes(slave_name).qemu(phy_id).vncwebsocket.get(port=ticket['port'],
+        #                                                                    vncticket=ticket['ticket'])
-    if vm_type == 'lxc':
-        ticket = proxobject.nodes(slave_name).lxc(vm_id).vncproxy.post()
-        #socket = proxobject.nodes(slave_name).lxc(vm_id).vncwebsocket.get(port=ticket['port'],
-        #                                                                  vncticket=ticket['ticket'])
+    if unit_type == 'lxc':
+        ticket = proxobject.nodes(slave_name).lxc(phy_id).vncproxy.post()
+        #socket = proxobject.nodes(slave_name).lxc(phy_id).vncwebsocket.get(port=ticket['port'],
+        #                                                                   vncticket=ticket['ticket'])
 
     slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
@@ -311,14 +302,19 @@ def vmvnc(cubeid):
     #print(vnc_url)
     return response
 
-
-#def getmyip():
-#    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-#    s.connect(("gmail.com",80))
-#    myip = s.getsockname()[0]
-#    s.close
-#    return myip
-
+#DEPRECATED
+def vmlist(proxobject):
+    """ get unit list """
+    #we keep a single node proxmoxes so node id = 0
+    slave_name = proxobject.cluster.status.get()[0]['name']
+    query_kvm = proxobject.nodes(slave_name).qemu.get()
+    query_lxc = proxobject.nodes(slave_name).lxc.get()
+    for kvm_dict in query_kvm:
+        kvm_dict['vmtype'] = 'kvm'
+    for lxc_dict in query_lxc:
+        lxc_dict['vmtype'] = 'lxc'
+    vmlist = query_kvm + query_lxc #merge machine list
+    return vmlist
 
 if __name__ == '__main__':
     #internal module tests
diff --git a/proxmaster.py b/proxmaster.py
index ae4641e..4fe1266 100644
--- a/proxmaster.py
+++ b/proxmaster.py
@@ -23,7 +23,7 @@ def welcome():
     logger.info('# proxmaster ][ (c) 2015-2017 deflax.net #')
 
-def selector(fn, req, vmid=0):
+def selector(fn, req):
     """ try to exec commands """
     json = req.context['doc']
     #print(json)
@@ -35,43 +35,35 @@
         return status, body
 
     try:
-        if fn == 'vmcreate':
-            body = plugin.vmcreate(json)
+        if fn == 'create':
+            body = plugin.create(json)
+        elif fn == 'remove':
+            body = plugin.remove(json)
+        elif fn == 'status':
+            body = plugin.status(json)
 
-        elif fn == 'vmremove':
-            body = plugin.vmremove(vmid)
-
-        elif fn == 'vmstatus':
-            body = plugin.vmstatus(vmid)
-
-        elif fn == 'vmsuspend':
-            body = plugin.vmsuspend(vmid)
-
-        elif fn == 'vmresume':
-            body = plugin.vmresume(vmid)
-
-        elif fn == 'vmstart':
-            body = plugin.vmstart(vmid)
-
-        elif fn == 'vmshutdown':
-            body = plugin.vmshutdown(vmid)
-
-        elif fn == 'vmstop':
-            body = plugin.vmstop(vmid)
+        elif fn == 'start':
+            body = plugin.start(json)
+        elif fn == 'shutdown':
+            body = plugin.shutdown(json)
+        elif fn == 'stop':
+            body = plugin.stop(json)
+        elif fn == 'suspend':
+            body = plugin.suspend(json)
+        elif fn == 'resume':
+            body = plugin.resume(json)
         elif fn == 'vmrrd':
-            body = plugin.vmrrd(vmid)
-
+            body = plugin.vmrrd(json)
         elif fn == 'vmvnc':
-            body = plugin.vmvnc(vmid)
-
+            body = plugin.vmvnc(json)
     except:
         logger.critical('grid> {} error'.format(fn))
         status = falcon.HTTP_404
         raise
     else:
-        #logger.info('grid> {} ok'.format(fn))
+        logger.info('grid> {}'.format(fn))
         status = falcon.HTTP_202
 
     return status, body
@@ -135,85 +127,74 @@ def max_body(limit):
 
     return hook
 
-
-class CreateResource(object):
+class CreateUnit(object):
     @falcon.before(max_body(64 * 1024))
     def on_post(self, req, resp):
-        """Create a cluster node, returns array of: status, vmid, pass, ipv4, """
-        logger.info('grid> create new cube')
-        resp.status, response = selector('vmcreate', req)
+        """ creates a unit """
+        resp.status, response = selector('create', req)
         req.context['result'] = response
 
-class RemoveResource(object):
+class RemoveUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
-        """ remove machine completely"""
-        logger.info('grid> remove ' + str(vmid))
-        resp.status, response = selector('vmremove', req, vmid)
+    def on_post(self, req, resp):
+        """ removes a unit completely """
+        resp.status, response = selector('remove', req)
         req.context['result'] = response
 
-class StatusResource(object):
+class StatusUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
-        """ check vm status """
-        logger.info('grid> status ' + str(vmid))
-        resp.status, response = selector('vmstatus', req, vmid)
+    def on_post(self, req, resp):
+        """ checks unit status """
+        resp.status, response = selector('status', req)
         req.context['result'] = response
 
-class SuspendResource(object):
+class SuspendUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Temporary suspend the instance """
-        #logger.info('grid> suspend ' + str(vmid))
-        resp.status, response = selector('vmsuspend', req, vmid)
+        resp.status, response = selector('suspend', req)
         req.context['result'] = response
 
-class ResumeResource(object):
+class ResumeUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Unsuspend the instance """
-        #logger.info('grid> resume ' + str(vmid))
-        resp.status, response = selector('vmresume', req, vmid)
+        resp.status, response = selector('resume', req)
         req.context['result'] = response
 
-class StartResource(object):
+class StartUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Start the instance """
-        logger.info('grid> start ' + str(vmid))
-        resp.status, response = selector('vmstart', req, vmid)
+        resp.status, response = selector('start', req)
         req.context['result'] = response
 
-class ShutdownResource(object):
+class ShutdownUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ ACPI Shutdown the instance """
-        logger.info('grid> shutdown ' + str(vmid))
-        resp.status, response = selector('vmshutdown', req, vmid)
+        resp.status, response = selector('shutdown', req)
         req.context['result'] = response
 
-class StopResource(object):
+class StopUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Stop the instance """
-        logger.info('grid> stop ' + str(vmid))
-        resp.status, response = selector('vmstop', req, vmid)
+        resp.status, response = selector('stop', req)
         req.context['result'] = response
 
-class RRDResource(object):
+class RRDVM(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Generate rrd pngs """
-        #logger.info('grid> rrd ' + str(vmid))
-        resp.status, response = selector('vmrrd', req, vmid)
+        resp.status, response = selector('vmrrd', req)
         req.context['result'] = response
 
-class VNCResource(object):
+class VNCVM(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Create a VNC link to the instance """
-        logger.info('grid> vnc ' + str(vmid))
-        resp.status, response = selector('vmvnc', req, vmid)
+        resp.status, response = selector('vmvnc', req)
         req.context['result'] = response
 
 if __name__ == '__main__':
@@ -225,35 +206,18 @@ wsgi_app = api = application = falcon.API(middleware=[
 ])
 
 # setup routes
-res_create = CreateResource()
-api.add_route('/vmcreate', res_create)
+api.add_route('/create', CreateUnit())
+api.add_route('/remove', RemoveUnit())
+api.add_route('/status', StatusUnit())
 
-res_remove = RemoveResource()
-api.add_route('/vmremove/{vmid}', res_remove)
+api.add_route('/start', StartUnit())
+api.add_route('/suspend', SuspendUnit())
+api.add_route('/resume', ResumeUnit())
+api.add_route('/shutdown', ShutdownUnit())
+api.add_route('/stop', StopUnit())
 
-res_status = StatusResource()
-api.add_route('/vmstatus/{vmid}', res_status)
-
-res_suspend = SuspendResource()
-api.add_route('/vmsuspend/{vmid}', res_suspend)
-
-res_resume = ResumeResource()
-api.add_route('/vmresume/{vmid}', res_resume)
-
-res_start = StartResource()
-api.add_route('/vmstart/{vmid}', res_start)
-
-res_shutdown = ShutdownResource()
-api.add_route('/vmshutdown/{vmid}', res_shutdown)
-
-res_stop = StopResource()
-api.add_route('/vmstop/{vmid}', res_stop)
-
-res_rrd = RRDResource()
-api.add_route('/vmrrd/{vmid}', res_rrd)
-
-res_vnc = VNCResource()
-api.add_route('/vmvnc/{vmid}', res_vnc)
+api.add_route('/vmrrd', RRDVM())
+api.add_route('/vmvnc', VNCVM())
 
 #display motd
 welcome()
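
Usage sketch (illustrative, not part of the patch): the refactored routes drop the /{vmid} URL parameter and instead take the target unit in the JSON body, which grid.read() resolves to db/<type>.<unit_id>.json. The host, port, region name, and field values below are assumptions. Note also that plugin.create() stores 'type': 'kvm' in the metadata while grid.read() builds the filename from the request's 'deploy' type, so those two spellings may still need reconciling before this round-trips.

# hypothetical client session; endpoint and payload values are assumed
import requests

API = 'http://127.0.0.1:8000'

# POST /create with the fields plugin.create() reads from the request body
spec = {
    'type': 'deploy',                    # 'deploy' (kvm), 'router' (lxc) or 'bridge'
    'region': 'example-region',          # must exist as a section in the ini config
    'hostname': 'test-vm',
    'clientid': '1001',
    'clientemail': 'client@example.com',
    'cpu': 2,
    'mem': 1024,
    'hdd': '10',                         # kept a string: plugin.create() appends 'G'
    'rootpass': 'secret',                # optional; a hardcoded default is used if absent
}
unit = requests.post(API + '/create', json=spec).json()

# every other route now addresses the unit by type + unit_id in the body
ref = {'type': 'deploy', 'unit_id': unit['unit_id']}
print(requests.post(API + '/status', json=ref).json())
print(requests.post(API + '/stop', json=ref).json())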