From f27f26de6f1c5ebece30d50c96ae8829c444a672 Mon Sep 17 00:00:00 2001 From: deflax Date: Wed, 19 Jul 2017 22:15:51 +0300 Subject: [PATCH] add clientemail owner to grid.queryvm. deprecate mac and ipv4 generators --- README.md | 8 ++- config.ini.dist | 2 +- grid.py | 151 ++++++++++++++++++++++++------------------------ plugin.py | 46 +++++++-------- 4 files changed, 103 insertions(+), 104 deletions(-) diff --git a/README.md b/README.md index 9e842e1..3aa2f19 100644 --- a/README.md +++ b/README.md @@ -11,9 +11,11 @@ Python RESTful API for managing a grid of vm slaves - nginx_example_vhost.txt 5. make sure this iptables rule is included: iptables -A tcp_inbound -p TCP --match multiport --dports 6900:8000 -j ACCEPT #vnc range -6. generate self signed cert for ssl: - openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem -7. o/ +6. apt install letsencrypt +7. generate letsencrypt cert using letsencrypt tool and add +00 00 1 * * root /etc/init.d/nginx stop && letsencrypt renew && /etc/init.d/nginx start +to /etc/crontab +8. chmod 705 /etc/letsencrypt/archive ; chmod 705 /etc/letsencrypt/live start: 1. 
crontab -e diff --git a/config.ini.dist b/config.ini.dist index 647cc76..d56f286 100644 --- a/config.ini.dist +++ b/config.ini.dist @@ -8,7 +8,7 @@ adminuser = masteradmin@pve apipass = sakdlsadas vmid_min = 1000 vmid_max = 999999 -novnc_url = https://panel.example.com/novnc/vnc_auto.html +novnc_url = https://panel.example.com/novnc/vnc_lite.html ssl_cert = /etc/letsencrypt/live/api.example.com/fullchain.pem ssl_key = /etc/letsencrypt/live/api.example.com/privkey.pem diff --git a/grid.py b/grid.py index f329352..9e8903c 100644 --- a/grid.py +++ b/grid.py @@ -23,7 +23,7 @@ def queryvm(req_cube_id): """ returns slave_name, vm_id and vm_type for the requested cubeid """ data = querydb(req_cube_id) #print(data) - return data['slave'], data['type'], data['vmid'], data['host'] + return data['slave'], data['type'], data['vmid'], data['host'], data['clientemail'] def querydb(cubeid): @@ -86,81 +86,6 @@ def query_happiness(region_id): return happy_slave -def generate_ipv4(region_name, how_many=1): - """ this function should check the range, exclude deployed machines and return a list of available ips """ - ip_range_min = ioconfig.parser.get(str(region_name), 'ipv4_min') - ip_range_max = ioconfig.parser.get(str(region_name), 'ipv4_max') - - region_ipset = netaddr.IPSet(netaddr.IPRange(ip_range_min, ip_range_max)) - region_ips = [] - for ip in region_ipset: - region_ips.append(ip) - - ip_min = 0 - ip_max = len(region_ips) - 1 - - tested_ips = [] #initialize ip cache - requested_ips = [] - #all_ips = utils.find_rec(grid_data, 'ipaddr') #TODO: we cant list that for KVM so we should use another approach. perhaps as separate macaddress - ipaddress table which we will use to manipulate a static lease dhcp server. at this point this function is useless because plugins do not or cant track the address of the actual machine. 
proxmaster should divide each region to segments and each segment should export a static lease config which will be quieried when we search for unused addresses. - - all_ips = [] #TODO: replace with db searching function - - for ips in range(int(how_many)): - counter = 0 - while True: - if counter == 50: - logger.error('region[{}]> ip range full'.format(str(region_name))) - return None - else: - counter += 1 - - requested_ip_index = random.randint(ip_min, ip_max) - requested_ip = str(region_ips[requested_ip_index]) - - if requested_ip in tested_ips: - logger.warning('region[{}]> ip addres {} already tested. cache: {}'.format(str(region_name), str(requested_ip), str(tested_ips))) - continue - - if requested_ip in requested_ips: - logger.warning('region[{}]> ip address {} already generated.'.format(str(region_name), str(requested_ip))) - tested_ips.append(requested_ip) - continue - - if requested_ip in all_ips: - position = used_ips.index(requested_ip) - logger.warning('region[{}]> ip address {} already exist. 
location: {}'.format(str(region_name), str(position))) - tested_ips.append(requested_ip) - continue - else: - tested_ips = [] #clear ip cache - break - - logger.info('region[{}]> ip address {} selected.'.format(str(region_name), str(requested_ip))) - requested_ips.append(requested_ip) - logger.info('region[{}]> ip addresses {} selected.'.format(str(region_name), str(requested_ips))) - return requested_ips - - -def genmac(int_value): - """ convert kinda long enough int to MAC string """ - prefix_sum = sum(int(digit) for digit in str(int_value)) - if (prefix_sum > 255): - prefix_hex = 'ff' - else: - prefix_hex = format(prefix_sum, 'x') - - suffix = int(str(int_value)[-12:]) - suffix_hex = format(suffix, 'x') - length = len(suffix_hex) - suffix_hex = suffix_hex.zfill(length+length%2) - - addr = prefix_hex + suffix_hex - #logger.info('grid> mac-string {} genrated from: {} ({}->{}) ({}->{}) '.format(addr, int_value, prefix, prefix_hex, suffix, suffix_hex)) - print('grid> mac-string {} genrated from: {} (sum {}->{}) ({}->{}) '.format(addr, int_value, prefix_sum, prefix_hex, suffix, suffix_hex)) - - return ':'.join(addr[i:i+2] for i in range(0,len(addr),2)) - - def generate_vmid(): """ analyzes cached grid data and return proposed vmid for new machines """ grid_data = readcache() @@ -216,6 +141,80 @@ def findDiff(d1, d2, path=""): ### DEPRECATED +def genmac(int_value): + """ convert kinda long enough int to MAC string """ + prefix_sum = sum(int(digit) for digit in str(int_value)) + if (prefix_sum > 255): + prefix_hex = 'ff' + else: + prefix_hex = format(prefix_sum, 'x') + + suffix = int(str(int_value)[-12:]) + suffix_hex = format(suffix, 'x') + length = len(suffix_hex) + suffix_hex = suffix_hex.zfill(length+length%2) + + addr = prefix_hex + suffix_hex + #logger.info('grid> mac-string {} genrated from: {} ({}->{}) ({}->{}) '.format(addr, int_value, prefix, prefix_hex, suffix, suffix_hex)) + print('grid> mac-string {} genrated from: {} (sum {}->{}) ({}->{}) '.format(addr, 
int_value, prefix_sum, prefix_hex, suffix, suffix_hex)) + + return ':'.join(addr[i:i+2] for i in range(0,len(addr),2)) + +def generate_ipv4(region_name, how_many=1): + """ this function should check the range, exclude deployed machines and return a list of available ips """ + ip_range_min = ioconfig.parser.get(str(region_name), 'ipv4_min') + ip_range_max = ioconfig.parser.get(str(region_name), 'ipv4_max') + + region_ipset = netaddr.IPSet(netaddr.IPRange(ip_range_min, ip_range_max)) + region_ips = [] + for ip in region_ipset: + region_ips.append(ip) + + ip_min = 0 + ip_max = len(region_ips) - 1 + + tested_ips = [] #initialize ip cache + requested_ips = [] + #all_ips = utils.find_rec(grid_data, 'ipaddr') #TODO: we cant list that for KVM so we should use another approach. perhaps as separate macaddress - ipaddress table which we will use to manipulate a static lease dhcp server. at this point this function is useless because plugins do not or cant track the address of the actual machine. proxmaster should divide each region to segments and each segment should export a static lease config which will be quieried when we search for unused addresses. + + all_ips = [] #TODO: replace with db searching function + + for ips in range(int(how_many)): + counter = 0 + while True: + if counter == 50: + logger.error('region[{}]> ip range full'.format(str(region_name))) + return None + else: + counter += 1 + + requested_ip_index = random.randint(ip_min, ip_max) + requested_ip = str(region_ips[requested_ip_index]) + + if requested_ip in tested_ips: + logger.warning('region[{}]> ip addres {} already tested. 
cache: {}'.format(str(region_name), str(requested_ip), str(tested_ips))) + continue + + if requested_ip in requested_ips: + logger.warning('region[{}]> ip address {} already generated.'.format(str(region_name), str(requested_ip))) + tested_ips.append(requested_ip) + continue + + if requested_ip in all_ips: + position = used_ips.index(requested_ip) + logger.warning('region[{}]> ip address {} already exist. location: {}'.format(str(region_name), str(position))) + tested_ips.append(requested_ip) + continue + else: + tested_ips = [] #clear ip cache + break + + logger.info('region[{}]> ip address {} selected.'.format(str(region_name), str(requested_ip))) + requested_ips.append(requested_ip) + logger.info('region[{}]> ip addresses {} selected.'.format(str(region_name), str(requested_ips))) + return requested_ips + + def readreal(): """ read the current state and return its contents """ try: diff --git a/plugin.py b/plugin.py index 083789c..6359039 100644 --- a/plugin.py +++ b/plugin.py @@ -78,14 +78,13 @@ def vmcreate(req): vm_id = random.randint(1000, 9999) cubeid = int(time.time() * 10000 * 10000) ### ipv4 - ipv4_list = grid.generate_ipv4(req['region'], req['vps_ipv4']) - ipv4_dict = {} - ipidx = 0 - for ip in ipv4_list: - ipv4_dict[str(ipidx)] = str(ip) - ipidx += 1 - - macaddr = grid.genmac(cubeid) + #ipv4_list = grid.generate_ipv4(req['region'], req['vps_ipv4']) + #ipv4_dict = {} + #ipidx = 0 + #for ip in ipv4_list: + # ipv4_dict[str(ipidx)] = str(ip) + # ipidx += 1 + #macaddr = grid.genmac(cubeid) #metadata deploy = { 'cube': int(cubeid), @@ -100,15 +99,14 @@ def vmcreate(req): 'clientemail': req['clientemail'], 'recipe': req['vps_recipe'], 'iso9660': 'ubuntu-16.04.1-server-amd64.iso', - 'ipv4list': ipv4_list, - 'macaddr': macaddr } + 'ip0': req['vps_ipv4'], + 'mac0': req['vps_mac'] } proxobject = auth(slave_name) real_slave_name = proxobject.cluster.status.get()[0]['name'] #print(real_slave_name) - description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + 
')\n' + 'owned by ' + req['clientname'] + ' (' + req['clientid'] + ')\n' + 'master ip: ' + ipv4_list[0] - + description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientname'] + ' (' + req['clientid'] + ')\n' + 'master ip: ' + req['vps_ipv4'] + '\nmac address: ' + req['vps_mac'] if req['vps_type'] == 'kvm': #create partition @@ -129,7 +127,7 @@ def vmcreate(req): memory=req['vps_mem'], virtio0='file=lvm:' + image_name, ide1='backup:iso/' + deploy['iso9660'] + ',media=cdrom', - net0='virtio,bridge=vmbr0,macaddr=' + macaddr, + net0='virtio,bridge=vmbr0,macaddr=' + req['vps_mac'], onboot=1, description=description) @@ -143,7 +141,7 @@ def vmcreate(req): password=vm_pass, rootfs='lvm:' + req['vps_hdd'], virtio0='file=lvm:' + image_name, - ip_address=ipv4_list[0], + ip_address=req['vps_ipv4'], onboot=1, description=description) @@ -154,14 +152,14 @@ def vmcreate(req): #time.sleep(7) #wait few seconds for the slave to prepare the machine for initial run #vmstart(cubeid) - response = { 'status':'CREATE', 'cube':cubeid, 'name':vm_name, 'password':vm_pass, 'ipv4list':str(ipv4_list) } + response = { 'status':'CREATE', 'cube':cubeid, 'name':vm_name, 'password':vm_pass } grid.writedb(deploy) return response def vmstatus(cubeid): """ returns the status of the machine """ - slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid) + slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) proxobject = auth(slave_name) #slave_name = proxobject.c:luster.status.get()[0]['name'] ioconfig.logger.info('slave[%s]> get status of %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host)) @@ -174,7 +172,7 @@ def vmstatus(cubeid): def vmstart(cubeid): """ starts a machine """ - slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid) + slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) proxobject = auth(slave_name) #slave_name = proxobject.c:luster.status.get()[0]['name'] ioconfig.logger.info('slave[%s]> starting %s %s (%s)' % 
(slave_name, vm_type, vm_id, vm_host)) @@ -188,7 +186,7 @@ def vmstart(cubeid): def vmshutdown(cubeid): """ acpi shutdown the machine.. """ - slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid) + slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) proxobject = auth(slave_name) #slave_name = proxobject.c:luster.status.get()[0]['name'] ioconfig.logger.info('slave[%s]> acpi shutdown %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host)) @@ -204,7 +202,7 @@ def vmshutdown(cubeid): def vmstop(cubeid): """ poweroff the machine.. """ - slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid) + slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) proxobject = auth(slave_name) #slave_name = proxobject.c:luster.status.get()[0]['name'] ioconfig.logger.info('slave[%s]> power off %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host)) @@ -220,7 +218,7 @@ def vmstop(cubeid): def vmsuspend(cubeid): """ suspend machine """ - slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid) + slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) proxobject = auth(slave_name) #slave_name = proxobject.c:luster.status.get()[0]['name'] ioconfig.logger.info('slave[%s]> suspending %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host)) @@ -235,7 +233,7 @@ def vmsuspend(cubeid): def vmresume(cubeid): """ resume machine """ - slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid) + slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) proxobject = auth(slave_name) #slave_name = proxobject.c:luster.status.get()[0]['name'] ioconfig.logger.info('slave[%s]> resuming %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host)) @@ -250,7 +248,7 @@ def vmresume(cubeid): def vmrrd(cubeid): """ retrieve rrd graphs (PNG) """ - slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid) + slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) proxobject = auth(slave_name) proxobject.cluster.status.get()[0]['name'] @@ -279,7 +277,7 @@ 
def vmrrd(cubeid): def vmvnc(cubeid): """ invoke vnc ticket """ - slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid) + slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid) proxobject = auth(slave_name) #slave_name = proxobject.c:luster.status.get()[0]['name'] ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host)) @@ -338,5 +336,5 @@ def vmvnc(cubeid): if __name__ == '__main__': #internal module tests time.sleep(1) - vmvnc(656758) + #vmvnc(656758)