add clientemail owner to grid.queryvm. deprecate mac and ipv4 generators

deflax 2017-07-19 22:15:51 +03:00
parent 4d5bff651f
commit f27f26de6f
4 changed files with 103 additions and 104 deletions

View file

@@ -11,9 +11,11 @@ Python RESTful API for managing a grid of vm slaves
 - nginx_example_vhost.txt
 5. make sure this iptables rule is included:
    iptables -A tcp_inbound -p TCP --match multiport --dports 6900:8000 -j ACCEPT #vnc range
-6. generate self signed cert for ssl:
-   openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
-7. o/
+6. apt install letsencrypt
+7. generate letsencrypt cert using the letsencrypt tool and add
+   00 00 1 * * root /etc/init.d/nginx stop && letsencrypt renew && /etc/init.d/nginx start
+   to /etc/crontab
+8. chmod 705 /etc/letsencrypt/archive ; chmod 705 /etc/letsencrypt/live
 
 start:
 1. crontab -e
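The cron entry in step 7 stops nginx so the letsencrypt tool can bind the web ports, renews, then restarts nginx. A rough Python equivalent of that one-liner, handy for a manual dry run (the init.d paths are the ones the README assumes):

    import subprocess

    def renew_certs():
        subprocess.check_call(['/etc/init.d/nginx', 'stop'])       # free the ports for the ACME challenge
        try:
            subprocess.check_call(['letsencrypt', 'renew'])        # renew anything close to expiry
        finally:
            subprocess.check_call(['/etc/init.d/nginx', 'start'])  # always bring nginx back up

    if __name__ == '__main__':
        renew_certs()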

View file

@@ -8,7 +8,7 @@ adminuser = masteradmin@pve
 apipass = sakdlsadas
 vmid_min = 1000
 vmid_max = 999999
-novnc_url = https://panel.example.com/novnc/vnc_auto.html
+novnc_url = https://panel.example.com/novnc/vnc_lite.html
 ssl_cert = /etc/letsencrypt/live/api.example.com/fullchain.pem
 ssl_key = /etc/letsencrypt/live/api.example.com/privkey.pem
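These are plain INI keys; a minimal sketch of reading them with the standard library (the filename and section name are assumptions here, and the project's own ioconfig wrapper may differ):

    import configparser

    parser = configparser.ConfigParser()
    parser.read('proxmaster.ini')                     # assumed filename
    novnc_url = parser.get('general', 'novnc_url')    # assumed section; now points at vnc_lite.html
    ssl_cert = parser.get('general', 'ssl_cert')
    ssl_key = parser.get('general', 'ssl_key')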

grid.py (151 lines changed)
View file

@@ -23,7 +23,7 @@ def queryvm(req_cube_id):
     """ returns slave_name, vm_id and vm_type for the requested cubeid """
     data = querydb(req_cube_id)
     #print(data)
-    return data['slave'], data['type'], data['vmid'], data['host']
+    return data['slave'], data['type'], data['vmid'], data['host'], data['clientemail']
 
 
 def querydb(cubeid):
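Every call site in proxmaster.py below now unpacks the widened tuple, binding the new field as vm_owner:

    # callers move from four to five values; vm_owner carries the owning client's e-mail
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)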
@@ -86,81 +86,6 @@ def query_happiness(region_id):
     return happy_slave
 
 
-def generate_ipv4(region_name, how_many=1):
-    """ this function should check the range, exclude deployed machines and return a list of available ips """
-    ip_range_min = ioconfig.parser.get(str(region_name), 'ipv4_min')
-    ip_range_max = ioconfig.parser.get(str(region_name), 'ipv4_max')
-    region_ipset = netaddr.IPSet(netaddr.IPRange(ip_range_min, ip_range_max))
-    region_ips = []
-    for ip in region_ipset:
-        region_ips.append(ip)
-    ip_min = 0
-    ip_max = len(region_ips) - 1
-    tested_ips = [] #initialize ip cache
-    requested_ips = []
-    #all_ips = utils.find_rec(grid_data, 'ipaddr') #TODO: we can't list that for KVM so we should use another approach, perhaps a separate macaddress - ipaddress table which we will use to manipulate a static-lease dhcp server. at this point this function is useless because plugins do not (or can't) track the address of the actual machine. proxmaster should divide each region into segments and each segment should export a static-lease config which will be queried when we search for unused addresses.
-    all_ips = [] #TODO: replace with db searching function
-    for ips in range(int(how_many)):
-        counter = 0
-        while True:
-            if counter == 50:
-                logger.error('region[{}]> ip range full'.format(str(region_name)))
-                return None
-            else:
-                counter += 1
-            requested_ip_index = random.randint(ip_min, ip_max)
-            requested_ip = str(region_ips[requested_ip_index])
-            if requested_ip in tested_ips:
-                logger.warning('region[{}]> ip address {} already tested. cache: {}'.format(str(region_name), str(requested_ip), str(tested_ips)))
-                continue
-            if requested_ip in requested_ips:
-                logger.warning('region[{}]> ip address {} already generated.'.format(str(region_name), str(requested_ip)))
-                tested_ips.append(requested_ip)
-                continue
-            if requested_ip in all_ips:
-                position = all_ips.index(requested_ip)
-                logger.warning('region[{}]> ip address {} already exists. location: {}'.format(str(region_name), str(requested_ip), str(position)))
-                tested_ips.append(requested_ip)
-                continue
-            else:
-                tested_ips = [] #clear ip cache
-                break
-        logger.info('region[{}]> ip address {} selected.'.format(str(region_name), str(requested_ip)))
-        requested_ips.append(requested_ip)
-    logger.info('region[{}]> ip addresses {} selected.'.format(str(region_name), str(requested_ips)))
-    return requested_ips
-
-
-def genmac(int_value):
-    """ convert kinda long enough int to MAC string """
-    prefix_sum = sum(int(digit) for digit in str(int_value))
-    if (prefix_sum > 255):
-        prefix_hex = 'ff'
-    else:
-        prefix_hex = format(prefix_sum, '02x') #zero-pad so the address always pairs up evenly
-    suffix = int(str(int_value)[-12:])
-    suffix_hex = format(suffix, 'x')
-    length = len(suffix_hex)
-    suffix_hex = suffix_hex.zfill(length+length%2)
-    addr = prefix_hex + suffix_hex
-    #logger.info('grid> mac-string {} generated from: {} (sum {}->{}) ({}->{}) '.format(addr, int_value, prefix_sum, prefix_hex, suffix, suffix_hex))
-    print('grid> mac-string {} generated from: {} (sum {}->{}) ({}->{}) '.format(addr, int_value, prefix_sum, prefix_hex, suffix, suffix_hex))
-    return ':'.join(addr[i:i+2] for i in range(0,len(addr),2))
 
 
 def generate_vmid():
     """ analyzes cached grid data and return proposed vmid for new machines """
     grid_data = readcache()
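For reference, the arithmetic genmac performs on the cube id used in the module self-test at the bottom of this commit:

    # worked example: genmac(656758)
    #   digit sum 6+5+6+7+5+8 = 37   -> prefix_hex '25'
    #   656758 -> hex 'a0576'        -> zero-padded to suffix_hex '0a0576'
    #   addr = '250a0576'            -> paired into '25:0a:05:76'
    assert genmac(656758) == '25:0a:05:76'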
@@ -216,6 +141,80 @@ def findDiff(d1, d2, path=""):
 ### DEPRECATED
+def genmac(int_value):
+    """ convert kinda long enough int to MAC string """
+    prefix_sum = sum(int(digit) for digit in str(int_value))
+    if (prefix_sum > 255):
+        prefix_hex = 'ff'
+    else:
+        prefix_hex = format(prefix_sum, '02x') #zero-pad so the address always pairs up evenly
+    suffix = int(str(int_value)[-12:])
+    suffix_hex = format(suffix, 'x')
+    length = len(suffix_hex)
+    suffix_hex = suffix_hex.zfill(length+length%2)
+    addr = prefix_hex + suffix_hex
+    #logger.info('grid> mac-string {} generated from: {} (sum {}->{}) ({}->{}) '.format(addr, int_value, prefix_sum, prefix_hex, suffix, suffix_hex))
+    print('grid> mac-string {} generated from: {} (sum {}->{}) ({}->{}) '.format(addr, int_value, prefix_sum, prefix_hex, suffix, suffix_hex))
+    return ':'.join(addr[i:i+2] for i in range(0,len(addr),2))
+
+
+def generate_ipv4(region_name, how_many=1):
+    """ this function should check the range, exclude deployed machines and return a list of available ips """
+    ip_range_min = ioconfig.parser.get(str(region_name), 'ipv4_min')
+    ip_range_max = ioconfig.parser.get(str(region_name), 'ipv4_max')
+    region_ipset = netaddr.IPSet(netaddr.IPRange(ip_range_min, ip_range_max))
+    region_ips = []
+    for ip in region_ipset:
+        region_ips.append(ip)
+    ip_min = 0
+    ip_max = len(region_ips) - 1
+    tested_ips = [] #initialize ip cache
+    requested_ips = []
+    #all_ips = utils.find_rec(grid_data, 'ipaddr') #TODO: we can't list that for KVM so we should use another approach, perhaps a separate macaddress - ipaddress table which we will use to manipulate a static-lease dhcp server. at this point this function is useless because plugins do not (or can't) track the address of the actual machine. proxmaster should divide each region into segments and each segment should export a static-lease config which will be queried when we search for unused addresses.
+    all_ips = [] #TODO: replace with db searching function
+    for ips in range(int(how_many)):
+        counter = 0
+        while True:
+            if counter == 50:
+                logger.error('region[{}]> ip range full'.format(str(region_name)))
+                return None
+            else:
+                counter += 1
+            requested_ip_index = random.randint(ip_min, ip_max)
+            requested_ip = str(region_ips[requested_ip_index])
+            if requested_ip in tested_ips:
+                logger.warning('region[{}]> ip address {} already tested. cache: {}'.format(str(region_name), str(requested_ip), str(tested_ips)))
+                continue
+            if requested_ip in requested_ips:
+                logger.warning('region[{}]> ip address {} already generated.'.format(str(region_name), str(requested_ip)))
+                tested_ips.append(requested_ip)
+                continue
+            if requested_ip in all_ips:
+                position = all_ips.index(requested_ip)
+                logger.warning('region[{}]> ip address {} already exists. location: {}'.format(str(region_name), str(requested_ip), str(position)))
+                tested_ips.append(requested_ip)
+                continue
+            else:
+                tested_ips = [] #clear ip cache
+                break
+        logger.info('region[{}]> ip address {} selected.'.format(str(region_name), str(requested_ip)))
+        requested_ips.append(requested_ip)
+    logger.info('region[{}]> ip addresses {} selected.'.format(str(region_name), str(requested_ips)))
+    return requested_ips
 
 
 def readreal():
     """ read the current state and return its contents """
     try:
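The deprecated generator expands an ipv4_min/ipv4_max pair into a candidate list via netaddr; a minimal standalone sketch of that expansion (the range endpoints are illustrative, not taken from any real config):

    import netaddr

    # expand a min/max pair into concrete addresses, as generate_ipv4 does
    ip_range = netaddr.IPRange('10.0.10.10', '10.0.10.14')
    region_ips = [str(ip) for ip in netaddr.IPSet(ip_range)]
    print(region_ips)  # ['10.0.10.10', '10.0.10.11', '10.0.10.12', '10.0.10.13', '10.0.10.14']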

View file

@@ -78,14 +78,13 @@ def vmcreate(req):
     vm_id = random.randint(1000, 9999)
     cubeid = int(time.time() * 10000 * 10000)
     ### ipv4
-    ipv4_list = grid.generate_ipv4(req['region'], req['vps_ipv4'])
-    ipv4_dict = {}
-    ipidx = 0
-    for ip in ipv4_list:
-        ipv4_dict[str(ipidx)] = str(ip)
-        ipidx += 1
-    macaddr = grid.genmac(cubeid)
+    #ipv4_list = grid.generate_ipv4(req['region'], req['vps_ipv4'])
+    #ipv4_dict = {}
+    #ipidx = 0
+    #for ip in ipv4_list:
+    #    ipv4_dict[str(ipidx)] = str(ip)
+    #    ipidx += 1
+    #macaddr = grid.genmac(cubeid)
 
     #metadata
     deploy = { 'cube': int(cubeid),
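With the generators deprecated, vmcreate() now expects the caller to supply the network identity itself. A sketch of the request payload, listing only the keys this function is seen to read in this diff (all values are placeholders):

    req = {
        'clientid': '42',                    # placeholder
        'clientname': 'Example Client',      # placeholder
        'clientemail': 'client@example.com',
        'region': 'region0',                 # placeholder region name
        'vps_type': 'kvm',                   # or the container type handled by the lxc branch
        'vps_recipe': 'recipe0',             # placeholder
        'vps_mem': 2048,
        'vps_hdd': 20,
        'vps_ipv4': '10.0.10.11',            # was chosen by grid.generate_ipv4()
        'vps_mac': '25:0a:05:76',            # was chosen by grid.genmac()
    }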
@@ -100,15 +99,14 @@ def vmcreate(req):
                'clientemail': req['clientemail'],
                'recipe': req['vps_recipe'],
                'iso9660': 'ubuntu-16.04.1-server-amd64.iso',
-               'ipv4list': ipv4_list,
-               'macaddr': macaddr }
+               'ip0': req['vps_ipv4'],
+               'mac0': req['vps_mac'] }
 
     proxobject = auth(slave_name)
     real_slave_name = proxobject.cluster.status.get()[0]['name']
     #print(real_slave_name)
-    description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientname'] + ' (' + req['clientid'] + ')\n' + 'master ip: ' + ipv4_list[0]
+    description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientname'] + ' (' + req['clientid'] + ')\n' + 'master ip: ' + req['vps_ipv4'] + '\nmac address: ' + req['vps_mac']
 
     if req['vps_type'] == 'kvm':
         #create partition
@@ -129,7 +127,7 @@ def vmcreate(req):
             memory=req['vps_mem'],
             virtio0='file=lvm:' + image_name,
             ide1='backup:iso/' + deploy['iso9660'] + ',media=cdrom',
-            net0='virtio,bridge=vmbr0,macaddr=' + macaddr,
+            net0='virtio,bridge=vmbr0,macaddr=' + req['vps_mac'],
             onboot=1,
             description=description)
@@ -143,7 +141,7 @@ def vmcreate(req):
             password=vm_pass,
             rootfs='lvm:' + req['vps_hdd'],
             virtio0='file=lvm:' + image_name,
-            ip_address=ipv4_list[0],
+            ip_address=req['vps_ipv4'],
             onboot=1,
             description=description)
@@ -154,14 +152,14 @@ def vmcreate(req):
     #time.sleep(7) #wait a few seconds for the slave to prepare the machine for initial run
     #vmstart(cubeid)
-    response = { 'status':'CREATE', 'cube':cubeid, 'name':vm_name, 'password':vm_pass, 'ipv4list':str(ipv4_list) }
+    response = { 'status':'CREATE', 'cube':cubeid, 'name':vm_name, 'password':vm_pass }
     grid.writedb(deploy)
     return response
 
 
 def vmstatus(cubeid):
     """ returns the status of the machine """
-    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> get status of %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
@@ -174,7 +172,7 @@ def vmstatus(cubeid):
 def vmstart(cubeid):
     """ starts a machine """
-    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> starting %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
@@ -188,7 +186,7 @@ def vmstart(cubeid):
 def vmshutdown(cubeid):
     """ acpi shutdown the machine.. """
-    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> acpi shutdown %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
@@ -204,7 +202,7 @@ def vmshutdown(cubeid):
 def vmstop(cubeid):
     """ poweroff the machine.. """
-    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> power off %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
@@ -220,7 +218,7 @@ def vmstop(cubeid):
 def vmsuspend(cubeid):
     """ suspend machine """
-    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> suspending %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
@@ -235,7 +233,7 @@ def vmsuspend(cubeid):
 def vmresume(cubeid):
     """ resume machine """
-    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> resuming %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
@@ -250,7 +248,7 @@ def vmresume(cubeid):
 def vmrrd(cubeid):
     """ retrieve rrd graphs (PNG) """
-    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     proxobject.cluster.status.get()[0]['name']
@@ -279,7 +277,7 @@ def vmrrd(cubeid):
 def vmvnc(cubeid):
     """ invoke vnc ticket """
-    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
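The novnc_url switch to vnc_lite.html in the config pairs with this endpoint. A hedged sketch of turning a ticket into a console link (vnc_lite.html reads host, port and password from its query string; exactly which fields proxmaster forwards is not shown in this diff):

    from urllib.parse import urlencode

    def console_url(novnc_url, host, port, ticket):
        # append noVNC's expected query parameters to the configured page
        return novnc_url + '?' + urlencode({'host': host, 'port': port, 'password': ticket})

    print(console_url('https://panel.example.com/novnc/vnc_lite.html',
                      'panel.example.com', 6900, 'PVEVNC:...'))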
@@ -338,5 +336,5 @@ def vmvnc(cubeid):
 if __name__ == '__main__':
     #internal module tests
     time.sleep(1)
-    vmvnc(656758)
+    #vmvnc(656758)