refactoring

This commit is contained in:
deflax 2016-11-03 06:05:51 +02:00
parent 16b6fe5073
commit 18b2ee383c
7 changed files with 314 additions and 534 deletions


@ -1,151 +0,0 @@
# -*- coding: utf-8
#
# manage clientsdb.json

#import site packages
import json

#import local packages
import ioconfig
import utils


def addclient(vmid, vmname, clientid, clientname, clientemail, vmpass):
    """ add new client with the requested vm to the clientsdb.json """
    clientsdb = readclientsdb()
    if str(clientid) in clientsdb:
        ioconfig.logger.info('client[{}]> already exists. merging.'.format(clientid))
    else:
        ioconfig.logger.info('client[{}]> does not exist. creating...'.format(clientid))
        #generate password and send it to the client
        #newpass = utils.genpassword(30)
        #ioconfig.logger.info('client[{}]> initial password is: {}'.format(clientid, newpass))
        #salt = bcrypt.gensalt()
        #b_newpass = newpass.encode('ascii')
        #encpasswd = bcrypt.hashpw(b_newpass, salt).decode('ascii')
        #vcard = { 'name':str(clientname), 'email':str(clientemail), 'encpasswd':str(encpasswd), 'id':str(clientid) }
        vcard = { 'name':str(clientname), 'email':str(clientemail), 'id':str(clientid) }
        newclient = { str(clientid):vcard }
        clientsdb.update(newclient)
        #Send initial email to the user as we will use the internal auth from now on.
        #utils.sendmail(clientemail, '{} logged in.'.format)
    ioconfig.logger.info('client[{}]> vmid {} is now owned by {} ({})'.format(clientid, vmid, clientemail, clientname))
    #create initial vm template
    vmdata = { 'hostname':str(vmname), 'vmid':str(vmid), 'ownerid':str(clientid) }
    clientsdb[str(clientid)][str(vmid)] = vmdata
    writeclientsdb(clientsdb)


def inventory(clientid):
    """ return a list of owned vmids if the client id matches the client database. (logged-in users) """
    try:
        clientsdb = readclientsdb()
        user = clientsdb[str(clientid)]
        email = user['email']
        response = {}
        for vmid, data in user.items():
            response[vmid] = data
        response.pop('id')
        response.pop('email')
        response.pop('name')
        ioconfig.logger.info('client[{}]> {} inventory sent.'.format(clientid, email))
        return response
    except:
        ioconfig.logger.error('clients> user id: {} could not be listed.'.format(clientid))
        return None


def vmowner(vmid, vmname, verbose):
    """ find the owner of the vm """
    clientsdb = readclientsdb()
    try:
        clientid = utils.find_rec(clientsdb, str(vmid))[0]['ownerid']
        clientname = clientsdb[str(clientid)]['name']
    except:
        clientid = '0' #unknown owner
        clientname = 'unknown'
    if verbose:
        ioconfig.logger.info('client[{}]> {} is the owner of {} ({})'.format(str(clientid), clientname, str(vmid), vmname))
    return clientid


def readclientsdb():
    """ read client db """
    try:
        with open('clients.json') as dbr:
            clientsdb = json.load(dbr)
    except:
        clientsdb = {}
        ioconfig.logger.warning('clients> initializing...')
        #writeclientsdb(clientsdb)
    return clientsdb


def writeclientsdb(clientsdb):
    """ write db """
    with open('clients.json', 'w') as dbw:
        json.dump(clientsdb, dbw)


if __name__ == '__main__':
    inventory(1)
    print('---')
    inventory(2)


#def validate(clientemail, password):
#    """ return a list of owned vmids if credentials match an user from the database. (fresh logins) """
#    #1. search for the client
#    try:
#        clientsdb = readclientsdb()
#        path = utils.get_path(clientsdb, clientemail)
#        c_id = str(path[0])
#    except:
#        ioconfig.logger.error('clients> {} was not found in the database!'.format(clientemail))
#        #log bad ips here...
#        return None
#    #2. check the password
#    encpass = clientsdb[c_id]['encpasswd']
#    b_srvpass = password.encode('ascii', 'ignore')
#    b_encpass = encpass.encode('ascii', 'ignore')
#    if (hmac.compare_digest(bcrypt.hashpw(b_srvpass, b_encpass), b_encpass)):
#        #login successful
#        ioconfig.logger.info('client[{}]> {} logged in successfully'.format(c_id, clientemail))
#        #3. generate vmlist to return the owned ids to the client.
#        return clientvms(clientsdb[c_id])
#    else:
#        ioconfig.logger.warning('client[{}]> {} access denied!'.format(c_id, clientemail))
#        #cant compare password
#        return None
#
#
#def setencpasswd(clientemail, newpass):
#    """ setup a new management password """
#    salt = bcrypt.gensalt()
#    b_newpass = newpass.encode('ascii')
#    encpasswd = bcrypt.hashpw(b_newpass, salt).decode('ascii')
#    try:
#        clientsdb = readclientsdb()
#        path = utils.get_path(clientsdb, clientemail)
#        c_id = str(path[0])
#        #check the returned path with forward query
#        query = clientsdb[c_id]['email']
#        #ioconfig.logger.info('client[{}]> path={}'.format(c_id, str(path)))
#    except:
#        ioconfig.logger.critical('clients> client {} not found'.format(clientemail))
#        raise
#
#    if query != clientemail:
#        ioconfig.logger.critical('clients> test query returns different vmname! check clients db for consistency!')
#        raise
#    else:
#        clientsdb[c_id]['encpasswd'] = encpasswd
#        ioconfig.logger.info('client[{}]> {} password changed!'.format(c_id, clientemail))
#        writeclientsdb(clientsdb)


@ -5,24 +5,25 @@ workers = 3
[general]
logfile = log/proxmaster.log
adminuser = masteradmin@pve
apipass = CHANGEME
apipass = sakdlsadas
vmid_min = 1000
vmid_max = 999999
novnc_url = http://FQDN/noVNC
novnc_url = http://#/novnc/vnc_auto.html
[region_0]
name = CHANGEME
ipv4_min = 192.168.0.4
ipv4_max = 192.168.0.254
[slave_0]
name = CHANGEME0.fqdn.com
masterip = 192.168.0.2
password = CHANGEME
[Region]
regionid = 0
fullname = Region#1
ipv4_min = 87.120.110.40
ipv4_max = 87.120.110.240
[slave_1]
name = CHANGEME1.fqdn.com
masterip = 192.168.0.3
password = CHANGEME
regionid = 0
[fighter]
type = proxmox
ipv4 = 2.3.4.5
password = dslfsdfds
regionid = Region
[warrior]
type = proxmox
ipv4 = 1.2.3.4
password = sadlaksda
regionid = Region
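The replacement layout above keys each slave by its own section name and points it at a region section through regionid. A minimal sketch of resolving credentials from it with the standard configparser (ioconfig.parser is the project's parsed instance; the filename here is an assumption):

# sketch: resolve a slave and its region from the new config layout
import configparser

parser = configparser.ConfigParser()
parser.read('proxmaster.ini')  # assumed filename; the project loads its config via ioconfig

slave_name = 'warrior'                           # sample slave section from above
slave_ip = parser.get(slave_name, 'ipv4')        # '1.2.3.4'
slave_pass = parser.get(slave_name, 'password')
region = parser.get(slave_name, 'regionid')      # 'Region'
print(parser.get(region, 'fullname'))            # 'Region#1'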

db/README (new file, 16 lines)

@ -0,0 +1,16 @@
this folder contains the cube metadata files.

format:

{ 'cube': 3798972137423410,
  'type': 'kvm',
  'host': hostname,
  'region': 'izba',
  'slave': 'warrior',
  'vmid': 4500,
  'cpu_mem_hdd': (vps_cpu, vps_mem, vps_hdd),
  'clientid': 12,
  'clientname': 'Master',
  'clientemail': 'info@domain.com',
  'os': 'ubuntu',
}
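A minimal sketch of reading one of these files back, matching what grid.querydb() and grid.query_vm() do (the cube id is the sample above):

# sketch: load one cube's metadata file, as grid.querydb() does
import json

cube_id = 3798972137423410  # sample id from the format above
with open('db/vm.{}.json'.format(cube_id)) as f:
    cube = json.load(f)
print(cube['slave'], cube['type'], cube['vmid'])  # the triple grid.query_vm() returns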

grid.py (447 changed lines)

@ -14,12 +14,194 @@ import netaddr
import utils
import plugin
import ioconfig
import clientsdb
logger = ioconfig.logger
config = ioconfig.parser
def query_vm(req_cube_id):
    """ returns slave_name, vm_type and vm_id for the requested cubeid """
    data = querydb(req_cube_id)
    return data['slave'], data['type'], data['vmid']


def querydb(cubeid):
    """ read a metadata file """
    try:
        dbfile = 'db/vm.{}.json'.format(cubeid)
        with open(dbfile) as datafile:
            data = json.load(datafile)
        logger.info('grid> {} --> {}'.format(dbfile, data))
        return data
    except Exception as e:
        logger.critical('grid> {}'.format(e))
        return None


def writedb(src_data):
    """ create a new metadata file """
    try:
        dbfile = 'db/vm.{}.json'.format(src_data['cube'])
        with open(dbfile, 'w') as dbf:
            json.dump(src_data, dbf)
        #TODO: send mail
        logger.info('grid> {} <-- {}'.format(dbfile, src_data))
    except Exception as e:
        logger.critical('grid> {}'.format(e))
    return None
def query_happiness(region_id):
    """ analyzes grid data for the requested region and returns a proposed slave_id,
        based on a "happiness" factor. happiness means alive and free :) """
    grid_data = readcache()
    grid_data = grid_data[str(region_id)]
    all_slaves = []
    for element in grid_data:
        try:
            if str(element) == grid_data[element]['id']:
                all_slaves.append(element)
        except:
            continue
    all_slaves = [ int(x) for x in all_slaves ] #convert values from str to int
    alive_slaves = []
    for slaveid in all_slaves:
        if str(grid_data[str(slaveid)]['alive']) == 'up':
            alive_slaves.append(slaveid)
    logger.info('region[{}]> alive slaves {}'.format(str(region_id), str(alive_slaves)))
    #happy_slave = random.choice(alive_slaves)
    if len(alive_slaves) < 1:
        logger.error('region[{}]> grid is full. add more slaves'.format(str(region_id)))
        return None
    else:
        happy_slave = 0 #TODO: analyze slaves and make an informed decision.
        logger.info('region[{}]> {} selected'.format(str(region_id), str(happy_slave)))
        return happy_slave
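The TODO above still hardcodes slave 0. If the cached slave entries carried a load field, the informed decision could look like this sketch (the free_mem key is hypothetical, not a field the cache is shown to have):

# sketch: prefer the alive slave with the most free memory (hypothetical field)
def pick_happy_slave(grid_data, alive_slaves):
    return max(alive_slaves, key=lambda sid: grid_data[str(sid)].get('free_mem', 0))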
def generate_ipv4(region_name, how_many=1):
    """ check the region's range, exclude deployed machines and return a list of available ips """
    ip_range_min = ioconfig.parser.get(region_name, 'ipv4_min')
    ip_range_max = ioconfig.parser.get(region_name, 'ipv4_max')
    region_ipset = netaddr.IPSet(netaddr.IPRange(ip_range_min, ip_range_max))
    region_ips = []
    for ip in region_ipset:
        region_ips.append(ip)
    ip_min = 0
    ip_max = len(region_ips) - 1
    tested_ips = [] #initialize ip cache
    requested_ips = []
    #all_ips = utils.find_rec(grid_data, 'ipaddr') #TODO: we cant list that for KVM so we should use another approach. perhaps a separate macaddress - ipaddress table which we will use to manipulate a static lease dhcp server. at this point this function is useless because plugins do not or cant track the address of the actual machine. proxmaster should divide each region into segments and each segment should export a static lease config which will be queried when we search for unused addresses.
    all_ips = [] #TODO: replace with a db searching function
    for ips in range(int(how_many)):
        counter = 0
        while True:
            if counter == 50:
                logger.error('region[{}]> ip range full'.format(region_name))
                return None
            else:
                counter += 1
            requested_ip_index = random.randint(ip_min, ip_max)
            requested_ip = str(region_ips[requested_ip_index])
            if requested_ip in tested_ips:
                logger.warning('region[{}]> ip address {} already tested. cache: {}'.format(region_name, str(requested_ip), str(tested_ips)))
                continue
            if requested_ip in requested_ips:
                logger.warning('region[{}]> ip address {} already generated.'.format(region_name, str(requested_ip)))
                tested_ips.append(requested_ip)
                continue
            if requested_ip in all_ips:
                position = all_ips.index(requested_ip)
                logger.warning('region[{}]> ip address {} already exists. location: {}'.format(region_name, str(requested_ip), str(position)))
                tested_ips.append(requested_ip)
                continue
            else:
                tested_ips = [] #clear ip cache
                break
        logger.info('region[{}]> ip address {} selected.'.format(region_name, str(requested_ip)))
        requested_ips.append(requested_ip)
    logger.info('region[{}]> ip addresses {} selected.'.format(region_name, str(requested_ips)))
    return requested_ips
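all_ips is left empty above until the db searching function from the TODO exists. Since writedb() stores each cube's 'ipv4' list under db/, one possible sketch of that function:

# sketch: collect every ip already recorded in db/vm.*.json (see writedb above)
import glob
import json

def db_used_ips():
    ips = []
    for dbfile in glob.glob('db/vm.*.json'):
        with open(dbfile) as f:
            ips.extend(json.load(f).get('ipv4', []))
    return ips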
def generate_vmid():
    """ analyzes cached grid data and returns a proposed vmid for new machines """
    grid_data = readcache()
    tested_vmids = [] #initialize id cache
    id_min = grid_data['vmid_min']
    id_max = grid_data['vmid_max']
    #all_vmid = utils.find_rec(grid_data, 'vmid') #get all vmid values from the nested grid
    all_vmid = [] #TODO: see the generate_ipv4 remark
    all_vmid = [ int(x) for x in all_vmid ] #convert values from str to int (its a vmid right?)
    counter = 0
    while True:
        if counter == 50:
            logger.error('grid> vmid range full')
            return None
        else:
            counter += 1
        requested_vmid = random.randint(int(id_min), int(id_max)) #max 90k machines
        if requested_vmid in tested_vmids:
            logger.warning('grid> vmid ' + str(requested_vmid) + ' already tested. cache:' + str(tested_vmids))
            continue
        if requested_vmid in all_vmid:
            position = all_vmid.index(requested_vmid)
            logger.warning('grid> vmid ' + str(requested_vmid) + ' already exists. location:' + str(position))
            tested_vmids.append(requested_vmid)
        else:
            tested_vmids = [] #clear tested vmid cache
            break
    logger.info('grid> vmid ' + str(requested_vmid) + ' selected')
    return requested_vmid
def findDiff(d1, d2, path=""):
    for k in d1.keys():
        if k not in d2.keys():
            logger.warning('cache> ' + str(k) + ' as key not in d2')
        else:
            if type(d1[k]) is dict:
                if path == "":
                    path = k
                else:
                    path = path + "->" + k
                findDiff(d1[k], d2[k], path)
            else:
                if d1[k] != d2[k]:
                    logger.warning('cache> ' + str(k) + ' ' + str(d1[k]) + ' [-]')
                    logger.warning('cache> ' + str(k) + ' ' + str(d2[k]) + ' [+]')
def readreal():
    """ read the current state and return its contents """
    try:
        with open('grid-real.json') as gridfile:
            grid_data = json.load(gridfile)
        resulttime = grid_data['synctime']
        logger.info('grid> sync for ' + resulttime)
    except:
        grid_data = {}
        logger.error('cache> cannot read temp file')
    return grid_data
def sync(cached=True):
    """ calls slave objects and mixes their nodes into a common cluster grid """
    a = datetime.datetime.now()
@ -92,7 +274,7 @@ def sync(cached=True):
        if cache_slave['alive'] == 'up':
            #slave was not down so it must be up...
            cache_slave = update_cache(real_slave, cache_file, prefix, 'up')
            cache_slave = updatedb(real_slave, cache_file, prefix, 'up')
            logger.info(prefix + 'sync success o/')
        else:
            #if the slave was down before, compare the state before overwriting the cache
@ -102,10 +284,10 @@ def sync(cached=True):
            findDiff(cache_slave, real_slave)
            if cache_slave != real_slave:
                logger.warning(prefix + 'cache != current status. please restore host!')
                cache_slave = update_cache(cache_slave, cache_file, prefix, 'down')
                cache_slave = updatedb(cache_slave, cache_file, prefix, 'down')
            else:
                logger.info(prefix + 'cache == current status. host restored. o/')
                cache_slave = update_cache(cache_slave, cache_file, prefix, 'up')
                cache_slave = updatedb(cache_slave, cache_file, prefix, 'up')

        #what to do with cache if host is down
        if real_slave['alive'] == 'down':
@ -115,7 +297,7 @@ def sync(cached=True):
                cache_slave = json.load(fscr)
            logger.warning(prefix + '...done')
            cache_slave = update_cache(cache_slave, cache_file, prefix, 'down')
            cache_slave = updatedb(cache_slave, cache_file, prefix, 'down')
        except:
            logger.error(prefix + 'sync failure!')
            cache_slave = real_slave.copy()
@ -140,263 +322,10 @@ def sync(cached=True):
    return real_grid


def update_cache(cachedata, cachefile, prefix, newstatus):
    """ update administrative status """
    cachedata['alive'] = newstatus
    WriteCache(cachedata, cachefile)
    #TODO send mail
    logger.info(prefix + 'administratively ' + newstatus)
    return cachedata


def WriteCache(src_data, cache_file):
    with open(cache_file, 'w') as fcw:
        json.dump(src_data, fcw)
def query_region(region_name):
    """ translate region name to region id """
    grid_data = readcache()
    all_regions = []
    for element in grid_data:
        try:
            if str(element) == grid_data[element]['id']:
                all_regions.append(element)
        except:
            continue
    for region in all_regions:
        if grid_data[region]['region'] == region_name:
            logger.info('region[{}]> region id={}'.format(region_name, region))
            return grid_data[region]['id']
    logger.error('grid> cant find region ' + region_name)
    return "-1"
def query_happiness(region_id):
    """ analyzes grid data for the requested region and returns a proposed slave_id,
        based on a "happiness" factor. happiness means alive and free :) """
    grid_data = readcache()
    grid_data = grid_data[str(region_id)]
    all_slaves = []
    for element in grid_data:
        try:
            if str(element) == grid_data[element]['id']:
                all_slaves.append(element)
        except:
            continue
    all_slaves = [ int(x) for x in all_slaves ] #convert values from str to int
    alive_slaves = []
    for slaveid in all_slaves:
        if str(grid_data[str(slaveid)]['alive']) == 'up':
            alive_slaves.append(slaveid)
    logger.info('region[{}]> alive slaves {}'.format(str(region_id), str(alive_slaves)))
    #happy_slave = random.choice(alive_slaves)
    if len(alive_slaves) < 1:
        logger.error('region[{}]> grid is full. add more slaves'.format(str(region_id)))
    else:
        happy_slave = 1 #TODO: analyze slaves and make an informed decision.
    logger.info('region[{}]> {} selected'.format(str(region_id), str(happy_slave)))
    return happy_slave
def generate_ipv4(region_id, how_many=1):
    """ analyzes cached grid data and returns ip addresses for new machines. """
    grid_data = readcache()
    ip_range_min = grid_data[str(region_id)]['ipv4_min']
    ip_range_max = grid_data[str(region_id)]['ipv4_max']
    region_ipset = netaddr.IPSet(netaddr.IPRange(ip_range_min, ip_range_max))
    region_ips = []
    for ip in region_ipset:
        region_ips.append(ip)
    ip_min = 0
    ip_max = len(region_ips) - 1
    tested_ips = [] #initialize ip cache
    requested_ips = []
    all_ips = utils.find_rec(grid_data, 'ipaddr') #TODO: we cant list that for KVM so we should use another approach. perhaps a separate macaddress - ipaddress table which we will use to manipulate a static lease dhcp server. at this point this function is useless because plugins do not or cant track the address of the actual machine. proxmaster should divide each region into segments and each segment should export a static lease config which will be queried when we search for unused addresses.
    for ips in range(int(how_many)):
        counter = 0
        while True:
            if counter == 50:
                logger.error('region[{}]> ip range full'.format(str(region_id)))
                return None
            else:
                counter += 1
            requested_ip_index = random.randint(ip_min, ip_max)
            requested_ip = str(region_ips[requested_ip_index])
            if requested_ip in tested_ips:
                logger.warning('region[{}]> ip address {} already tested. cache: {}'.format(str(region_id), str(requested_ip), str(tested_ips)))
                continue
            if requested_ip in requested_ips:
                logger.warning('region[{}]> ip address {} already generated.'.format(str(region_id), str(requested_ip)))
                tested_ips.append(requested_ip)
                continue
            if requested_ip in all_ips:
                position = all_ips.index(requested_ip)
                logger.warning('region[{}]> ip address {} already exists. location: {}'.format(str(region_id), str(requested_ip), str(position)))
                tested_ips.append(requested_ip)
                continue
            else:
                tested_ips = [] #clear ip cache
                break
        logger.info('region[{}]> ip address {} selected.'.format(str(region_id), str(requested_ip)))
        requested_ips.append(requested_ip)
    logger.info('region[{}]> ip addresses {} selected.'.format(str(region_id), str(requested_ips)))
    return requested_ips
def generate_vmid():
    """ analyzes cached grid data and returns a proposed vmid for new machines """
    grid_data = readcache()
    tested_vmids = [] #initialize id cache
    id_min = grid_data['vmid_min']
    id_max = grid_data['vmid_max']
    all_vmid = utils.find_rec(grid_data, 'vmid') #get all vmid values from the nested grid
    all_vmid = [ int(x) for x in all_vmid ] #convert values from str to int (its a vmid right?)
    counter = 0
    while True:
        if counter == 50:
            logger.error('grid> vmid range full')
            return None
        else:
            counter += 1
        requested_vmid = random.randint(int(id_min), int(id_max)) #max 90k machines
        if requested_vmid in tested_vmids:
            logger.warning('grid> vmid ' + str(requested_vmid) + ' already tested. cache:' + str(tested_vmids))
            continue
        if requested_vmid in all_vmid:
            position = all_vmid.index(requested_vmid)
            logger.warning('grid> vmid ' + str(requested_vmid) + ' already exists. location:' + str(position))
            tested_vmids.append(requested_vmid)
        else:
            tested_vmids = [] #clear tested vmid cache
            break
    logger.info('grid> vmid ' + str(requested_vmid) + ' selected')
    return requested_vmid
def query_slave_data(slave_id):
    """ read the cache for the requested slave_id """
    grid_data = readcache()
    all_regions = []
    for element in grid_data:
        try:
            if str(element) == grid_data[element]['id']:
                all_regions.append(element)
        except:
            continue
    try:
        for region in all_regions:
            cslave = grid_data[region]
            result_slave = cslave[str(slave_id)] #select the slave from all regions
            if result_slave is not None:
                return result_slave
    except:
        raise
    return {}
def query_vm(req_vmid):
    """ returns slave_id and vm_type for the requested vmid """
    #read current state (no cache)
    sync(False)
    grid_data = readreal()
    #search for the requested vmid
    #TODO: maybe we should also check if the owner has permissions to manage it,
    ###### if for some reason the admin panel is compromised.
    all_vmid = utils.find_rec(grid_data, 'vmid')
    target = int(0)
    for running_vmid in all_vmid:
        if str(req_vmid) == str(running_vmid):
            target = running_vmid
            break
        else:
            continue
    if target == 0:
        logger.error('grid> vmid {} cannot be found!'.format(req_vmid))
        return int(-1), "None"
    path = utils.get_path(grid_data, target)
    region_id = path[0]
    slave_id = path[1]
    try:
        vm_type = grid_data[str(region_id)][str(slave_id)][str(target)]['type']
    except:
        logger.error('vm[{}]> type is unknown!'.format(req_vmid))
        raise
    #logger.info('vm[{}]> type={} path={}'.format(target, vm_type, str(path)))
    return slave_id, vm_type
def findDiff(d1, d2, path=""):
    for k in d1.keys():
        if k not in d2.keys():
            logger.warning('cache> ' + str(k) + ' as key not in d2')
        else:
            if type(d1[k]) is dict:
                if path == "":
                    path = k
                else:
                    path = path + "->" + k
                findDiff(d1[k], d2[k], path)
            else:
                if d1[k] != d2[k]:
                    logger.warning('cache> ' + str(k) + ' ' + str(d1[k]) + ' [-]')
                    logger.warning('cache> ' + str(k) + ' ' + str(d2[k]) + ' [+]')
def readcache():
    """ read the last saved cache and return its contents """
    try:
        with open('grid-cache.json') as gridfile:
            grid_data = json.load(gridfile)
    except:
        grid_data = {}
        logger.error('cache> cannot read cache file')
    return grid_data


def readreal():
    """ read the current state and return its contents """
    try:
        with open('grid-real.json') as gridfile:
            grid_data = json.load(gridfile)
        resulttime = grid_data['synctime']
        logger.info('grid> sync for ' + resulttime)
    except:
        grid_data = {}
        logger.error('cache> cannot read temp file')
    return grid_data
if __name__ == '__main__':
    #print(sync())
    #print(query_region('Plovdiv, Bulgaria'))
    print(sync())
    #print(query_happiness(0))
    #print(generate_ipv4(0,3))
    #print(generate_vmid())
    #print(query_slave_data(0))
    print(query_vm(147344))


@ -1,12 +0,0 @@
#!/bin/bash
#makes jsons human (and machine) readable
echo "CACHE:"
cat grid-cache.json | python3 -m json.tool
echo " "
echo "GRID:"
cat grid-real.json | python3 -m json.tool
echo " "
echo "CLIENTS:"
cat clients.json | python3 -m json.tool

plugin.py (154 changed lines)

@ -13,25 +13,25 @@ from unidecode import unidecode
#local
import grid
import clientsdb
import utils
import ioconfig
import novnc
def auth(slave_id, masterip=None, enc_password=None):
    """ captures the slave we want to auth from the cache and extracts the credentials """
def auth(slave_name):
    """ return a control object from the configured slave name """
    adminuser = ioconfig.parser.get('general', 'adminuser')
    if masterip is None:
        result_slave = grid.query_slave_data(slave_id)
        masterip = result_slave['masterip']
        enc_password = result_slave['password']
    adminpassword = base64.b64decode(enc_password).decode('ascii')
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    slavepass = ioconfig.parser.get(str(slave_name), 'password')
    slavetype = ioconfig.parser.get(str(slave_name), 'type')

    #vendor specific
    #connection = lib_proxmoxia.Connector(masterip)
    #auth_token = connection.get_auth_token(adminuser, adminpassword)
    #proxobject = lib_proxmoxia.Proxmox(connection)
    proxobject = ProxmoxAPI(masterip, user=adminuser, password=adminpassword, verify_ssl=False)
    if slavetype == 'proxmoxia':
        connection = lib_proxmoxia.Connector(slaveip)
        auth_token = connection.get_auth_token(adminuser, slavepass)
        proxobject = lib_proxmoxia.Proxmox(connection)
    if slavetype == 'proxmox':
        proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False)
    return proxobject
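Usage after the refactor, as a sketch: auth() now takes a config section name instead of a numeric slave id, so a caller does:

proxobject = auth('warrior')  # 'warrior' is a sample slave section from the config
vmlist(proxobject)            # queries the node's qemu and lxc lists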
@ -39,11 +39,13 @@ def auth(slave_id, masterip=None, enc_password=None):
def vmlist(proxobject):
    """ get vmlist """
    #we keep single node proxmoxes so node id = 0
    #slave_name = proxobject.get('cluster/status')#'name']
    slave_name = proxobject.cluster.status.get()[0]['name']
    #query_kvm = proxobject.get('nodes/%s/qemu' % slave_name)
    query_kvm = proxobject.nodes(slave_name).qemu.get()
    query_lxc = proxobject.nodes(slave_name).lxc.get()
    for kvm_dict in query_kvm:
        kvm_dict['vmtype'] = 'kvm'
    for lxc_dict in query_lxc:
@ -53,41 +55,54 @@ def vmlist(proxobject):
def vmcreate(req):
    """ create vm. returns JSON with data for whmcs """
    grid.sync()
    region_id = grid.query_region(req['region'])
    if region_id == "-1":
    """ create vm. returns JSON with data """
    try:
        region_id = ioconfig.parser.get(str(req['region']), 'regionid')
        region_fullname = ioconfig.parser.get(str(req['region']), 'fullname')
    except:
        ioconfig.logger.error('grid> no region found')
        response = 'NO REGION FOUND'
        return response
    slave_id = str(grid.query_happiness(region_id))
    vm_id = str(grid.generate_vmid())
    vm_ipv4 = grid.generate_ipv4(region_id, req['vps_ipv4'])
        return None
    vm_name_utf8 = req['hostname']
    vm_name = unidecode(vm_name_utf8)
    try:
        vm_pass = req['vmpass']
        client_id = req['clientid']
        client_name = req['clientname']
    except:
        vm_pass = 'kvm-no-pass'
    proxobject = auth(slave_id) #we dont know the ip of slave_id so we leave the auth function to find it itself.

    #generators
    slave_name = 'lexx' #static route
    #slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate the route
    vmid = 4500
    #vmid = str(grid.generate_vmid()) #TODO: this should be between 100 and 65000
    cube_id = time.time() #TODO: make sure this is unique. time since epoch is not random enough but should do the trick for now
    ipv4_list = grid.generate_ipv4(req['region'], req['vps_ipv4'])

    #metadata
    deploy = { 'cube': int(cube_id),
               'type': req['type'],
               'host': vm_name,
               'region': region_fullname,
               'slave': slave_name,
               'vmid': vmid,
               'cpu_mem_hdd': (req['vps_cpu'], req['vps_mem'], req['vps_hdd']),
               'clientid': req['clientid'],
               'clientname': req['clientname'],
               'clientemail': req['clientemail'],
               'os': req['vps_os'],
               'ipv4': ipv4_list }

    proxobject = auth(slave_name)
    slave_name = proxobject.cluster.status.get()[0]['name']
    ipv4_dict = {}
    ipidx = 0
    ioconfig.logger.info('slave[' + slave_name + ']> received data: %s, %s, %s, %s, %s', region_id, slave_id, vm_id, vm_ipv4, req)
    ioconfig.logger.info('slave[' + slave_name + ']> deploying %s on %s at %s with %s and %s', vm_id, slave_id, region_id, vm_ipv4, req)
    for ip in vm_ipv4:
        ipv4_dict[str(ipidx)] = str(ip)
        ipidx += 1
    response = { 'status':'CREATE', 'vmid':vm_id, 'name':vm_name, 'password':vm_pass, 'ipv4_0':vm_ipv4[0] }
    description = vm_name + ' (' + vm_id + ')\n'
    description += 'owned by ' + client_name + ' (' + client_id + ')\n'
    description += 'master ip: ' + vm_ipv4[0]
    description = vm_name + ' (' + vm_id + ')\n' + 'owned by ' + client_name + ' (' + client_id + ')\n' + 'master ip: ' + vm_ipv4[0]

    #create partition
    image_name = 'vm-' + vm_id + '-disk-0'
@ -107,6 +122,7 @@ def vmcreate(req):
            net0='e1000,bridge=pub',
            onboot=1,
            description=description)

    if req['vps_type'] == 'LXC':
        create_result = proxobject.nodes(slave_name).lxc.post(vmid=vm_id,
            hostname=vm_name,
@ -119,11 +135,8 @@ def vmcreate(req):
            onboot=1,
            description=description)

    #populate the client db
    client_id = req['clientid']
    client_name = req['clientname']
    client_email = req['clientemail']
    clientsdb.addclient(vm_id, vm_name, client_id, client_name, client_email, vm_pass)
    print('result:')
    print(create_result)

    #start the machine
    time.sleep(7) #wait a few seconds for the slave to prepare the machine for the initial run
@ -131,12 +144,12 @@ def vmcreate(req):
    return response
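The cube id above comes from time.time(), which the TODO flags as non-unique under concurrent creates. A sketch of a collision-resistant alternative that still yields an int, as the metadata format expects (using uuid here is an assumption, not project policy):

# sketch: 64 random bits as an int cube id; collision odds are negligible vs time.time()
import uuid

def generate_cube_id():
    return uuid.uuid4().int >> 64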
def vmstatus(vm_id):
def vmstatus(cubeid):
    """ returns the status of the machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> get status of %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
@ -145,14 +158,13 @@ def vmstatus(vm_id):
    return result


def vmstart(vm_id):
def vmstart(cubeid):
    """ starts a machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> starting %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
    if vm_type == 'lxc':
@ -161,12 +173,12 @@ def vmstart(vm_id):
    return response


def vmshutdown(vm_id):
def vmshutdown(cubeid):
    """ acpi shutdown the machine.. """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> acpi shutdown %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
@ -178,12 +190,12 @@ def vmshutdown(vm_id):
    return response


def vmstop(vm_id):
def vmstop(cubeid):
    """ poweroff the machine.. """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> power off %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
@ -195,12 +207,12 @@ def vmstop(vm_id):
    return response


def vmsuspend(vm_id):
def vmsuspend(cubeid):
    """ suspend machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> suspending %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
@ -211,12 +223,12 @@ def vmsuspend(vm_id):
    return response


def vmresume(vm_id):
def vmresume(cubeid):
    """ resume machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> resuming %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
@ -227,12 +239,12 @@ def vmresume(vm_id):
    return response


def vmrrd(vm_id):
def vmrrd(cubeid):
    """ retrieve rrd graphs (PNG) """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> query rrd of %s %s' % (slave_name, vm_type, vm_id))

    result = {}
@ -253,10 +265,10 @@ def vmrrd(vm_id):
def vmvnc(vm_id):
    """ invoke vnc ticket """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
@ -268,7 +280,7 @@ def vmvnc(vm_id):
    #socket = proxobject.nodes(slave_name).lxc(vm_id).vncwebsocket.get(port=ticket['port'],
    #                                                                  vncticket=ticket['ticket'])

    slaveip = grid.query_slave_data(slave_id)['masterip']
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    #slaveport = socket['port']
    slaveport = ticket['port']
    listenport = str(int(slaveport) + 1000 + (int(slave_id) * 100)) #TODO: max 100 parallel connections/slave.


@ -13,7 +13,6 @@ import json
import ioconfig
import grid
import plugin
import clientsdb
config = ioconfig.parser
logger = ioconfig.logger
@ -36,11 +35,7 @@ def selector(fn, req, vmid=0):
        return status, body

    try:
        if fn == 'inventory':
            clientid = json['clientid']
            body = clientsdb.inventory(clientid)
        elif fn == 'vmcreate':
        if fn == 'vmcreate':
            body = plugin.vmcreate(json)
        elif fn == 'vmstatus':
@ -141,13 +136,6 @@ def max_body(limit):
    return hook


class InventoryResource(object):
    @falcon.before(max_body(64 * 1024))
    def on_post(self, req, resp):
        """ get the client id, compare it with the client db and return a list of managed objects """
        resp.status, response = selector('inventory', req)
        req.context['result'] = response


class CreateResource(object):
    @falcon.before(max_body(64 * 1024))
    def on_post(self, req, resp):
@ -237,9 +225,6 @@ wsgi_app = api = application = falcon.API(middleware=[
    ])

# setup routes
res_inventory = InventoryResource()
api.add_route('/inventory', res_inventory)

res_create = CreateResource()
api.add_route('/vmcreate', res_create)