api data redesign

parent 5ae9ecc575
commit add7529a2e

3 changed files with 256 additions and 317 deletions

grid.py (63 lines changed)
@@ -19,51 +19,53 @@ import ioconfig
 logger = ioconfig.logger
 config = ioconfig.parser
 
-def queryvm(req_cube_id):
-    """ returns slave_name, vm_id and vm_type for the requested cubeid """
-    data = querydb(req_cube_id)
-    #print(data)
-    return data['slave'], data['type'], data['vmid'], data['hostname'], data['clientemail']
+def query(json):
+    if json['type'] == 'deploy' or json['type'] == 'router':
+        data = read(json)
+        return data['slave'], data['type'], data['phyid'], data['hostname'], data['clientemail']
+
+    if json['type'] == 'bridge':
+        data = read(json)
+        return data['slave'], data['type'], data['phyid'], data['clientemail']
+
+
+def create(data):
+    write(data)
 
-def querydb(cubeid):
+def read(data):
     """ open a metadata file """
     try:
-        dbfile = 'db/vm.{}.json'.format(cubeid)
+        dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id'])
         dbf = open(dbfile, 'r')
         data = json.load(dbf)
         dbf.close()
-        #logger.info('{}> --> {}'.format(dbfile, data))
+        logger.info('{}> --> {}'.format(dbfile, data))
         return data
     except Exception as e:
         logger.critical('{}> '.format(e))
         pass
         return None
 
-def writedb(src_data):
+def write(data):
    """ create new metadata file """
     try:
-        dbfile = 'db/vm.{}.json'.format(src_data['cube'])
+        dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id'])
         dbf = open(dbfile, 'w')
-        json.dump(src_data, dbf)
+        json.dump(data, dbf)
         dbf.close()
-        #TODO: send mail
-        logger.info('grid> {} --> {}'.format(src_data, dbfile))
+        logger.info('grid> {} --> {}'.format(data, dbfile))
     except Exception as e:
         logger.critical('grid> {}'.format(e))
         pass
         return None
 
-def deletedb(cubeid):
+def delete(unit_type, unit_id):
     """ remove metadata file """
-    dbfile = 'db/vm.{}.json'.format(cubeid)
+    dbfile = 'db/{}.{}.json'.format(unit_type, unit_id)
+    #TODO: perhaps just move the datafile to an archive directory
     os.remove(dbfile)
     return None
 
-def query_happiness(region_id):
+def analyze_happiness(region_id):
     """ analyzes grid data for the reuqested region and returns proposed slave_id,
         based on a "happiness" factor. happiness means alive and free :) """
     grid_data = readcache()
@@ -93,6 +95,7 @@ def query_happiness(region_id):
     return happy_slave
 
 
+### DEPRECATED
 def generate_vmid():
     """ analyzes cached grid data and return proposed vmid for new machines """
     grid_data = readcache()
@@ -144,29 +147,6 @@ def findDiff(d1, d2, path=""):
             logger.warning('cache> ' + str(k) + ' ' + str(d1[k]) + ' [-]')
             logger.warning('cache> ' + str(k) + ' ' + str(d2[k]) + ' [+]')
 
 
-### DEPRECATED
-def genmac(int_value):
-    """ convert kinda long enough int to MAC string """
-    prefix_sum = sum(int(digit) for digit in str(int_value))
-    if (prefix_sum > 255):
-        prefix_hex = 'ff'
-    else:
-        prefix_hex = format(prefix_sum, 'x')
-
-    suffix = int(str(int_value)[-12:])
-    suffix_hex = format(suffix, 'x')
-    length = len(suffix_hex)
-    suffix_hex = suffix_hex.zfill(length+length%2)
-
-    addr = prefix_hex + suffix_hex
-    #logger.info('grid> mac-string {} genrated from: {} ({}->{}) ({}->{}) '.format(addr, int_value, prefix, prefix_hex, suffix, suffix_hex))
-    print('grid> mac-string {} genrated from: {} (sum {}->{}) ({}->{}) '.format(addr, int_value, prefix_sum, prefix_hex, suffix, suffix_hex))
-
-    return ':'.join(addr[i:i+2] for i in range(0,len(addr),2))
-
-
 def generate_ipv4(region_name, how_many=1):
     """ this function should check the range, exclude deployed machines and return a list of available ips """
     ip_range_min = ioconfig.parser.get(str(region_name), 'ipv4_min')

@@ -221,7 +201,6 @@ def generate_ipv4(region_name, how_many=1):
     logger.info('region[{}]> ip addresses {} selected.'.format(str(region_name), str(requested_ips)))
     return requested_ips
 
-
 def readreal():
     """ read the current state and return its contents """
     try:
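
Side note on the grid.py change above: the per-unit metadata files move from db/vm.<cubeid>.json to db/<type>.<unit_id>.json, and querydb/writedb/deletedb become the generic read/write/delete. A minimal round-trip sketch of the new naming scheme; the record shape follows the data dict that plugin.create() builds below, and all values are invented placeholders:

import json
import os

def metadata_path(unit_type, unit_id):
    # the naming scheme introduced by this commit: db/<type>.<unit_id>.json
    return 'db/{}.{}.json'.format(unit_type, unit_id)

os.makedirs('db', exist_ok=True)  # the repo ships a db/ directory; created here for a standalone run

# hypothetical unit record, shaped like the 'data' dict built in plugin.create()
unit = { 'unit_id': 150000000000000000,
         'type': 'kvm',
         'clientid': '42',
         'clientemail': 'client@example.com',
         'hostname': 'testvm',
         'region': 'Example Region',
         'slave': 'warrior',
         'phyid': 4242 }

# what grid.write() does with the record
with open(metadata_path(unit['type'], unit['unit_id']), 'w') as dbf:
    json.dump(unit, dbf)

# what grid.read() does, given a document carrying 'type' and 'unit_id'
with open(metadata_path(unit['type'], unit['unit_id']), 'r') as dbf:
    restored = json.load(dbf)
assert restored['phyid'] == unit['phyid']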

plugin.py (350 lines changed)
@@ -25,257 +25,248 @@ def auth(slave_name):
     slavepass = ioconfig.parser.get(str(slave_name), 'password')
     slavetype = ioconfig.parser.get(str(slave_name), 'type')
 
-    #vendor specific
-    #if slavetype == 'proxmoxia':
-    #    connection = lib_proxmoxia.Connector(slaveip)
-    #    auth_token = connection.get_auth_token(adminuser, slavepass)
-    #    proxobject = lib_proxmoxia.Proxmox(connection)
     if slavetype == 'proxmox':
         proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False)
     return proxobject
 
-def vmlist(proxobject):
-    """ get vmlist """
-    #slave_name = proxobject.get('cluster/status')#'name']
-    #we keep a single node proxmoxes so node id = 0
-    slave_name = proxobject.cluster.status.get()[0]['name']
-    #query_kvm = proxobject.get('nodes/%s/qemu' % slave_name)
-    query_kvm = proxobject.nodes(slave_name).qemu.get()
-    query_lxc = proxobject.nodes(slave_name).lxc.get()
-    for kvm_dict in query_kvm:
-        kvm_dict['vmtype'] = 'kvm'
-    for lxc_dict in query_lxc:
-        lxc_dict['vmtype'] = 'lxc'
-    vmlist = query_kvm + query_lxc #merge machine list
-    return vmlist
-
-def vmcreate(req):
-    """ create vm. returns JSON with data """
+def create(json):
+    """ create an unit. returns JSON with data """
     try:
-        region_id = ioconfig.parser.get(str(req['region']), 'regionid')
-        region_fullname = ioconfig.parser.get(str(req['region']), 'fullname')
+        region_id = ioconfig.parser.get(str(json['region']), 'regionid')
+        region_fullname = ioconfig.parser.get(str(json['region']), 'fullname')
     except:
-        ioconfig.logger.error('grid> no region found')
+        ioconfig.logger.error('grid> region not found')
         return None
-    vm_name_utf8 = req['hostname']
+    vm_name_utf8 = json['hostname']
     vm_name = unidecode(vm_name_utf8)
     try:
-        vm_pass = req['rootpass']
+        vm_pass = json['rootpass']
     except:
-        vm_pass = 'datapoint'
+        vm_pass = '!%%^)@&&(K3B'
     #slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate route
-    slave_name = 'lexx'
-    #slave_name = 'warrior'
-    vm_id = random.randint(1000, 9999)
-    cubeid = int(time.time() * 10000 * 10000)
-    deploy = { 'cube': int(cubeid),
-               'type': req['type'],
-               'clientid': req['clientid'],
-               'clientemail': req['clientemail'],
-               'hostname': vm_name,
-               'region': region_fullname,
-               'slave': slave_name,
-               'vmid': vm_id,
-               'cpu': req['cpu'],
-               'mem': req['mem'],
-               'hdd': req['hdd']
-             }
+    #slave_name = 'lexx'
+    slave_name = 'warrior'
+    unit_id = int(time.time() * 10000 * 10000)
+    phy_id = grid.phyidgen(json['type'])
     proxobject = auth(slave_name)
     real_slave_name = proxobject.cluster.status.get()[0]['name']
-    description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientemail'] + ' (' + req['clientid'] + ')\n'
+    description = vm_name + ' (' + str(unit_id) + '-' + str(phy_id) + ')\n' + 'owned by ' + json['clientemail'] + ' (' + json['clientid'] + ')\n'
 
-    if req['type'] == 'kvm':
+    if json['type'] == 'deploy':
         #create partition
-        image_name = 'vm-' + str(vm_id) + '-disk-1'
+        image_name = 'vm-' + str(phy_id) + '-disk-1'
         try:
             local_storage = proxobject.nodes(real_slave_name).storage('lvm')
-            storage_create_result = local_storage.content.post(vmid=vm_id, filename=image_name, size=req['hdd'] + 'G')
-            ioconfig.logger.info('slave[%s]> allocated %s as %s' % (slave_name, req['hdd'], image_name))
+            storage_create_result = local_storage.content.post(vmid=phy_id, filename=image_name, size=json['hdd'] + 'G')
+            ioconfig.logger.info('%s[%s]> allocated %s as %s' % (json['clientemail'], slave_name, json['hdd'], image_name))
         except:
-            ioconfig.logger.info('slave[%s]> unable to allocate %s' % (slave_name, image_name))
+            ioconfig.logger.info('%s[%s]> unable to allocate %s' % (json['clientemail'], slave_name, image_name))
             response = { 'status':'FAIL' }
             return response
 
-        create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(vm_id),
+        create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(phy_id),
                                                                     name=vm_name,
-                                                                    sockets=1,
-                                                                    cores=req['cpu'],
-                                                                    memory=req['mem'],
-                                                                    virtio0='file=lvm:' + image_name,
-                                                                    onboot=1,
-                                                                    description=description)
-
-    if req['type'] == 'lxc':
-        create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(vm_id),
-                                                                   cpus=req['cpu'],
-                                                                   memory=req['mem'],
-                                                                   swap=16,
-                                                                   ostemplate='backup:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz',
-                                                                   hostname=vm_name,
-                                                                   password=vm_pass,
-                                                                   rootfs='lvm:' + req['hdd'],
-                                                                   virtio0='file=lvm:' + image_name,
-                                                                   onboot=1,
-                                                                   description=description)
-
-    print(str(create_result))
-
-    #start the machihe
+                                                                    onboot=1,
+                                                                    sockets=1,
+                                                                    cores=json['cpu'],
+                                                                    memory=json['mem'],
+                                                                    scsihw='virtio-scsi-pci',
+                                                                    scsi0='file=lvm:' + image_name + ',discard=on',
+                                                                    description=description)
+        data = { 'unit_id': int(unit_id),
+                 'type': 'kvm',
+                 'clientid': json['clientid'],
+                 'clientemail': json['clientemail'],
+                 'hostname': vm_name,
+                 'region': region_fullname,
+                 'slave': slave_name,
+                 'phyid': phy_id
+               }
+
+    if json['type'] == 'router':
+        create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(phy_id),
+                                                                   hostname=vm_name,
+                                                                   onboot=1,
+                                                                   unprivileged=1,
+                                                                   password=vm_pass,
+                                                                   cores=json['cpu'],
+                                                                   memory=json['mem'],
+                                                                   net0='name=eth0,bridge=' + json['bridge_id'] + ',gw=' + json['region_gw'] + ',hwaddr=' + json['macaddr'] + ',ip=' + json['ipv4addr'] + '/' + json['region_netmask'],
+                                                                   ostemplate='backup:vztmpl/debian-9.0-standard_9.0-2_amd64.tar.gz',
+                                                                   rootfs='volume=lvm:' + image_name,
+                                                                   swap=32,
+                                                                   description=description)
+        data = { 'unit_id': int(unit_id),
+                 'type': 'lxc',
+                 'clientid': json['clientid'],
+                 'clientemail': json['clientemail'],
+                 'hostname': vm_name,
+                 'region': region_fullname,
+                 'slave': slave_name,
+                 'phyid': phy_id
+               }
+
+    if json['type'] == 'bridge':
+        #TODO: CREATE BRIDGE
+        data = { 'unit_id': int(unit_id),
+                 'type': 'vmbr',
+                 'clientid': json['clientid'],
+                 'clientemail': json['clientemail'],
+                 'region': region_fullname,
+                 'slave': slave_name,
+                 'phyid': phy_id
+               }
+
     time.sleep(7) #wait few seconds for the slave to prepare the machine for initial run
 
-    response = { 'status': 'CREATE', 'cube': cubeid, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name }
-    grid.writedb(deploy)
+    grid.create(data)
+    response = { 'status': 'CREATED', 'unit_id': unit_id, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name }
     return response
 
-def vmremove(cubeid):
-    """ terminate a vm """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+def remove(json):
+    """ terminate an unit """
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).delete()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).delete()
-    grid.deletedb(cubeid)
-    response = { 'status':'DELETE', 'cube': cubeid, 'hostname': vm_host }
+    ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).delete()
+        grid.delete(json)
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).delete()
+        grid.delete(json)
+    response = { 'status':'DELETED'}
     return response
 
-def vmstatus(cubeid):
-    """ returns the status of the machine """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+def status(json):
+    """ returns the status of an unit """
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.current.get()
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
     return result
 
-def vmstart(cubeid):
+def start(json):
     """ starts a machine """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.start.post()
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.start.post()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.start.post()
     #TODO: SET START AT BOOT FLAG
     response = { 'status':'START' }
     return response
 
-def vmshutdown(cubeid):
+def shutdown(json):
     """ acpi shutdown the machine.. """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
 
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.shutdown.post()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.shutdown.post()
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.shutdown.post()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.shutdown.post()
     #TODO: REMOVE START AT BOOT FLAG
     #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
-    response = { 'status':'SHUTDOWN', 'vmid':vm_id }
+    response = { 'status':'SHUTDOWN', 'vmid':phy_id }
     return response
 
-def vmstop(cubeid):
+def stop(json):
     """ poweroff the machine.. """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
 
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.stop.post()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.stop.post()
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.stop.post()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.stop.post()
     #TODO: REMOVE START AT BOOT FLAG
     #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
-    response = { 'status':'STOP', 'vmid':vm_id }
+    response = { 'status':'STOP', 'vmid':phy_id }
     return response
 
-def vmsuspend(cubeid):
+def suspend(json):
     """ suspend machine """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
 
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.suspend.post()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.suspend.post()
-    response = { 'status':'SUSPEND', 'vmid':vm_id }
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.suspend.post()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.suspend.post()
+    response = { 'status':'SUSPEND', 'vmid':phy_id }
     return response
 
-def vmresume(cubeid):
+def resume(json):
     """ resume machine """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
 
-    if vm_type == 'kvm':
-        result = proxobject.nodes(slave_name).qemu(vm_id).status.resume.post()
-    if vm_type == 'lxc':
-        result = proxobject.nodes(slave_name).lxc(vm_id).status.resume.post()
-    response = { 'status':'RESUME', 'vmid':vm_id }
+    if unit_type == 'kvm':
+        result = proxobject.nodes(slave_name).qemu(phy_id).status.resume.post()
+    if unit_type == 'lxc':
+        result = proxobject.nodes(slave_name).lxc(phy_id).status.resume.post()
+    response = { 'status':'RESUME', 'vmid':phy_id }
     return response
 
-def vmrrd(cubeid):
+def vmrrd(json):
     """ retrieve rrd graphs (PNG) """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
     proxobject.cluster.status.get()[0]['name']
 
     result = {}
-    if vm_type == 'kvm':
-        statusquery = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
-        rcpu = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
-        rmem = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
-        rnet = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
-        rhdd = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
+    if unit_type == 'kvm':
+        statusquery = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
+        rcpu = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
+        rmem = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
+        rnet = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
+        rhdd = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
         status = str(statusquery['qmpstatus'])
 
-    if vm_type == 'lxc':
-        status = proxobject.nodes(slave_name).lxc(vm_id).status.current.get()
-        rcpu = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
-        rmem = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
-        rnet = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
-        rhdd = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
+    if unit_type == 'lxc':
+        status = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
+        rcpu = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
+        rmem = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
+        rnet = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
+        rhdd = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
         status = str(statusquery['qmpstatus']) #TODO: maybe change this?
 
-    #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, vm_type, vm_id, vm_host, status))
+    #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, unit_type, phy_id, vm_host, status))
 
     response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
     return response
 
-def vmvnc(cubeid):
+def vmvnc(json):
     """ invoke vnc ticket """
-    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
+    slave_name, unit_type, phy_id, vm_host, vm_owner = grid.query(json)
     proxobject = auth(slave_name)
-    #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
+    #slave_name = proxobject.cluster.status.get()[0]['name']
+    ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
 
-    if vm_type == 'kvm':
-        ticket = proxobject.nodes(slave_name).qemu(vm_id).vncproxy.post(websocket=1)
-        #socket = proxobject.nodes(slave_name).qemu(vm_id).vncwebsocket.get(port=ticket['port'],
+    if unit_type == 'kvm':
+        ticket = proxobject.nodes(slave_name).qemu(phy_id).vncproxy.post(websocket=1)
+        #socket = proxobject.nodes(slave_name).qemu(phy_id).vncwebsocket.get(port=ticket['port'],
         #                                                                   vncticket=ticket['ticket'])
-    if vm_type == 'lxc':
-        ticket = proxobject.nodes(slave_name).lxc(vm_id).vncproxy.post()
-        #socket = proxobject.nodes(slave_name).lxc(vm_id).vncwebsocket.get(port=ticket['port'],
+    if unit_type == 'lxc':
+        ticket = proxobject.nodes(slave_name).lxc(phy_id).vncproxy.post()
+        #socket = proxobject.nodes(slave_name).lxc(phy_id).vncwebsocket.get(port=ticket['port'],
         #                                                                  vncticket=ticket['ticket'])
 
     slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
@@ -311,14 +302,19 @@ def vmvnc(cubeid):
     #print(vnc_url)
     return response
 
-#def getmyip():
-#    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-#    s.connect(("gmail.com",80))
-#    myip = s.getsockname()[0]
-#    s.close
-#    return myip
+#DEPRECATED
+def vmlist(proxobject):
+    """ get unit list """
+    #we keep a single node proxmoxes so node id = 0
+    slave_name = proxobject.cluster.status.get()[0]['name']
+    query_kvm = proxobject.nodes(slave_name).qemu.get()
+    query_lxc = proxobject.nodes(slave_name).lxc.get()
+    for kvm_dict in query_kvm:
+        kvm_dict['vmtype'] = 'kvm'
+    for lxc_dict in query_lxc:
+        lxc_dict['vmtype'] = 'lxc'
+    vmlist = query_kvm + query_lxc #merge machine list
+    return vmlist
 
 if __name__ == '__main__':
     #internal module tests
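
For reference, plugin.create() now dispatches on json['type'] with the values 'deploy', 'router' and 'bridge' rather than the old 'kvm'/'lxc' flag, and takes everything else from the same JSON document. A sketch of the request bodies it reads, with keys taken from the accesses visible in the diff; every value below is an assumed placeholder. Note that grid.phyidgen(), which supplies phy_id, is referenced here but not defined in this commit.

# hypothetical 'deploy' request body for plugin.create(); keys inferred from the diff
deploy_request = {
    'type': 'deploy',            # 'deploy' | 'router' | 'bridge'
    'region': 'ams',             # must match a section in the ini config (regionid/fullname)
    'hostname': 'testvm',
    'rootpass': 'secret',        # optional; a default is substituted when missing
    'clientid': '42',
    'clientemail': 'client@example.com',
    'cpu': '1',
    'mem': '512',
    'hdd': '10',                 # GiB, used for the lvm image allocation
}

# a 'router' request additionally carries the network fields used to build net0:
router_extra = {
    'bridge_id': 'vmbr1',
    'region_gw': '10.0.0.1',
    'region_netmask': '24',
    'macaddr': 'ff:aa:bb:cc:dd:ee',
    'ipv4addr': '10.0.0.5',
}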

proxmaster.py (160 lines changed)
@@ -23,7 +23,7 @@ def welcome():
     logger.info('# proxmaster ][ (c) 2015-2017 deflax.net #')
 
 
-def selector(fn, req, vmid=0):
+def selector(fn, req):
     """ try to exec commands """
     json = req.context['doc']
     #print(json)
@@ -35,43 +35,35 @@ def selector(fn, req, vmid=0):
         return status, body
 
     try:
-        if fn == 'vmcreate':
-            body = plugin.vmcreate(json)
-
-        elif fn == 'vmremove':
-            body = plugin.vmremove(vmid)
-
-        elif fn == 'vmstatus':
-            body = plugin.vmstatus(vmid)
-
-        elif fn == 'vmsuspend':
-            body = plugin.vmsuspend(vmid)
-
-        elif fn == 'vmresume':
-            body = plugin.vmresume(vmid)
-
-        elif fn == 'vmstart':
-            body = plugin.vmstart(vmid)
-
-        elif fn == 'vmshutdown':
-            body = plugin.vmshutdown(vmid)
-
-        elif fn == 'vmstop':
-            body = plugin.vmstop(vmid)
+        if fn == 'create':
+            body = plugin.create(json)
+        elif fn == 'remove':
+            body = plugin.remove(json)
+        elif fn == 'status':
+            body = plugin.status(json)
+        elif fn == 'start':
+            body = plugin.start(json)
+        elif fn == 'shutdown':
+            body = plugin.shutdown(json)
+        elif fn == 'stop':
+            body = plugin.stop(json)
+        elif fn == 'suspend':
+            body = plugin.suspend(json)
+        elif fn == 'resume':
+            body = plugin.resume(json)
 
         elif fn == 'vmrrd':
-            body = plugin.vmrrd(vmid)
+            body = plugin.vmrrd(json)
 
         elif fn == 'vmvnc':
-            body = plugin.vmvnc(vmid)
+            body = plugin.vmvnc(json)
 
     except:
         logger.critical('grid> {} error'.format(fn))
         status = falcon.HTTP_404
         raise
 
     else:
-        #logger.info('grid> {} ok'.format(fn))
+        logger.info('grid> {}'.format(fn))
         status = falcon.HTTP_202
 
     return status, body
@@ -135,85 +127,74 @@ def max_body(limit):
     return hook
 
 
-class CreateResource(object):
+class CreateUnit(object):
     @falcon.before(max_body(64 * 1024))
     def on_post(self, req, resp):
-        """Create a cluster node, returns array of: status, vmid, pass, ipv4, """
-        logger.info('grid> create new cube')
-        resp.status, response = selector('vmcreate', req)
+        """ creates an unit """
+        resp.status, response = selector('create', req)
         req.context['result'] = response
 
-class RemoveResource(object):
+class RemoveUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
-        """ remove machine completely"""
-        logger.info('grid> remove ' + str(vmid))
-        resp.status, response = selector('vmremove', req, vmid)
+    def on_post(self, req, resp):
+        """ removes unit completely"""
+        resp.status, response = selector('remove', req)
         req.context['result'] = response
 
-class StatusResource(object):
+class StatusUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
-        """ check vm status """
-        logger.info('grid> status ' + str(vmid))
-        resp.status, response = selector('vmstatus', req, vmid)
+    def on_post(self, req, resp):
+        """ checks unit status """
+        resp.status, response = selector('status', req)
         req.context['result'] = response
 
-class SuspendResource(object):
+class SuspendUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Temporary suspend the instance """
-        #logger.info('grid> suspend ' + str(vmid))
-        resp.status, response = selector('vmsuspend', req, vmid)
+        resp.status, response = selector('suspend', req)
         req.context['result'] = response
 
-class ResumeResource(object):
+class ResumeUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Unuspend the instance """
-        #logger.info('grid> resume ' + str(vmid))
-        resp.status, response = selector('vmresume', req, vmid)
+        resp.status, response = selector('resume', req)
         req.context['result'] = response
 
-class StartResource(object):
+class StartUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Start the instance """
-        logger.info('grid> start ' + str(vmid))
-        resp.status, response = selector('vmstart', req, vmid)
+        resp.status, response = selector('start', req)
         req.context['result'] = response
 
-class ShutdownResource(object):
+class ShutdownUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ ACPI Shutdown the instance """
-        logger.info('grid> shutdown ' + str(vmid))
-        resp.status, response = selector('vmshutdown', req, vmid)
+        resp.status, response = selector('shutdown', req)
         req.context['result'] = response
 
-class StopResource(object):
+class StopUnit(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Stop the instance """
-        logger.info('grid> stop ' + str(vmid))
-        resp.status, response = selector('vmstop', req, vmid)
+        resp.status, response = selector('stop', req)
         req.context['result'] = response
 
-class RRDResource(object):
+class RRDVM(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Generate rrd pngs """
-        #logger.info('grid> rrd ' + str(vmid))
-        resp.status, response = selector('vmrrd', req, vmid)
+        resp.status, response = selector('vmrrd', req)
         req.context['result'] = response
 
-class VNCResource(object):
+class VNCVM(object):
     @falcon.before(max_body(64 * 1024))
-    def on_post(self, req, resp, vmid):
+    def on_post(self, req, resp):
         """ Create a VNC link to the instance """
-        logger.info('grid> vnc ' + str(vmid))
-        resp.status, response = selector('vmvnc', req, vmid)
+        resp.status, response = selector('vmvnc', req)
         req.context['result'] = response
 
 
 if __name__ == '__main__':
@@ -225,35 +206,18 @@ wsgi_app = api = application = falcon.API(middleware=[
 ])
 
 # setup routes
-res_create = CreateResource()
-api.add_route('/vmcreate', res_create)
-
-res_remove = RemoveResource()
-api.add_route('/vmremove/{vmid}', res_remove)
-
-res_status = StatusResource()
-api.add_route('/vmstatus/{vmid}', res_status)
-
-res_suspend = SuspendResource()
-api.add_route('/vmsuspend/{vmid}', res_suspend)
-
-res_resume = ResumeResource()
-api.add_route('/vmresume/{vmid}', res_resume)
-
-res_start = StartResource()
-api.add_route('/vmstart/{vmid}', res_start)
-
-res_shutdown = ShutdownResource()
-api.add_route('/vmshutdown/{vmid}', res_shutdown)
-
-res_stop = StopResource()
-api.add_route('/vmstop/{vmid}', res_stop)
-
-res_rrd = RRDResource()
-api.add_route('/vmrrd/{vmid}', res_rrd)
-
-res_vnc = VNCResource()
-api.add_route('/vmvnc/{vmid}', res_vnc)
+api.add_route('/create', CreateUnit())
+api.add_route('/remove', RemoveUnit())
+api.add_route('/status', StatusUnit())
+
+api.add_route('/start', StartUnit())
+api.add_route('/suspend', SuspendUnit())
+api.add_route('/resume', ResumeUnit())
+api.add_route('/shutdown', ShutdownUnit())
+api.add_route('/stop', StopUnit())
+
+api.add_route('/vmrrd', RRDVM())
+api.add_route('/vmvnc', VNCVM())
 
 #display motd
 welcome()
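
With the /{vmid} path parameter gone, every endpoint is now a plain POST whose JSON body identifies the unit; plugin hands that document to grid.query(), which resolves it to (slave, type, phyid, ...). A usage sketch with the requests library, reusing the hypothetical deploy_request from the plugin.py note above; the base URL and all field values are assumptions, not part of the commit:

import requests

BASE = 'http://127.0.0.1:8000'  # assumed bind address; not specified in this commit

# create a new unit (CreateUnit route); the JSON translator middleware is
# assumed to serialize req.context['result'] into the response body
created = requests.post(BASE + '/create', json=deploy_request).json()
print(created)  # { 'status': 'CREATED', 'unit_id': ..., 'hostname': ..., ... }

# later calls carry the unit identity inside the body; grid.read() builds the
# metadata filename db/<type>.<unit_id>.json from exactly these two fields
unit = { 'type': 'deploy', 'unit_id': created['unit_id'] }
requests.post(BASE + '/status', json=unit)
requests.post(BASE + '/start', json=unit)
requests.post(BASE + '/stop', json=unit)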