# -*- coding: utf-8 -*-
# required proxmox permissions: PVESysAdmin, PVEVMAdmin
#
# afx 2015-2016

# site
from proxmoxer import ProxmoxAPI
import base64
import json
import time
import socket
from unidecode import unidecode

#local
import grid
import utils
import ioconfig
import novnc

def auth(slave_name):
    """ return control object from config slave names """
    adminuser = ioconfig.parser.get('general', 'adminuser')
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    slavepass = ioconfig.parser.get(str(slave_name), 'password')
    slavetype = ioconfig.parser.get(str(slave_name), 'type')

    #vendor specific
    if slavetype == 'proxmoxia':
        import lib_proxmoxia #deferred import: only needed for proxmoxia-type slaves
        connection = lib_proxmoxia.Connector(slaveip)
        auth_token = connection.get_auth_token(adminuser, slavepass)
        proxobject = lib_proxmoxia.Proxmox(connection)
    if slavetype == 'proxmox':
        proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False)

    return proxobject

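# auth() expects an ini-style config readable by ioconfig.parser. a minimal
# sketch of the layout it reads (section and key names taken from the get()
# calls above; the values are made-up examples):
#
#   [general]
#   adminuser = root@pam
#
#   [lexx]
#   ipv4 = 10.0.0.2
#   password = secret
#   type = proxmox
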
def vmlist(proxobject):
    """ get vmlist """
    #we keep single-node proxmox slaves, so node id = 0
    #slave_name = proxobject.get('cluster/status')#'name']
    slave_name = proxobject.cluster.status.get()[0]['name']
    #query_kvm = proxobject.get('nodes/%s/qemu' % slave_name)
    query_kvm = proxobject.nodes(slave_name).qemu.get()
    query_lxc = proxobject.nodes(slave_name).lxc.get()

    for kvm_dict in query_kvm:
        kvm_dict['vmtype'] = 'kvm'
    for lxc_dict in query_lxc:
        lxc_dict['vmtype'] = 'lxc'
    vmlist = query_kvm + query_lxc #merge machine list
    return vmlist

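# a minimal usage sketch, assuming a slave section named 'lexx' exists in the
# config (the same static route vmcreate() uses below):
#
#   proxobject = auth('lexx')
#   for vm in vmlist(proxobject):
#       print(vm['vmid'], vm['vmtype'])
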
def vmcreate(req):
    """ create vm. returns JSON with data """
    try:
        region_id = ioconfig.parser.get(str(req['region']), 'regionid')
        region_fullname = ioconfig.parser.get(str(req['region']), 'fullname')
    except Exception:
        ioconfig.logger.error('grid> no region found')
        return None
    vm_name_utf8 = req['hostname']
    vm_name = unidecode(vm_name_utf8)
    try:
        vm_pass = req['vmpass']
    except Exception:
        vm_pass = 'kvm-no-pass'

    #generators
    slave_name = 'lexx' #static route
    #slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate route
    vmid = 4500
    #vmid = str(grid.generate_vmid()) #TODO: this should be between 100 and 65000
    cube_id = int(time.time()) #TODO: make sure this is unique. time since epoch is not random enough but should do the trick for now
    ipv4_list = grid.generate_ipv4(req['region'], req['vps_ipv4'])

    #metadata
    deploy = { 'cube': cube_id,
               'type': req['type'],
               'host': vm_name,
               'region': region_fullname,
               'slave': slave_name,
               'vmid': vmid,
               'cpu_mem_hdd': (req['vps_cpu'], req['vps_mem'], req['vps_hdd']),
               'clientid': req['clientid'],
               'clientname': req['clientname'],
               'clientemail': req['clientemail'],
               'os': req['vps_os'],
               'ipv4': ipv4_list }

    proxobject = auth(slave_name)
    slave_name = proxobject.cluster.status.get()[0]['name']

    ipv4_dict = {}
    ipidx = 0
    ioconfig.logger.info('slave[' + slave_name + ']> deploying %s at %s with %s and %s', vmid, region_id, ipv4_list, req)
    for ip in ipv4_list:
        ipv4_dict[str(ipidx)] = str(ip)
        ipidx += 1

    response = { 'status':'CREATE', 'vmid':vmid, 'name':vm_name, 'password':vm_pass, 'ipv4_0':str(ipv4_list[0]) }
    description = vm_name + ' (' + str(vmid) + ')\n' + 'owned by ' + req['clientname'] + ' (' + str(req['clientid']) + ')\n' + 'master ip: ' + str(ipv4_list[0])

    #create partition
    image_name = 'vm-' + str(vmid) + '-disk-0'
    local_storage = proxobject.nodes(slave_name).storage('lvm')
    local_storage.content.post(vmid=vmid,
                               filename=image_name,
                               size=str(req['vps_hdd']) + 'G')

    if req['type'] == 'KVM':
        create_result = proxobject.nodes(slave_name).qemu.post(vmid=vmid,
                                                               name=vm_name,
                                                               sockets=1,
                                                               cores=req['vps_cpu'],
                                                               memory=req['vps_mem'],
                                                               virtio0='lvm:' + image_name,
                                                               ide1='skyblue:iso/' + req['vps_os'] + ',media=cdrom',
                                                               net0='e1000,bridge=pub',
                                                               onboot=1,
                                                               description=description)

    if req['type'] == 'LXC':
        #nb: stock PVE also expects an 'ostemplate' parameter on lxc create; kept as in the original
        create_result = proxobject.nodes(slave_name).lxc.post(vmid=vmid,
                                                              hostname=vm_name,
                                                              password=vm_pass,
                                                              sockets=1,
                                                              cores=req['vps_cpu'],
                                                              memory=req['vps_mem'],
                                                              virtio0='lvm:' + image_name,
                                                              ip_address=str(ipv4_list[0]),
                                                              onboot=1,
                                                              description=description)

    print('result:')
    print(create_result)

    #start the machine
    time.sleep(7) #wait a few seconds for the slave to prepare the machine for its initial run
    vmstart(cube_id) #vmstart() resolves the cube id back to slave/vmid via grid.query_vm()
    return response

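# a minimal sketch of the request dict vmcreate() consumes (keys taken from
# the req[...] lookups above; all values are made-up examples):
#
#   req = { 'region': 'earth', 'hostname': 'test-vps', 'vmpass': 'secret',
#           'type': 'KVM', 'vps_ipv4': 1,
#           'vps_cpu': 2, 'vps_mem': 2048, 'vps_hdd': 20,
#           'vps_os': 'debian-8.iso',
#           'clientid': 42, 'clientname': 'client', 'clientemail': 'c@example.com' }
#   vmcreate(req)
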
def vmstatus(cubeid):
    """ returns the status of the machine """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> get status of %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.current.get()
    return result

def vmstart(cubeid):
    """ starts a machine """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> starting %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.start.post()
    response = { 'status':'START' }
    return response

def vmshutdown(cubeid):
    """ acpi shutdown the machine.. """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> acpi shutdown %s %s' % (slave_name, vm_type, vm_id))

    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.shutdown.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.shutdown.post()
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status':'SHUTDOWN', 'vmid':vm_id }
    return response

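#note: vmshutdown() above asks the guest for a clean acpi shutdown, while
#vmstop() below is the hard poweroff equivalent of pulling the plug
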
def vmstop(cubeid):
    """ poweroff the machine.. """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> power off %s %s' % (slave_name, vm_type, vm_id))

    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.stop.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.stop.post()
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status':'STOP', 'vmid':vm_id }
    return response

def vmsuspend(cubeid):
    """ suspend machine """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> suspending %s %s' % (slave_name, vm_type, vm_id))

    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.suspend.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.suspend.post()
    response = { 'status':'SUSPEND', 'vmid':vm_id }
    return response

def vmresume(cubeid):
    """ resume machine """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> resuming %s %s' % (slave_name, vm_type, vm_id))

    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.resume.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.resume.post()
    response = { 'status':'RESUME', 'vmid':vm_id }
    return response

def vmrrd(cubeid):
    """ retrieve rrd graphs (PNG) """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> query rrd of %s %s' % (slave_name, vm_type, vm_id))

    result = {}
    if vm_type == 'kvm':
        rcpu = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')

    if vm_type == 'lxc':
        rcpu = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
    response = { 'status':'RRD', 'vmid':vm_id, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
    return response

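# the per-cube helpers above all share one call shape; a hedged usage sketch
# (656758 is the made-up cube id from the module tests at the bottom):
#
#   print(vmstatus(656758))
#   vmsuspend(656758)
#   vmresume(656758)
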
def vmvnc(cubeid):
    """ invoke vnc ticket """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    proxobject = auth(slave_name)
    vm_type = vm_type.lower()
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s' % (slave_name, vm_type, vm_id))

    if vm_type == 'kvm':
        ticket = proxobject.nodes(slave_name).qemu(vm_id).vncproxy.post(websocket=1)
        #socket = proxobject.nodes(slave_name).qemu(vm_id).vncwebsocket.get(port=ticket['port'],
        #                                                                   vncticket=ticket['ticket'])
    if vm_type == 'lxc':
        ticket = proxobject.nodes(slave_name).lxc(vm_id).vncproxy.post()
        #socket = proxobject.nodes(slave_name).lxc(vm_id).vncwebsocket.get(port=ticket['port'],
        #                                                                  vncticket=ticket['ticket'])

    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    slave_id = ioconfig.parser.get(str(slave_name), 'id') #assumption: each slave section carries a numeric id used to offset the listen port
    #slaveport = socket['port']
    slaveport = ticket['port']
    listenport = str(int(slaveport) + 1000 + (int(slave_id) * 100)) #TODO: max 100 parallel connections/slave.
    myip = getmyip()

    vnc_target = { 'target_host': slaveip,
                   'target_port': slaveport,
                   'listen_host': myip,
                   'listen_port': listenport
                 }

    vnc_options = { 'idle-timeout': 20,
                    'verbose': True
                  }

    novnc.spawn(vnc_target, vnc_options)

    external_url = ioconfig.parser.get('general', 'novnc_url')
    prefix = external_url + "?host=" + myip + "&port=" + listenport + "&encrypt=0&true_color=1&password="
    vnc_url = prefix + ticket['ticket']

    ioconfig.logger.info('slave[{}]> vnc port {} ready'.format(slave_name, listenport))
    #response = { 'status':'VNC', 'fqdn':external_url, 'host':myip, 'port':listenport, 'encrypt':'0', 'true_color':'1', 'ticket':ticket['ticket'] }
    response = { 'status':'VNC', 'url':vnc_url }
    return response

def getmyip():
    #the UDP "connect" below sends no packets; it only selects the outbound
    #interface, so getsockname() returns the local address used to reach the net
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("gmail.com", 80))
    myip = s.getsockname()[0]
    s.close()
    return myip

if __name__ == '__main__':
    #internal module tests
    time.sleep(1)
    vmvnc(656758)
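    #additional smoke tests, commented out by default; they assume a reachable
    #slave and a grid db entry for the same made-up cube id used above:
    #print(vmstatus(656758))
    #print(vmrrd(656758))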