# proxmaster/plugin.py
# snapshot: 2017-09-21 03:07:56 +03:00
# (327 lines, 14 KiB, Python)
# -*- coding: utf-8 -*-
# required proxmox permissions: PVEAdmin
#
# afx 2015-2017
# site
from proxmoxer import ProxmoxAPI
import base64
import json
import time
import socket
import random
from unidecode import unidecode
#local
import grid
import utils
import ioconfig
import novnc
def auth(slave_name):
    """ Return a control object for the named slave from the config file.

    Reads the slave's address, admin credentials and backend type from the
    ini config and builds the vendor-specific API connection.

    :param slave_name: config section name of the slave
    :return: ProxmoxAPI handle for slaves of type 'proxmox'
    :raises ValueError: when the configured slave type is unsupported
    """
    adminuser = ioconfig.parser.get('general', 'adminuser')
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    slavepass = ioconfig.parser.get(str(slave_name), 'password')
    slavetype = ioconfig.parser.get(str(slave_name), 'type')
    #vendor specific
    #if slavetype == 'proxmoxia':
    #    connection = lib_proxmoxia.Connector(slaveip)
    #    auth_token = connection.get_auth_token(adminuser, slavepass)
    #    proxobject = lib_proxmoxia.Proxmox(connection)
    if slavetype == 'proxmox':
        proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False)
        return proxobject
    # original fell through and returned an unbound local (NameError);
    # fail loudly with a descriptive error instead
    raise ValueError('unsupported slave type: %s' % slavetype)
def vmlist(proxobject):
    """ Return the merged kvm + lxc machine list of a slave.

    Each entry is tagged with a 'vmtype' key so callers can tell the
    two backends apart.
    """
    # we keep single-node proxmox installs, so node index 0 is the slave itself
    slave_name = proxobject.cluster.status.get()[0]['name']
    node = proxobject.nodes(slave_name)
    kvm_machines = node.qemu.get()
    lxc_machines = node.lxc.get()
    for machine in kvm_machines:
        machine['vmtype'] = 'kvm'
    for machine in lxc_machines:
        machine['vmtype'] = 'lxc'
    # merged machine list
    return kvm_machines + lxc_machines
def vmcreate(req):
    """ Create a vm on a slave. Returns a dict with deployment data.

    :param req: request dict with keys 'region', 'hostname', 'type'
                ('kvm' or 'lxc'), 'clientid', 'clientemail', 'cpu',
                'mem', 'hdd' and optionally 'rootpass'
    :return: response dict ({'status': 'CREATE', ...} on success,
             {'status': 'FAIL'} on allocation failure, None when the
             region is unknown)
    """
    try:
        region_id = ioconfig.parser.get(str(req['region']), 'regionid')
        region_fullname = ioconfig.parser.get(str(req['region']), 'fullname')
    except Exception:
        ioconfig.logger.error('grid> no region found')
        return None

    vm_name_utf8 = req['hostname']
    vm_name = unidecode(vm_name_utf8)
    # fall back to a default root password when the request omits one
    try:
        vm_pass = req['rootpass']
    except KeyError:
        vm_pass = 'datapoint'

    #slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate route
    #slave_name = 'lexx'
    slave_name = 'warrior'
    vm_id = random.randint(1000, 9999)
    cubeid = int(time.time() * 10000 * 10000)
    deploy = { 'cube': int(cubeid),
               'type': req['type'],
               'clientid': req['clientid'],
               'clientemail': req['clientemail'],
               'hostname': vm_name,
               'region': region_fullname,
               'slave': slave_name,
               'vmid': vm_id,
               'cpu': req['cpu'],
               'mem': req['mem'],
               'hdd': req['hdd']
             }
    proxobject = auth(slave_name)
    real_slave_name = proxobject.cluster.status.get()[0]['name']
    description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientemail'] + ' (' + req['clientid'] + ')\n'

    if req['type'] == 'kvm':
        #create the backing disk volume first
        image_name = 'vm-' + str(vm_id) + '-disk-1'
        try:
            local_storage = proxobject.nodes(real_slave_name).storage('lvm')
            storage_create_result = local_storage.content.post(vmid=vm_id, filename=image_name, size=req['hdd'] + 'G')
            ioconfig.logger.info('slave[%s]> allocated %s as %s' % (slave_name, req['hdd'], image_name))
        except Exception:
            ioconfig.logger.info('slave[%s]> unable to allocate %s' % (slave_name, image_name))
            response = { 'status':'FAIL' }
            return response
        create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(vm_id),
                                                                    name=vm_name,
                                                                    sockets=1,
                                                                    cores=req['cpu'],
                                                                    memory=req['mem'],
                                                                    virtio0='file=lvm:' + image_name,
                                                                    onboot=1,
                                                                    description=description)
    elif req['type'] == 'lxc':
        # NOTE(review): the original also passed virtio0='file=lvm:' + image_name
        # here, but image_name is only bound in the kvm branch (NameError on every
        # lxc create) and virtio0 is a kvm-only parameter; containers use rootfs.
        create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(vm_id),
                                                                   cpus=req['cpu'],
                                                                   memory=req['mem'],
                                                                   swap=16,
                                                                   ostemplate='backup:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz',
                                                                   hostname=vm_name,
                                                                   password=vm_pass,
                                                                   rootfs='lvm:' + req['hdd'],
                                                                   onboot=1,
                                                                   description=description)
    else:
        # previously this fell through and crashed on an unbound create_result
        ioconfig.logger.error('grid> unsupported vm type %s' % req['type'])
        return { 'status':'FAIL' }

    print(str(create_result))
    #start the machine
    time.sleep(7) #wait few seconds for the slave to prepare the machine for initial run
    response = { 'status': 'CREATE', 'cube': cubeid, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name }
    grid.writedb(deploy)
    return response
def vmremove(cubeid):
    """ Terminate a vm: delete it on the slave and drop its db record.

    :param cubeid: cube identifier used to look the vm up in the grid db
    :return: result of the delete API call
    """
    # fixed: original unpacked 'vmhost, vmowner' but the log line below
    # referenced vm_host/vm_owner, raising NameError on every call
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).delete()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).delete()
    grid.deletedb(cubeid)
    return result
def vmstatus(cubeid):
    """ Return the current status of a machine, as reported by its slave. """
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
    node = proxobject.nodes(slave_name)
    if vm_type == 'kvm':
        current = node.qemu(vm_id).status.current.get()
    if vm_type == 'lxc':
        current = node.lxc(vm_id).status.current.get()
    return current
def vmstart(cubeid):
    """ Start a machine and report back. """
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
    node = proxobject.nodes(slave_name)
    if vm_type == 'kvm':
        node.qemu(vm_id).status.start.post()
    if vm_type == 'lxc':
        node.lxc(vm_id).status.start.post()
    #TODO: SET START AT BOOT FLAG
    return { 'status':'START' }
def vmshutdown(cubeid):
    """ Request a clean ACPI shutdown of a machine. """
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
    node = proxobject.nodes(slave_name)
    if vm_type == 'kvm':
        node.qemu(vm_id).status.shutdown.post()
    if vm_type == 'lxc':
        node.lxc(vm_id).status.shutdown.post()
    #TODO: REMOVE START AT BOOT FLAG
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    return { 'status':'SHUTDOWN', 'vmid':vm_id }
def vmstop(cubeid):
    """ Hard power-off of a machine (no guest cooperation). """
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
    node = proxobject.nodes(slave_name)
    if vm_type == 'kvm':
        node.qemu(vm_id).status.stop.post()
    if vm_type == 'lxc':
        node.lxc(vm_id).status.stop.post()
    #TODO: REMOVE START AT BOOT FLAG
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    return { 'status':'STOP', 'vmid':vm_id }
def vmsuspend(cubeid):
    """ Suspend (pause) a running machine. """
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
    node = proxobject.nodes(slave_name)
    if vm_type == 'kvm':
        node.qemu(vm_id).status.suspend.post()
    if vm_type == 'lxc':
        node.lxc(vm_id).status.suspend.post()
    return { 'status':'SUSPEND', 'vmid':vm_id }
def vmresume(cubeid):
    """ Resume a previously suspended machine. """
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
    node = proxobject.nodes(slave_name)
    if vm_type == 'kvm':
        node.qemu(vm_id).status.resume.post()
    if vm_type == 'lxc':
        node.lxc(vm_id).status.resume.post()
    return { 'status':'RESUME', 'vmid':vm_id }
def vmrrd(cubeid):
    """ Retrieve rrd graphs (PNG) plus current status for a machine.

    :param cubeid: cube identifier used to look the vm up in the grid db
    :return: dict with 'status' string and 'cpu'/'mem'/'net'/'hdd' rrd data
    """
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    # (dropped a stray proxobject.cluster.status.get() whose result was discarded)
    if vm_type == 'kvm':
        machine = proxobject.nodes(slave_name).qemu(vm_id)
        statusquery = machine.status.current.get()
        rcpu = machine.rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = machine.rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = machine.rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = machine.rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
        status = str(statusquery['qmpstatus'])
    if vm_type == 'lxc':
        # fixed: original read statusquery['qmpstatus'] with statusquery never
        # assigned in this branch (NameError); lxc status/current exposes a
        # plain 'status' field, not 'qmpstatus' — TODO confirm against API
        machine = proxobject.nodes(slave_name).lxc(vm_id)
        statusquery = machine.status.current.get()
        rcpu = machine.rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = machine.rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = machine.rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = machine.rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
        status = str(statusquery['status'])
    #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, vm_type, vm_id, vm_host, status))
    response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
    return response
def vmvnc(cubeid):
    """ Invoke a vnc ticket on the slave, spawn a local websocket proxy
    and return the ready-to-use vnc url. """
    slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
    node = proxobject.nodes(slave_name)
    if vm_type == 'kvm':
        ticket = node.qemu(vm_id).vncproxy.post(websocket=1)
    if vm_type == 'lxc':
        ticket = node.lxc(vm_id).vncproxy.post()
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    slaveport = ticket['port']
    slave_id = 1 #TODO: fix this
    vnchost = ioconfig.parser.get('general', 'novnc_host')
    # derive a local listen port from the slave's port plus a per-slave offset
    listenport = str(int(slaveport) + 1000 + (int(slave_id) * 100)) #TODO: max 100 parallel connections/slave.
    vnc_target = { 'target_host': slaveip,
                   'target_port': slaveport,
                   'listen_host': vnchost,
                   'listen_port': listenport
                 }
    vnc_options = { 'idle-timeout': 20,
                    'verbose': True,
                    'cert': ioconfig.parser.get('general', 'ssl_cert'),
                    'key': ioconfig.parser.get('general', 'ssl_key'),
                    'ssl-only': True
                  }
    novnc.spawn(vnc_target, vnc_options)
    external_url = ioconfig.parser.get('general', 'novnc_url')
    prefix = external_url + "?host=" + vnchost + "&port=" + listenport + "&view_only=false&encrypt=1&true_color=1&password="
    vnc_url = prefix + ticket['ticket']
    time.sleep(3) #wait few seconds for the parallel vncwebsocket
    ioconfig.logger.info('{}[{}]> vnc port {} ready'.format(vm_owner, slave_name, listenport))
    return { 'status':'VNC', 'url':vnc_url }
#def getmyip():
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.connect(("gmail.com",80))
# myip = s.getsockname()[0]
# s.close
# return myip
# manual smoke-test entry point; real callers import this module
if __name__ == '__main__':
    #internal module tests
    time.sleep(1)
    #vmvnc(656758)