proxmaster/plugin.py

339 lines
15 KiB
Python
Raw Normal View History

2016-02-15 05:30:43 -05:00
# -*- coding: utf-8 -*-
2016-11-11 09:56:35 -05:00
# required proxmox permissions: PVEAdmin
2016-02-15 05:30:43 -05:00
#
2017-09-20 20:07:56 -04:00
# afx 2015-2017
2016-02-15 05:30:43 -05:00
# site
from proxmoxer import ProxmoxAPI
import base64
import json
import time
import socket
2016-11-03 00:25:01 -04:00
import random
2016-05-08 08:44:19 -04:00
from unidecode import unidecode
2016-02-15 05:30:43 -05:00
#local
import grid
import utils
import ioconfig
import novnc
2016-11-03 00:05:51 -04:00
def auth(slave_name):
    """ return control object from config slave names """
    adminuser = ioconfig.parser.get('general', 'adminuser')
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    slavepass = ioconfig.parser.get(str(slave_name), 'password')
    slavetype = ioconfig.parser.get(str(slave_name), 'type')

    if slavetype == 'proxmox':
        proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False)
        return proxobject
    # previously an unsupported slave type fell through to the return and
    # raised UnboundLocalError on 'proxobject'; fail with a clear message.
    raise ValueError('unsupported slave type: {}'.format(slavetype))
2016-02-15 05:30:43 -05:00
2017-10-19 11:55:09 -04:00
def create(json):
    """ create an unit. returns JSON with data

    'json' is the request dict (name shadows the stdlib json module; kept
    for backward compatibility with existing callers).
    """
    try:
        region_id = ioconfig.parser.get(str(json['region']), 'regionid')
        region_fullname = ioconfig.parser.get(str(json['region']), 'fullname')
    except Exception:
        ioconfig.logger.error('grid> region not found')
        return None

    vm_name_utf8 = json['hostname']
    vm_name = unidecode(vm_name_utf8)  # proxmox unit names must be ascii
    try:
        vm_pass = json['rootpass']
    except KeyError:
        vm_pass = '!%%^)@&&(K3B'

    #slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate route
    #slave_name = 'lexx'
    slave_name = 'warrior'
    unit_id = int(time.time() * 10000 * 10000)  # time-derived unique unit id
    phy_id = grid.phyidgen(json['type'])

    proxobject = auth(slave_name)
    real_slave_name = proxobject.cluster.status.get()[0]['name']
    description = vm_name + ' (' + str(unit_id) + '-' + str(phy_id) + ')\n' + 'owned by ' + json['clientemail'] + ' (' + json['clientid'] + ')\n'

    # disk image name is used by both the 'deploy' (kvm) and 'router' (lxc)
    # paths. it was previously defined only inside the deploy branch, so
    # creating a router raised NameError.
    image_name = 'vm-' + str(phy_id) + '-disk-1'
    data = None
    response = None

    if json['type'] == 'deploy':
        #create partition
        try:
            local_storage = proxobject.nodes(real_slave_name).storage('lvm')
            storage_create_result = local_storage.content.post(vmid=phy_id, filename=image_name, size=json['hdd'] + 'G')
            ioconfig.logger.info('%s[%s]> allocated %s as %s' % (json['clientemail'], slave_name, json['hdd'], image_name))
        except Exception:
            ioconfig.logger.info('%s[%s]> unable to allocate %s' % (json['clientemail'], slave_name, image_name))
            response = { 'status':'FAIL' }
            return response

        create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(phy_id),
                                                                    name=vm_name,
                                                                    onboot=1,
                                                                    sockets=1,
                                                                    cores=json['cpu'],
                                                                    memory=json['mem'],
                                                                    scsihw='virtio-scsi-pci',
                                                                    scsi0='file=lvm:' + image_name + ',discard=on',
                                                                    description=description)
        data = { 'unit_id': int(unit_id),
                 'type': 'kvm',
                 'clientid': json['clientid'],
                 'clientemail': json['clientemail'],
                 'hostname': vm_name,
                 'region': region_fullname,
                 'slave': slave_name,
                 'phyid': phy_id
               }
        response = { 'status': 'deploy_created', 'unit_id': unit_id, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name }

    if json['type'] == 'router':
        create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(phy_id),
                                                                   hostname=vm_name,
                                                                   onboot=1,
                                                                   unprivileged=1,
                                                                   password=vm_pass,
                                                                   cores=json['cpu'],
                                                                   memory=json['mem'],
                                                                   net0='name=eth0,bridge=' + json['bridge_id'] + ',gw=' + json['region_gw'] + ',hwaddr=' + json['macaddr'] + ',ip=' + json['ipv4addr'] + '/' + json['region_netmask'],
                                                                   ostemplate='backup:vztmpl/debian-9.0-standard_9.0-2_amd64.tar.gz',
                                                                   rootfs='volume=lvm:' + image_name,
                                                                   swap=32,
                                                                   description=description)
        data = { 'unit_id': int(unit_id),
                 'type': 'lxc',
                 'clientid': json['clientid'],
                 'clientemail': json['clientemail'],
                 'hostname': vm_name,
                 'region': region_fullname,
                 'slave': slave_name,
                 'phyid': phy_id
               }
        response = { 'status': 'router_created', 'unit_id': unit_id, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name }

    if json['type'] == 'bridge':
        data = { 'unit_id': int(unit_id),
                 'type': 'vmbr',
                 'clientid': json['clientid'],
                 'clientemail': json['clientemail'],
                 'region': region_fullname,
                 'slave': slave_name,
                 'phyid': phy_id
               }
        #TODO: CREATE BRIDGE
        response = { 'status': 'bridge_created', 'unit_id': unit_id, 'hostname': vm_name, 'password': vm_pass, 'slave': real_slave_name }

    if data is None:
        # unknown unit type: previously fell through and raised NameError
        # at grid.create(data) below.
        ioconfig.logger.error('grid> unsupported unit type %s' % str(json['type']))
        return { 'status':'FAIL' }

    time.sleep(7) #wait few seconds for the slave to prepare the machine for initial run

    grid.create(data)
    return response
2017-10-19 11:55:09 -04:00
def remove(json):
    """ terminate an unit """
    kind = json['type']
    node_name, vmid, hostname, owner = grid.query(json)
    proxobject = auth(node_name)
    ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (owner, node_name, kind, vmid, hostname))
    node = proxobject.nodes(node_name)
    if kind == 'kvm':
        node.qemu(vmid).delete()
        grid.delete(json)
    elif kind == 'lxc':
        node.lxc(vmid).delete()
        grid.delete(json)
    return { 'status':'{}_deleted'.format(kind) }
2017-09-20 20:07:56 -04:00
def query(json):
    """ return the db info of an unit """
    unit_info = grid.query(json)
    return unit_info
2017-10-19 11:55:09 -04:00
def status(json):
    """ returns the status of an unit """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    # previously 'result' stayed undefined for unknown unit types and the
    # return below raised NameError; default to None instead.
    result = None
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
    return result
2017-10-19 11:55:09 -04:00
def start(json):
    """ starts a machine """
    kind = json['type']
    node_name, vmid, hostname, owner = grid.query(json)
    proxobject = auth(node_name)
    ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (owner, node_name, kind, vmid, hostname))
    node = proxobject.nodes(node_name)
    if kind == 'kvm':
        node.qemu(vmid).status.start.post()
    elif kind == 'lxc':
        node.lxc(vmid).status.start.post()
    #TODO: SET START AT BOOT FLAG
    return { 'status':'START' }
2017-10-19 11:55:09 -04:00
def shutdown(json):
    """ acpi shutdown the machine.. """
    kind = json['type']
    node_name, vmid, hostname, owner = grid.query(json)
    proxobject = auth(node_name)
    ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (owner, node_name, kind, vmid, hostname))

    node = proxobject.nodes(node_name)
    if kind == 'kvm':
        node.qemu(vmid).status.shutdown.post()
    elif kind == 'lxc':
        node.lxc(vmid).status.shutdown.post()
    #TODO: REMOVE START AT BOOT FLAG
    return { 'status':'SHUTDOWN', 'vmid':vmid }
2017-10-19 11:55:09 -04:00
def stop(json):
    """ poweroff the machine.. """
    kind = json['type']
    node_name, vmid, hostname, owner = grid.query(json)
    proxobject = auth(node_name)
    ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (owner, node_name, kind, vmid, hostname))

    node = proxobject.nodes(node_name)
    if kind == 'kvm':
        node.qemu(vmid).status.stop.post()
    elif kind == 'lxc':
        node.lxc(vmid).status.stop.post()
    #TODO: REMOVE START AT BOOT FLAG
    return { 'status':'STOP', 'vmid':vmid }
2017-10-19 11:55:09 -04:00
def suspend(json):
    """ suspend machine """
    kind = json['type']
    node_name, vmid, hostname, owner = grid.query(json)
    proxobject = auth(node_name)
    ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (owner, node_name, kind, vmid, hostname))
    node = proxobject.nodes(node_name)
    if kind == 'kvm':
        node.qemu(vmid).status.suspend.post()
    elif kind == 'lxc':
        node.lxc(vmid).status.suspend.post()
    return { 'status':'SUSPEND', 'vmid':vmid }
2017-10-19 11:55:09 -04:00
def resume(json):
    """ resume machine """
    kind = json['type']
    node_name, vmid, hostname, owner = grid.query(json)
    proxobject = auth(node_name)
    ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (owner, node_name, kind, vmid, hostname))
    node = proxobject.nodes(node_name)
    if kind == 'kvm':
        node.qemu(vmid).status.resume.post()
    elif kind == 'lxc':
        node.lxc(vmid).status.resume.post()
    return { 'status':'RESUME', 'vmid':vmid }
2017-10-19 11:55:09 -04:00
def vmrrd(json):
    """ retrieve rrd graphs (PNG) """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    # (a discarded proxobject.cluster.status.get() call was removed here)

    if unit_type == 'kvm':
        node = proxobject.nodes(slave_name).qemu(phy_id)
        statusquery = node.status.current.get()
        rcpu = node.rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = node.rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = node.rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = node.rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
        status = str(statusquery['qmpstatus'])
    elif unit_type == 'lxc':
        node = proxobject.nodes(slave_name).lxc(phy_id)
        statusquery = node.status.current.get()
        rcpu = node.rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = node.rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = node.rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = node.rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
        # previously read statusquery['qmpstatus'] with 'statusquery' unbound
        # in this branch (NameError); containers report a plain 'status'
        # field, not 'qmpstatus' -- TODO confirm against the Proxmox API.
        status = str(statusquery['status'])
    else:
        return { 'status':'FAIL' }

    #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, unit_type, phy_id, vm_host, status))
    response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
    return response
2017-10-19 11:55:09 -04:00
def vmvnc(json):
    """ invoke vnc ticket """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))

    if unit_type == 'kvm':
        ticket = proxobject.nodes(slave_name).qemu(phy_id).vncproxy.post(websocket=1)
        #socket = proxobject.nodes(slave_name).qemu(phy_id).vncwebsocket.get(port=ticket['port'],
        #                                                                    vncticket=ticket['ticket'])
    elif unit_type == 'lxc':
        ticket = proxobject.nodes(slave_name).lxc(phy_id).vncproxy.post()
        #socket = proxobject.nodes(slave_name).lxc(phy_id).vncwebsocket.get(port=ticket['port'],
        #                                                                   vncticket=ticket['ticket'])
    else:
        # previously fell through with 'ticket' undefined -> NameError below
        ioconfig.logger.error('grid> cannot invoke vnc for unit type %s' % str(unit_type))
        return { 'status':'FAIL' }

    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    #slaveport = socket['port']
    slaveport = ticket['port']
    slave_id = 1 #TODO: fix this
    vnchost = ioconfig.parser.get('general', 'novnc_host')
    listenport = str(int(slaveport) + 1000 + (int(slave_id) * 100)) #TODO: max 100 parallel connections/slave.
    vnc_target = { 'target_host': slaveip,
                   'target_port': slaveport,
                   'listen_host': vnchost,
                   'listen_port': listenport
                 }
    vnc_options = { 'idle-timeout': 20,
                    'verbose': True,
                    'cert': ioconfig.parser.get('general', 'ssl_cert'),
                    'key': ioconfig.parser.get('general', 'ssl_key'),
                    'ssl-only': True
                  }

    novnc.spawn(vnc_target, vnc_options)
    external_url = ioconfig.parser.get('general', 'novnc_url')
    prefix = external_url + "?host=" + vnchost + "&port=" + listenport + "&view_only=false&encrypt=1&true_color=1&password="
    vnc_url = prefix + ticket['ticket']

    time.sleep(3) #wait few seconds for the parallel vncwebsocket
    ioconfig.logger.info('{}[{}]> vnc port {} ready'.format(vm_owner, slave_name, listenport))
    #response = { 'status':'VNC', 'fqdn':external_url, 'host':myip, 'port':listenport, 'encrypt':'0', 'true_color':'1', 'ticket':ticket['ticket'] }
    response = { 'status':'VNC', 'url':vnc_url }
    return response
2017-10-19 11:55:09 -04:00
#DEPRECATED
def vmlist(proxobject):
    """ get unit list """
    # single-node proxmox setups, so node id = 0
    slave_name = proxobject.cluster.status.get()[0]['name']
    node = proxobject.nodes(slave_name)
    kvm_units = node.qemu.get()
    lxc_units = node.lxc.get()
    for unit in kvm_units:
        unit['vmtype'] = 'kvm'
    for unit in lxc_units:
        unit['vmtype'] = 'lxc'
    # merged machine list
    return kvm_units + lxc_units
2016-02-15 05:30:43 -05:00
if __name__ == '__main__':
    # internal module smoke tests
    time.sleep(1)
    #vmvnc(656758)