# proxmaster/plugin.py
# -*- coding: utf-8 -*-
# required proxmox permissions: PVESysAdmin, PVEVMAdmin
#
# afx 2015-2016
# site
from proxmoxer import ProxmoxAPI
import base64
import json
import time
import socket
#local
import grid
import clientsdb
import utils
import ioconfig
import novnc

def auth(slave_id, masterip=None, enc_password=None):
    """ find the slave we want to auth against in the cache and extract its credentials """
    adminuser = ioconfig.parser.get('general', 'adminuser')
    if masterip is None:
        result_slave = grid.query_slave_data(slave_id)
        masterip = result_slave['masterip']
        enc_password = result_slave['password']
    adminpassword = base64.b64decode(enc_password).decode('ascii')
    #vendor specific
    #connection = lib_proxmoxia.Connector(masterip)
    #auth_token = connection.get_auth_token(adminuser, adminpassword)
    #proxobject = lib_proxmoxia.Proxmox(connection)
    proxobject = ProxmoxAPI(masterip, user=adminuser, password=adminpassword, verify_ssl=False)
    return proxobject
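
# usage sketch (values are hypothetical): with no masterip given, auth() looks
# the slave up in the grid cache; credentials can also be passed explicitly:
#   proxobject = auth('10', masterip='198.51.100.7', enc_password=base64.b64encode(b'secret'))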

def vmlist(proxobject):
    """ get the list of machines on a slave """
    #we keep single-node proxmox clusters, so the node id is always 0
    #slave_name = proxobject.get('cluster/status')#'name']
    slave_name = proxobject.cluster.status.get()[0]['name']
    #query_kvm = proxobject.get('nodes/%s/qemu' % slave_name)
    query_kvm = proxobject.nodes(slave_name).qemu.get()
    query_lxc = proxobject.nodes(slave_name).lxc.get()
    for kvm_dict in query_kvm:
        kvm_dict['vmtype'] = 'kvm'
    for lxc_dict in query_lxc:
        lxc_dict['vmtype'] = 'lxc'
    vmlist = query_kvm + query_lxc #merge machine lists
    return vmlist
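
# each entry in the merged list is the raw proxmox api dict for that machine,
# extended with our 'vmtype' marker; roughly (exact fields vary by version):
#   [{'vmid': 100, 'name': 'vm.example.com', 'status': 'running', 'vmtype': 'kvm'}, ...]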

def vmcreate(req):
    """ create vm. returns JSON with data for whmcs """
    grid.sync()
    region_id = grid.query_region(req['region'])
    if region_id == "-1":
        ioconfig.logger.error('grid> no region found')
        response = 'NO REGION FOUND'
        return response
    slave_id = str(grid.query_happiness(region_id))
    vm_id = str(grid.generate_vmid())
    vm_ipv4 = grid.generate_ipv4(region_id, req['vps_ipv4'])
    vm_name = req['hostname']
    vm_pass = req['vmpass']
    client_id = req['clientid']
    client_name = req['clientname']
    proxobject = auth(slave_id) #we don't know the ip of slave_id, so we let auth() resolve it itself
    slave_name = proxobject.cluster.status.get()[0]['name']
    ipv4_dict = {}
    ipidx = 0
    #ioconfig.logger.info('grid[' + slave_name + ']> received data: %s, %s, %s, %s, %s', region_id, slave_id, vm_id, vm_ipv4, req)
    for ip in vm_ipv4:
        ipv4_dict[str(ipidx)] = str(ip)
        ipidx += 1
    response = { 'status':'CREATE', 'vmid':vm_id, 'name':vm_name, 'password':vm_pass, 'ipv4_0':vm_ipv4[0] }
    disk_filename = 'vm-' + vm_id + '-disk-1'
    description = vm_name + ' (' + vm_id + ')\n'
    description += 'owned by ' + client_name + ' (' + client_id + ')\n'
    description += 'master ip: ' + vm_ipv4[0]
    #create partition
    image_name = 'vm-' + vm_id + '-disk-0'
    local_storage = proxobject.nodes(slave_name).storage('lvm')
    local_storage.content.post(vmid=vm_id,
                               filename=image_name,
                               size=req['vps_disk'] + 'G')
    if req['vps_type'] == 'KVM':
        create_result = proxobject.nodes(slave_name).qemu.post(vmid=vm_id,
                                                               name=vm_name,
                                                               sockets=1,
                                                               cores=req['vps_cpu'],
                                                               memory=req['vps_ram'],
                                                               virtio0='lvm:' + image_name,
                                                               ide1='skyblue:iso/' + req['vps_os'] + ',media=cdrom',
                                                               net0='e1000,bridge=pub',
                                                               onboot=1,
                                                               description=description)
    if req['vps_type'] == 'LXC':
        create_result = proxobject.nodes(slave_name).lxc.post(vmid=vm_id,
                                                              hostname=vm_name,
                                                              password=vm_pass,
                                                              sockets=1,
                                                              cores=req['vps_cpu'],
                                                              memory=req['vps_ram'],
                                                              virtio0='lvm:' + image_name,
                                                              ip_address=vm_ipv4[0],
                                                              onboot=1,
                                                              description=description)
    #populate the client db
    client_email = req['clientemail']
    clientsdb.addclient(vm_id, vm_name, client_id, client_name, client_email, vm_pass)
    #start the machine
    time.sleep(7) #wait a few seconds for the slave to prepare the machine for its initial run
    vmstart(vm_id)
    return response
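
# a sketch of the request payload vmcreate() expects, inferred from the keys
# read above; all values here are hypothetical:
#   req = { 'region': 'ukl', 'vps_ipv4': '1', 'hostname': 'vm.example.com',
#           'vmpass': 'secret', 'clientid': '42', 'clientname': 'John Doe',
#           'clientemail': 'john@example.com', 'vps_type': 'KVM',
#           'vps_cpu': '2', 'vps_ram': '2048', 'vps_disk': '20',
#           'vps_os': 'debian-8.iso' }
#   vmcreate(req)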

def vmstatus(vm_id):
    """ returns the current status of the machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('grid[%s]> get status of %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.current.get()
    return result

def vmstart(vm_id):
    """ starts a machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> starting %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.start.post()
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status':'START' }
    return response

def vmstop(vm_id):
    """ hard power off the machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> power off %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.stop.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.stop.post()
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status':'STOP', 'vmid':vm_id }
    return response

def vmshutdown(vm_id):
    """ graceful acpi shutdown of the machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> acpi shutdown sent to %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.shutdown.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.shutdown.post()
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status':'SHUTDOWN', 'vmid':vm_id }
    return response

def vmsuspend(vm_id):
    """ suspend machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> suspending %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.suspend.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.suspend.post()
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status':'SUSPEND', 'vmid':vm_id }
    return response

def vmresume(vm_id):
    """ resume machine """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> resuming %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(vm_id).status.resume.post()
    if vm_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(vm_id).status.resume.post()
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status':'RESUME', 'vmid':vm_id }
    return response

def vmvnc(vm_id):
    """ invoke a vnc ticket and spawn a local novnc proxy for it """
    slave_id, vm_type = grid.query_vm(vm_id)
    proxobject = auth(slave_id)
    vm_type = vm_type.lower()
    slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s' % (slave_name, vm_type, vm_id))
    if vm_type == 'kvm':
        ticket = proxobject.nodes(slave_name).qemu(vm_id).vncproxy.post(websocket=1)
        #socket = proxobject.nodes(slave_name).qemu(vm_id).vncwebsocket.get(port=ticket['port'],
        #                                                                   vncticket=ticket['ticket'])
    if vm_type == 'lxc':
        ticket = proxobject.nodes(slave_name).lxc(vm_id).vncproxy.post()
        #socket = proxobject.nodes(slave_name).lxc(vm_id).vncwebsocket.get(port=ticket['port'],
        #                                                                  vncticket=ticket['ticket'])
    slaveip = grid.query_slave_data(slave_id)['masterip']
    #slaveport = socket['port']
    slaveport = ticket['port']
    listenport = str(int(slaveport) + 1000 + (int(slave_id) * 100)) #TODO: this caps us at 100 parallel connections per slave
    myip = getmyip()
    vnc_target = { 'target_host': slaveip,
                   'target_port': slaveport,
                   'listen_host': myip,
                   'listen_port': listenport
                 }
    vnc_options = { 'idle-timeout': 10,
                    'verbose': True
                  }
    novnc.spawn(vnc_target, vnc_options)
    external_url = ioconfig.parser.get('general', 'novnc_url')
    prefix = external_url + "/?host=" + myip + "&port=" + listenport + "&encrypt=0&true_color=1&password="
    vnc_url = prefix + ticket['ticket']
    ioconfig.logger.info('slave[{}]> {}'.format(slave_name, vnc_url))
    response = { 'status':'VNC', 'fqdn':external_url, 'host':myip, 'port':listenport, 'encrypt':'0', 'true_color':'1', 'ticket':ticket['ticket'] }
    return response
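
# worked example of the proxy math above, with illustrative values: a ticket on
# slave port 5900 from slave_id '2' is proxied locally on port
# 5900 + 1000 + 2*100 = 7100, and the console link takes the shape
#   <novnc_url>/?host=<myip>&port=7100&encrypt=0&true_color=1&password=<ticket>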

def getmyip():
    """ returns the primary outbound ip address of this host """
    #connect() on a UDP socket sends no packets; it only selects the local
    #interface that would be used to reach the target, which we then read back
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("gmail.com", 80))
    myip = s.getsockname()[0]
    s.close()
    return myip

if __name__ == '__main__':
    #internal module tests
    time.sleep(30)
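    #smoke-test sketch, kept commented out; the slave and vm ids are
    #hypothetical and must already exist in the grid cache:
    #proxobject = auth('10')
    #print(vmlist(proxobject))
    #print(vmstatus('100'))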