# -*- coding: utf-8 -*-

# site
from proxmoxer import ProxmoxAPI
import base64
import json
import time
import socket
import random
from unidecode import unidecode

#local
import grid
import utils
import ioconfig
import novnc


def auth(slave_name):
    """ Return a Proxmox control object for the given config slave name. """
    adminuser = ioconfig.parser.get('general', 'adminuser')
    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    slavepass = ioconfig.parser.get(str(slave_name), 'password')
    proxobject = ProxmoxAPI(slaveip, user=adminuser, password=slavepass, verify_ssl=False)
    return proxobject
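
# Minimal usage sketch for auth() (hypothetical slave name 'slave1'; assumes
# the ioconfig-parsed config has such a section with 'ipv4' and 'password'):
#prox = auth('slave1')
#print(prox.cluster.status.get()[0]['name'])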


def create(json):
    """ Create a unit. Returns JSON with data. """
    #note: the 'json' parameter shadows the imported json module inside this function
    region_name = json['region'] #just for the record. slaves are equal in the grid, as long as they are queried
    slave_name = json['slave']
    proxobject = auth(slave_name)
    real_slave_name = proxobject.cluster.status.get()[0]['name']

    unit_id = int(time.time() * 10000 * 10000) #currently unit_id is just a timestamp
    phy_id = grid.phyidgen(slave_name, json['type'])
    if phy_id == 99999:
        #99999 is the sentinel value returned when phy_id allocation fails
        response = { 'status': 'phy_id_alloc_failed' }
        return response

    description = ' (' + str(unit_id) + ' - ' + str(phy_id) + ')\n' + 'owned by ' + json['clientemail'] + ' (' + str(json['clientid']) + ')\n'

    if json['type'] == 'kvm':
        vm_name_utf8 = json['hostname']
        vm_name = unidecode(vm_name_utf8)
        image_name = 'vm-' + str(phy_id) + '-disk-1'
        try:
            local_storage = proxobject.nodes(real_slave_name).storage('lvm')
            storage_create_result = local_storage.content.post(vmid=phy_id, filename=image_name, size=json['hdd'] + 'G')
            ioconfig.logger.info('%s[%s]> allocated %s as %s' % (json['clientemail'], slave_name, json['hdd'], image_name))
        except Exception:
            ioconfig.logger.info('%s[%s]> unable to allocate %s' % (json['clientemail'], slave_name, image_name))
            response = { 'status': 'vol_alloc_failed' }
            return response

        try:
            create_result = proxobject.nodes(real_slave_name).qemu.post(
                vmid=int(phy_id),
                name=vm_name,
                onboot=1,
                sockets=1,
                cores=json['cpu'],
                memory=json['mem'],
                scsihw='virtio-scsi-pci',
                scsi0='file=lvm:' + image_name + ',discard=on',
                #note: hardcoded MAC shared by all kvm units; separation presumably relies on the per-unit vlan tag
                net0='virtio=8A:32:CD:E4:EE:11,bridge=' + json['net0if'] + ',tag=' + str(phy_id),
                description=description)
        except Exception as e:
            print(e)
            response = { 'status': 'kvm_create_failed' }
            return response

        data = { 'unit_id': int(unit_id),
                 'type': 'kvm',
                 'clientid': json['clientid'],
                 'clientemail': json['clientemail'],
                 'hostname': vm_name,
                 'region': region_name,
                 'slave': real_slave_name,
                 'phy_id': phy_id,
                 'net0if': json['net0if']
               }
        grid.create(data)
        response = { 'status': 'kvm_created', 'unit_id': unit_id, 'region': region_name, 'slave': real_slave_name, 'vlanid': phy_id }

    if json['type'] == 'lxc':
        vm_name_utf8 = json['hostname']
        vm_name = unidecode(vm_name_utf8)
        image_name = 'vm-' + str(phy_id) + '-disk-1'
        #try:
        #    local_storage = proxobject.nodes(real_slave_name).storage('lvm')
        #    storage_create_result = local_storage.content.post(vmid=phy_id, filename=image_name, size=json['hdd'] + 'G')
        #    ioconfig.logger.info('%s[%s]> allocated %s as %s' % (json['clientemail'], slave_name, json['hdd'], image_name))
        #except:
        #    ioconfig.logger.info('%s[%s]> unable to allocate %s' % (json['clientemail'], slave_name, image_name))
        #    response = { 'status': 'vol_alloc_failed' }
        #    return response
        try:
            vm_pass = json['rootpass']
        except KeyError:
            vm_pass = '!%%^)@&&(K3B' #fallback password if none was supplied
        try:
            create_result = proxobject.nodes(real_slave_name).lxc.post(
                vmid=int(phy_id),
                hostname=vm_name,
                onboot=1,
                unprivileged=1,
                password=vm_pass,
                cores=json['cpu'],
                memory=json['mem'],
                net0='name=eth0,bridge=' + json['net0if'] + ',ip=' + json['net0ip'] + '/' + json['net0mask'],
                net1='name=eth1,bridge=' + json['net1if'] + ',ip=' + json['net1ip'] + '/' + json['net1mask'] + ',gw=' + json['net1gw'],
                ostemplate='backup:vztmpl/debian-9.0-standard_9.0-2_amd64.tar.gz',
                rootfs='volume=lvm:' + str(json['hdd']),
                swap=32,
                description=description)
        except Exception:
            return { 'status': 'lxc_create_failed' }

        data = { 'unit_id': int(unit_id),
                 'type': 'lxc',
                 'clientid': json['clientid'],
                 'clientemail': json['clientemail'],
                 'hostname': vm_name,
                 'region': region_name,
                 'slave': slave_name, #note: the kvm branch stores real_slave_name here instead
                 'phy_id': phy_id,
                 'net0if': json['net0if']
               }
        grid.create(data)
        response = { 'status': 'lxc_created', 'unit_id': unit_id, 'hostname': vm_name, 'region': region_name, 'slave': real_slave_name }

    #if json['type'] == 'br':
    #    try:
    #        create_result = proxobject.nodes(real_slave_name).network.post(iface='vmbr' + str(phy_id),
    #                                                                       type='bridge',
    #                                                                       autostart=1)
    #    except Exception as e:
    #        print(e)
    #        return { 'status': 'br_create_failed' }
    #    data = { 'unit_id': int(unit_id),
    #             'type': 'br',
    #             'clientid': json['clientid'],
    #             'clientemail': json['clientemail'],
    #             'region': region_name,
    #             'slave': slave_name,
    #             'phy_id': phy_id
    #           }
    #    grid.create(data)
    #    response = { 'status': 'bridge_created', 'unit_id': unit_id, 'region': region_name, 'slave': real_slave_name, 'phy_id': phy_id }

    if json['type'] not in ('kvm', 'lxc'):
        #guard: without this, 'response' would be unbound for unknown unit types
        response = { 'status': 'invalid_type' }

    return response


def remove(json):
    """ Terminate a unit. """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    ioconfig.logger.info('%s[%s]> deleting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).delete()
        grid.delete(json)
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).delete()
        grid.delete(json)
    response = { 'status': '{}_deleted'.format(unit_type) }
    return response


def query(json):
    """ Return the db info of a unit. """
    query = grid.query(json)
    query['status'] = 'query_success'
    return query


def status(json):
    """ Return the status of a unit. """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
    return result


def start(json):
    """ Start a machine. """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))
    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.start.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.start.post()
    #TODO: SET START AT BOOT FLAG
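    # Possible fill-in for the TODO above (hedged sketch, left commented out to
    # keep behavior unchanged): the PVE config endpoint accepts an 'onboot' flag.
    #if unit_type == 'kvm':
    #    proxobject.nodes(slave_name).qemu(phy_id).config.put(onboot=1)
    #if unit_type == 'lxc':
    #    proxobject.nodes(slave_name).lxc(phy_id).config.put(onboot=1)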
    response = { 'status': 'START' }
    return response


def shutdown(json):
    """ ACPI shutdown of a machine. """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))

    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.shutdown.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.shutdown.post()
    #TODO: REMOVE START AT BOOT FLAG
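    # Hedged sketch for the TODO above (commented out), clearing the flag via
    # the same config endpoint used in start():
    #if unit_type == 'kvm':
    #    proxobject.nodes(slave_name).qemu(phy_id).config.put(onboot=0)
    #if unit_type == 'lxc':
    #    proxobject.nodes(slave_name).lxc(phy_id).config.put(onboot=0)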
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status': 'SHUTDOWN', 'vmid': phy_id }
    return response


def stop(json):
    """ Power off a machine. """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))

    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.stop.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.stop.post()
    #TODO: REMOVE START AT BOOT FLAG
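    # (the same onboot-clearing sketch shown in shutdown() would apply here)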
    #ioconfig.logger.info('slave[{}]> {}'.format(slave_name, result))
    response = { 'status': 'STOP', 'vmid': phy_id }
    return response


def suspend(json):
    """ Suspend a machine. """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))

    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.suspend.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.suspend.post()
    response = { 'status': 'SUSPEND', 'vmid': phy_id }
    return response


def resume(json):
    """ Resume a machine. """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))

    if unit_type == 'kvm':
        result = proxobject.nodes(slave_name).qemu(phy_id).status.resume.post()
    if unit_type == 'lxc':
        result = proxobject.nodes(slave_name).lxc(phy_id).status.resume.post()
    response = { 'status': 'RESUME', 'vmid': phy_id }
    return response


def vmrrd(json):
    """ Retrieve rrd graphs (PNG). """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)

    if unit_type == 'kvm':
        statusquery = proxobject.nodes(slave_name).qemu(phy_id).status.current.get()
        rcpu = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = proxobject.nodes(slave_name).qemu(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
        status = str(statusquery['qmpstatus'])

    if unit_type == 'lxc':
        statusquery = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
        rcpu = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
        rmem = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
        rnet = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
        rhdd = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
        status = str(statusquery['status']) #lxc status.current has no qmpstatus field, so use the generic status

    #ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, unit_type, phy_id, vm_host, status))
    response = { 'status': status, 'cpu': rcpu, 'mem': rmem, 'net': rnet, 'hdd': rhdd }
    return response


def vmvnc(json):
    """ Invoke a vnc ticket. """
    unit_type = json['type']
    slave_name, phy_id, vm_host, vm_owner = grid.query(json)
    proxobject = auth(slave_name)
    #slave_name = proxobject.cluster.status.get()[0]['name']
    ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, unit_type, phy_id, vm_host))

    if unit_type == 'kvm':
        ticket = proxobject.nodes(slave_name).qemu(phy_id).vncproxy.post(websocket=1)
        #socket = proxobject.nodes(slave_name).qemu(phy_id).vncwebsocket.get(port=ticket['port'],
        #                                                                    vncticket=ticket['ticket'])
    if unit_type == 'lxc':
        ticket = proxobject.nodes(slave_name).lxc(phy_id).vncproxy.post()
        #socket = proxobject.nodes(slave_name).lxc(phy_id).vncwebsocket.get(port=ticket['port'],
        #                                                                   vncticket=ticket['ticket'])

    slaveip = ioconfig.parser.get(str(slave_name), 'ipv4')
    #slaveport = socket['port']
    slaveport = ticket['port']
    vnchost = ioconfig.parser.get('general', 'novnc_host')
    listenport = random.randint(7000, 9999)
    #listenport = random.randint(7000, 7001)

    vnc_target = { 'target_host': slaveip,
                   'target_port': slaveport,
                   'listen_host': vnchost,
                   'listen_port': listenport
                 }
    vnc_options = { 'idle-timeout': 20,
                    'verbose': True,
                    'cert': ioconfig.parser.get('general', 'ssl_cert'),
                    'key': ioconfig.parser.get('general', 'ssl_key'),
                    'ssl-only': True
                  }

    novnc.spawn(vnc_target, vnc_options)

    external_url = ioconfig.parser.get('general', 'novnc_url')
    prefix = external_url + "?host=" + vnchost + "&port=" + str(listenport) + "&view_only=false&encrypt=1&true_color=1&password="
    vnc_url = prefix + ticket['ticket']

    time.sleep(3) #wait a few seconds for the parallel vncwebsocket
    ioconfig.logger.info('{}[{}]> vnc port {} ready'.format(vm_owner, slave_name, str(listenport)))
    #response = { 'status':'VNC', 'fqdn':external_url, 'host':myip, 'port':listenport, 'encrypt':'0', 'true_color':'1', 'ticket':ticket['ticket'] }
    response = { 'status': 'VNC', 'url': vnc_url }
    #print(vnc_url)
    return response


#DEPRECATED
def vmlist(proxobject):
    """ Get the unit list. """
    #we keep single-node proxmoxes, so node id = 0
    slave_name = proxobject.cluster.status.get()[0]['name']
    query_kvm = proxobject.nodes(slave_name).qemu.get()
    query_lxc = proxobject.nodes(slave_name).lxc.get()
    for kvm_dict in query_kvm:
        kvm_dict['vmtype'] = 'kvm'
    for lxc_dict in query_lxc:
        lxc_dict['vmtype'] = 'lxc'
    vmlist = query_kvm + query_lxc #merge machine lists
    return vmlist


if __name__ == '__main__':
    #internal module tests
    time.sleep(1)
    #vmvnc(656758)
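    # Hedged example of a create() payload (hypothetical values, commented out
    # so the self-test stays side-effect free; keys mirror what create() reads
    # from its json argument):
    #sample = { 'type': 'lxc', 'region': 'region1', 'slave': 'slave1',
    #           'clientid': '1', 'clientemail': 'user@example.com',
    #           'hostname': 'testunit', 'cpu': 1, 'mem': 256, 'hdd': '4',
    #           'net0if': 'vmbr0', 'net0ip': '192.0.2.10', 'net0mask': '24',
    #           'net1if': 'vmbr1', 'net1ip': '198.51.100.10', 'net1mask': '24',
    #           'net1gw': '198.51.100.1', 'rootpass': 'changeme' }
    #print(create(sample))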