vnc ssl support

deflax 2016-11-11 16:56:35 +02:00
parent 4cb5d21bad
commit 45a4f722f8
7 changed files with 85 additions and 40 deletions


@@ -10,8 +10,10 @@ Python RESTful API for managing a grid of vm slaves
- config.ini.dist
- nginx_example_vhost.txt
5. make sure this iptables rule is included:
iptables -A tcp_inbound -p TCP --match multiport --dports 6900:8000 -j ACCEPT #vnc range
6. o/
iptables -A tcp_inbound -p TCP --match multiport --dports 6900:8000 -j ACCEPT #vnc range
6. generate self signed cert for ssl:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
7. o/
```
##Proxmox permissions:
1. Datacenter -> Permissions -> Add -> User Permission
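Note on step 6 above: the openssl command writes both the private key and the certificate into a single self.pem. A minimal sanity check that the combined file is usable (a sketch, assuming Python 3 and the file name from that step):

```python
# Sketch: confirm self.pem holds a matching certificate/private key pair.
# load_cert_chain() raises ssl.SSLError if either part is missing or mismatched.
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain('self.pem')  # key is read from the same file when keyfile is omitted
print('self.pem contains a usable certificate/key pair')
```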


@@ -8,7 +8,10 @@ adminuser = masteradmin@pve
apipass = sakdlsadas
vmid_min = 1000
vmid_max = 999999
novnc_url = http://#/novnc/vnc_auto.html
novnc_url = https://panel.example.com/novnc/vnc_auto.html
ssl_cert = /etc/letsencrypt/live/api.example.com/fullchain.pem
ssl_key = /etc/letsencrypt/live/api.example.com/privkey.pem
[Region]
regionid = 0
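The three new keys are read back later in the diff via ioconfig.parser.get('general', ...), i.e. plain configparser semantics. A self-contained sketch of reading them (the [General] section name is assumed, since the hunk starts mid-section; values are copied from the diff context):

```python
# Sketch: reading the new SSL/noVNC settings with the standard library
# configparser, mirroring the keys added to config.ini.dist above.
import configparser

parser = configparser.ConfigParser()
parser.read_string("""
[General]
novnc_url = https://panel.example.com/novnc/vnc_auto.html
ssl_cert = /etc/letsencrypt/live/api.example.com/fullchain.pem
ssl_key = /etc/letsencrypt/live/api.example.com/privkey.pem
""")

for key in ('novnc_url', 'ssl_cert', 'ssl_key'):
    print(key, '=', parser.get('General', key))
```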

grid.py

@@ -19,10 +19,10 @@ logger = ioconfig.logger
config = ioconfig.parser
def query_vm(req_cube_id):
def queryvm(req_cube_id):
""" returns slave_name, vm_id and vm_type for the requested cubeid """
data = querydb(req_cube_id)
print(data)
#print(data)
return data['slave'], data['type'], data['vmid']
@@ -33,7 +33,7 @@ def querydb(cubeid):
dbf = open(dbfile, 'r')
data = json.load(dbf)
dbf.close()
logger.info('{}> <-- {}'.format(dbfile, data))
#logger.info('{}> --> {}'.format(dbfile, data))
return data
except Exception as e:
logger.critical('{}> '.format(e))
@@ -95,11 +95,14 @@ def generate_ipv4(region_name, how_many=1):
region_ips = []
for ip in region_ipset:
region_ips.append(ip)
ip_min = 0
ip_max = len(region_ips) - 1
tested_ips = [] #initialize ip cache
requested_ips = []
#all_ips = utils.find_rec(grid_data, 'ipaddr') #TODO: we can't list that for KVM, so we should use another approach, perhaps a separate macaddress-to-ipaddress table used to manage a static-lease DHCP server. At this point this function is useless because plugins do not (or cannot) track the address of the actual machine. proxmaster should divide each region into segments, and each segment should export a static-lease config which will be queried when we search for unused addresses.
all_ips = [] #TODO: replace with db searching function
for ips in range(int(how_many)):
@@ -351,5 +354,5 @@ if __name__ == '__main__':
#print(query_happiness(0))
#print(generate_ipv4(0,3))
#print(generate_vmid())
print(query_vm(147344))
print(queryvm(147344))

nginx-vhost-ssl.txt (new file)

@@ -0,0 +1,26 @@
server {
listen 443 ssl;
server_name api.example.com;
ssl_certificate /etc/letsencrypt/live/api.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/api.example.com/privkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_dhparam /etc/letsencrypt/dhparam.pem;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_stapling on;
ssl_stapling_verify on;
add_header Strict-Transport-Security max-age=15768000;
location / {
uwsgi_pass 127.0.0.1:5117;
include uwsgi_params;
uwsgi_param UWSGI_SCRIPT proxmaster;
uwsgi_param UWSGI_PYHOME /home/master/proxmaster;
}
}
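Once this vhost is live, a quick way to confirm nginx is presenting the Let's Encrypt chain referenced above (hypothetical host name, standard library only):

```python
# Sketch: fetch the certificate presented on port 443 and print it as PEM,
# so it can be compared with the fullchain.pem path used in the vhost.
import ssl

pem = ssl.get_server_certificate(('api.example.com', 443))
print(pem)
```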

nginx-vhost.txt (new file)

@@ -0,0 +1,11 @@
server {
listen 80;
server_name api.example.com;
root /var/www/html;
location / {
}
}


@@ -1,10 +0,0 @@
server {
listen 80;
server_name EXAMPLE.com;
location / {
uwsgi_pass 127.0.0.1:5117;
include uwsgi_params;
uwsgi_param UWSGI_SCRIPT proxmaster;
uwsgi_param UWSGI_PYHOME /home/USER/proxmaster;
}
}


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# required proxmox permissions: PVESysAdmin, PVEVMAdmin
# required proxmox permissions: PVEAdmin
#
# afx 2015-2016
@@ -103,7 +103,7 @@ def vmcreate(req):
proxobject = auth(slave_name)
real_slave_name = proxobject.cluster.status.get()[0]['name']
print(real_slave_name)
#print(real_slave_name)
description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientname'] + ' (' + req['clientid'] + ')\n' + 'master ip: ' + ipv4_list[0]
@@ -139,11 +139,11 @@ def vmcreate(req):
description=description)
#TODO: setup based onreq['vps_recipe']
#TODO: setup based on req['vps_recipe']
#start the machine
#time.sleep(7) #wait few seconds for the slave to prepare the machine for initial run
#vmstart(cubeid)
print(str(create_result))
#print(str(create_result))
response = { 'status':'CREATE', 'cube':cubeid, 'name':vm_name, 'password':vm_pass, 'ipv4_0':ipv4_list[0] }
grid.writedb(deploy)
@@ -152,7 +152,7 @@ def vmcreate(req):
def vmstatus(cubeid):
""" returns the status of the machine """
slave_name, vm_type, vm_id = grid.query_vm(cubeid)
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.c:luster.status.get()[0]['name']
ioconfig.logger.info('slave[%s]> get status of %s %s' % (slave_name, vm_type, vm_id))
@@ -165,7 +165,7 @@ def vmstatus(cubeid):
def vmstart(cubeid):
""" starts a machine """
slave_name, vm_type, vm_id = grid.query_vm(cubeid)
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.c:luster.status.get()[0]['name']
ioconfig.logger.info('slave[%s]> starting %s %s' % (slave_name, vm_type, vm_id))
@@ -179,7 +179,7 @@ def vmstart(cubeid):
def vmshutdown(cubeid):
""" acpi shutdown the machine.. """
slave_name, vm_type, vm_id = grid.query_vm(cubeid)
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.c:luster.status.get()[0]['name']
ioconfig.logger.info('slave[%s]> acpi shutdown %s %s' % (slave_name, vm_type, vm_id))
@@ -195,7 +195,7 @@ def vmshutdown(cubeid):
def vmstop(cubeid):
""" poweroff the machine.. """
slave_name, vm_type, vm_id = grid.query_vm(cubeid)
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.c:luster.status.get()[0]['name']
ioconfig.logger.info('slave[%s]> power off %s %s' % (slave_name, vm_type, vm_id))
@@ -211,7 +211,7 @@ def vmstop(cubeid):
def vmsuspend(cubeid):
""" suspend machine """
slave_name, vm_type, vm_id = grid.query_vm(cubeid)
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.c:luster.status.get()[0]['name']
ioconfig.logger.info('slave[%s]> suspending %s %s' % (slave_name, vm_type, vm_id))
@@ -226,7 +226,7 @@ def vmsuspend(cubeid):
def vmresume(cubeid):
""" resume machine """
slave_name, vm_type, vm_id = grid.query_vm(cubeid)
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.c:luster.status.get()[0]['name']
ioconfig.logger.info('slave[%s]> resuming %s %s' % (slave_name, vm_type, vm_id))
@@ -241,30 +241,35 @@ def vmresume(cubeid):
def vmrrd(cubeid):
""" retrieve rrd graphs (PNG) """
slave_name, vm_type, vm_id = grid.query_vm(cubeid)
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
proxobject = auth(slave_name)
proxobject.cluster.status.get()[0]['name']
ioconfig.logger.info('slave[%s]> query rrd of %s %s' % (slave_name, vm_type, vm_id))
result = {}
if vm_type == 'kvm':
statusquery = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
rcpu = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
rmem = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
rnet = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
rhdd = proxobject.nodes(slave_name).qemu(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
status = str(statusquery['qmpstatus'])
if vm_type == 'lxc':
status = proxobject.nodes(slave_name).lxc(vm_id).status.current.get()
rcpu = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
rmem = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
rnet = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
rhdd = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
response = { 'status':'RRD', 'vmid':vm_id, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
status = str(statusquery['qmpstatus']) #TODO: maybe change this?
response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
return response
def vmvnc(cubeid):
""" invoke vnc ticket """
slave_name, vm_type, vm_id = grid.query_vm(cubeid)
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.c:luster.status.get()[0]['name']
ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s' % (slave_name, vm_type, vm_id))
@@ -282,37 +287,42 @@ def vmvnc(cubeid):
#slaveport = socket['port']
slaveport = ticket['port']
slave_id = 1 #TODO: fix this
vnchost = ioconfig.parser.get('general', 'novnc_host')
listenport = str(int(slaveport) + 1000 + (int(slave_id) * 100)) #TODO: max 100 parallel connections/slave.
myip = getmyip()
vnc_target = { 'target_host': slaveip,
'target_port': slaveport,
'listen_host': myip,
'listen_host': vnchost,
'listen_port': listenport
}
vnc_options = { 'idle-timeout': 20,
'verbose': True
'verbose': True,
'cert': ioconfig.parser.get('general', 'ssl_cert'),
'key': ioconfig.parser.get('general', 'ssl_key'),
'ssl-only': True
}
novnc.spawn(vnc_target, vnc_options)
external_url = ioconfig.parser.get('general', 'novnc_url')
prefix = external_url + "?host=" + myip + "&port=" + listenport + "&encrypt=0&true_color=1&password="
prefix = external_url + "?host=" + vnchost + "&port=" + listenport + "&view_only=false&encrypt=1&true_color=1&password="
vnc_url = prefix + ticket['ticket']
ioconfig.logger.info('slave[{}]> vnc port {} ready'.format(slave_name, listenport))
#response = { 'status':'VNC', 'fqdn':external_url, 'host':myip, 'port':listenport, 'encrypt':'0', 'true_color':'1', 'ticket':ticket['ticket'] }
response = { 'status':'VNC', 'url':vnc_url }
#print(vnc_url)
return response
def getmyip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("gmail.com",80))
myip = s.getsockname()[0]
s.close
return myip
#def getmyip():
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.connect(("gmail.com",80))
# myip = s.getsockname()[0]
# s.close
# return myip
if __name__ == '__main__':
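
For context on the vnc_target/vnc_options dictionaries handed to novnc.spawn() above: they read like websockify proxy settings (listen host/port, target host/port, cert, key, ssl-only, idle timeout). A rough standalone equivalent is sketched below; the hosts, ports and paths are placeholders, not values taken from a real deployment.

```python
# Sketch: spawning an SSL-only websockify proxy with the same kind of
# options proxmaster passes to novnc.spawn(). All values are placeholders.
import subprocess

vnc_target = {'target_host': '10.0.0.5', 'target_port': 5901,
              'listen_host': 'panel.example.com', 'listen_port': 7001}
vnc_options = {'idle-timeout': 20,
               'cert': '/etc/letsencrypt/live/api.example.com/fullchain.pem',
               'key': '/etc/letsencrypt/live/api.example.com/privkey.pem'}

cmd = ['websockify',
       '--idle-timeout', str(vnc_options['idle-timeout']),
       '--cert', vnc_options['cert'],
       '--key', vnc_options['key'],
       '--ssl-only',  # reject plain (unencrypted) WebSocket clients
       '{listen_host}:{listen_port}'.format(**vnc_target),
       '{target_host}:{target_port}'.format(**vnc_target)]
subprocess.run(cmd, check=True)
```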