logger update

deflax 2017-07-19 22:28:26 +03:00
parent f27f26de6f
commit 4ce4eea855
2 changed files with 10 additions and 11 deletions
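The change drops the hard-coded 'slave' prefix from every log entry and leads with the VM owner instead (the clientemail value now returned by grid.queryvm()). A minimal before/after sketch of the message format, using the standard logging module as a stand-in for ioconfig.logger and made-up sample values:

import logging

logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger('grid')  # stand-in for ioconfig.logger, not the real config

# assumed sample values, for illustration only
vm_owner, slave_name = 'client@example.com', 'pm1'
vm_type, vm_id, vm_host = 'kvm', 100, 'vps.example.com'

# old format: entries began with the literal word "slave"
logger.info('slave[%s]> starting %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
# new format: the owner leads, followed by the slave name
logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))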


@@ -23,7 +23,7 @@ def queryvm(req_cube_id):
""" returns slave_name, vm_id and vm_type for the requested cubeid """
data = querydb(req_cube_id)
#print(data)
-return data['slave'], data['type'], data['vmid'], data['host'], data['email'], data['clientemail']
+return data['slave'], data['type'], data['vmid'], data['host'], data['clientemail']
def querydb(cubeid):


@@ -162,7 +162,7 @@ def vmstatus(cubeid):
slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.cluster.status.get()[0]['name']
-ioconfig.logger.info('slave[%s]> get status of %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
+ioconfig.logger.info('%s[%s]> status of %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
if vm_type == 'kvm':
result = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
if vm_type == 'lxc':
@@ -175,7 +175,7 @@ def vmstart(cubeid):
slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.cluster.status.get()[0]['name']
-ioconfig.logger.info('slave[%s]> starting %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
+ioconfig.logger.info('%s[%s]> starting %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
if vm_type == 'kvm':
result = proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
if vm_type == 'lxc':
@@ -189,7 +189,7 @@ def vmshutdown(cubeid):
slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.cluster.status.get()[0]['name']
-ioconfig.logger.info('slave[%s]> acpi shutdown %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
+ioconfig.logger.info('%s[%s]> acpi shutdown %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
if vm_type == 'kvm':
result = proxobject.nodes(slave_name).qemu(vm_id).status.shutdown.post()
@@ -205,7 +205,7 @@ def vmstop(cubeid):
slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.cluster.status.get()[0]['name']
-ioconfig.logger.info('slave[%s]> power off %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
+ioconfig.logger.info('%s[%s]> power off %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
if vm_type == 'kvm':
result = proxobject.nodes(slave_name).qemu(vm_id).status.stop.post()
@@ -221,7 +221,7 @@ def vmsuspend(cubeid):
slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.cluster.status.get()[0]['name']
-ioconfig.logger.info('slave[%s]> suspending %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
+ioconfig.logger.info('%s[%s]> suspending %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
if vm_type == 'kvm':
result = proxobject.nodes(slave_name).qemu(vm_id).status.suspend.post()
@@ -236,7 +236,7 @@ def vmresume(cubeid):
slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.cluster.status.get()[0]['name']
-ioconfig.logger.info('slave[%s]> resuming %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
+ioconfig.logger.info('%s[%s]> resuming %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
if vm_type == 'kvm':
result = proxobject.nodes(slave_name).qemu(vm_id).status.resume.post()
@@ -269,7 +269,7 @@ def vmrrd(cubeid):
rhdd = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
status = str(statusquery['qmpstatus']) #TODO: maybe change this?
-ioconfig.logger.info('slave[%s]> rrd of %s %s (%s). status: %s' % (slave_name, vm_type, vm_id, vm_host, status))
+ioconfig.logger.info('%s[%s]> rrd of %s %s (%s). status: %s' % (vm_owner, slave_name, vm_type, vm_id, vm_host, status))
response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
return response
@@ -280,7 +280,7 @@ def vmvnc(cubeid):
slave_name, vm_type, vm_id, vm_host, vm_owner = grid.queryvm(cubeid)
proxobject = auth(slave_name)
#slave_name = proxobject.cluster.status.get()[0]['name']
-ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
+ioconfig.logger.info('%s[%s]> invoking vnc ticket for %s %s (%s)' % (vm_owner, slave_name, vm_type, vm_id, vm_host))
if vm_type == 'kvm':
ticket = proxobject.nodes(slave_name).qemu(vm_id).vncproxy.post(websocket=1)
@@ -304,7 +304,6 @@ def vmvnc(cubeid):
'listen_host': vnchost,
'listen_port': listenport
}
vnc_options = { 'idle-timeout': 20,
'verbose': True,
'cert': ioconfig.parser.get('general', 'ssl_cert'),
@@ -318,7 +317,7 @@ def vmvnc(cubeid):
prefix = external_url + "?host=" + vnchost + "&port=" + listenport + "&view_only=false&encrypt=1&true_color=1&password="
vnc_url = prefix + ticket['ticket']
-ioconfig.logger.info('slave[{}]> vnc port {} ready'.format(slave_name, listenport))
+ioconfig.logger.info('{}[{}]> vnc port {} ready'.format(vm_owner, slave_name, listenport))
#response = { 'status':'VNC', 'fqdn':external_url, 'host':myip, 'port':listenport, 'encrypt':'0', 'true_color':'1', 'ticket':ticket['ticket'] }
response = { 'status':'VNC', 'url':vnc_url }
#print(vnc_url)