add vm_host to system log
This commit is contained in:
parent
ae3f806036
commit
1b6e7e629e
4 changed files with 38 additions and 32 deletions
|
@ -20,7 +20,7 @@ start:
|
|||
2. @reboot /usr/bin/screen -dmS proxmaster /home/master/proxmaster/start.sh
|
||||
|
||||
```
|
||||
## Proxmox permissions:
|
||||
## Proxmox slave install instructions:
|
||||
1. Datacenter -> Permissions -> Add -> User Permission
|
||||
2. Path: / User: masteradmin@pve / Role: PVEAdmin
|
||||
3. $IPT -A tcp_inbound -p TCP -s $PROXIP -j ACCEPT #enable proxmaster
|
||||
|
@ -28,3 +28,5 @@ start:
|
|||
5. ID: lvm / Volume Group: vm / Content: Disk image, Container
|
||||
6. Datacenter -> Storage -> Add -> NFS
|
||||
7. ID: skyblue / Server: 1.2.3.5 / Export: /srv/proxmox/slavename / Content: ISO Image, VZDump backup site
|
||||
8. echo 1 > /proc/sys/kernel/dmesg_restrict
|
||||
|
||||
|
|
2
grid.py
2
grid.py
|
@ -23,7 +23,7 @@ def queryvm(req_cube_id):
|
|||
""" returns slave_name, vm_id and vm_type for the requested cubeid """
|
||||
data = querydb(req_cube_id)
|
||||
#print(data)
|
||||
return data['slave'], data['type'], data['vmid']
|
||||
return data['slave'], data['type'], data['vmid'], data['host']
|
||||
|
||||
|
||||
def querydb(cubeid):
|
||||
|
|
52
plugin.py
52
plugin.py
|
@ -71,8 +71,8 @@ def vmcreate(req):
|
|||
|
||||
#generators
|
||||
#slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate route
|
||||
#slave_name = 'lexx' #staic route
|
||||
slave_name = 'warrior'
|
||||
slave_name = 'lexx' #staic route
|
||||
#slave_name = 'warrior'
|
||||
|
||||
#vmid = str(grid.generate_vmid()) #TODO: this should be between 100 and 65000
|
||||
vm_id = random.randint(1000, 9999)
|
||||
|
@ -109,6 +109,8 @@ def vmcreate(req):
|
|||
|
||||
description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientname'] + ' (' + req['clientid'] + ')\n' + 'master ip: ' + ipv4_list[0]
|
||||
|
||||
|
||||
if req['vps_type'] == 'kvm':
|
||||
#create partition
|
||||
image_name = 'vm-' + str(vm_id) + '-disk-1'
|
||||
local_storage = proxobject.nodes(real_slave_name).storage('lvm')
|
||||
|
@ -116,25 +118,26 @@ def vmcreate(req):
|
|||
filename=image_name,
|
||||
size=req['vps_hdd'] + 'G')
|
||||
|
||||
if req['vps_type'] == 'kvm':
|
||||
create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(vm_id),
|
||||
name=vm_name,
|
||||
sockets=1,
|
||||
cores=req['vps_cpu'],
|
||||
memory=req['vps_mem'],
|
||||
virtio0='file=lvm:' + image_name,
|
||||
ide1='skyblue:iso/' + deploy['iso9660'] + ',media=cdrom',
|
||||
ide1='backup:iso/' + deploy['iso9660'] + ',media=cdrom',
|
||||
net0='virtio,bridge=vmbr0,macaddr=' + macaddr,
|
||||
onboot=1,
|
||||
description=description)
|
||||
|
||||
if req['vps_type'] == 'lxc':
|
||||
create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(vm_id),
|
||||
cpus=req['vps_cpu'],
|
||||
memory=req['vps_mem'],
|
||||
swap=16,
|
||||
ostemplate='backup:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz',
|
||||
hostname=vm_name,
|
||||
password=vm_pass,
|
||||
sockets=1,
|
||||
cores=req['vps_cpu'],
|
||||
memory=req['vps_mem'],
|
||||
rootfs='lvm:' + req['vps_hdd'],
|
||||
virtio0='file=lvm:' + image_name,
|
||||
ip_address=ipv4_list[0],
|
||||
onboot=1,
|
||||
|
@ -154,10 +157,10 @@ def vmcreate(req):
|
|||
|
||||
def vmstatus(cubeid):
|
||||
""" returns the status of the machine """
|
||||
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
|
||||
slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
|
||||
proxobject = auth(slave_name)
|
||||
#slave_name = proxobject.c:luster.status.get()[0]['name']
|
||||
ioconfig.logger.info('slave[%s]> get status of %s %s' % (slave_name, vm_type, vm_id))
|
||||
ioconfig.logger.info('slave[%s]> get status of %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
|
||||
if vm_type == 'kvm':
|
||||
result = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
|
||||
if vm_type == 'lxc':
|
||||
|
@ -167,10 +170,10 @@ def vmstatus(cubeid):
|
|||
|
||||
def vmstart(cubeid):
|
||||
""" starts a machine """
|
||||
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
|
||||
slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
|
||||
proxobject = auth(slave_name)
|
||||
#slave_name = proxobject.c:luster.status.get()[0]['name']
|
||||
ioconfig.logger.info('slave[%s]> starting %s %s' % (slave_name, vm_type, vm_id))
|
||||
ioconfig.logger.info('slave[%s]> starting %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
|
||||
if vm_type == 'kvm':
|
||||
result = proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
|
||||
if vm_type == 'lxc':
|
||||
|
@ -181,10 +184,10 @@ def vmstart(cubeid):
|
|||
|
||||
def vmshutdown(cubeid):
|
||||
""" acpi shutdown the machine.. """
|
||||
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
|
||||
slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
|
||||
proxobject = auth(slave_name)
|
||||
#slave_name = proxobject.c:luster.status.get()[0]['name']
|
||||
ioconfig.logger.info('slave[%s]> acpi shutdown %s %s' % (slave_name, vm_type, vm_id))
|
||||
ioconfig.logger.info('slave[%s]> acpi shutdown %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
|
||||
|
||||
if vm_type == 'kvm':
|
||||
result = proxobject.nodes(slave_name).qemu(vm_id).status.shutdown.post()
|
||||
|
@ -197,10 +200,10 @@ def vmshutdown(cubeid):
|
|||
|
||||
def vmstop(cubeid):
|
||||
""" poweroff the machine.. """
|
||||
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
|
||||
slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
|
||||
proxobject = auth(slave_name)
|
||||
#slave_name = proxobject.c:luster.status.get()[0]['name']
|
||||
ioconfig.logger.info('slave[%s]> power off %s %s' % (slave_name, vm_type, vm_id))
|
||||
ioconfig.logger.info('slave[%s]> power off %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
|
||||
|
||||
if vm_type == 'kvm':
|
||||
result = proxobject.nodes(slave_name).qemu(vm_id).status.stop.post()
|
||||
|
@ -213,10 +216,10 @@ def vmstop(cubeid):
|
|||
|
||||
def vmsuspend(cubeid):
|
||||
""" suspend machine """
|
||||
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
|
||||
slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
|
||||
proxobject = auth(slave_name)
|
||||
#slave_name = proxobject.c:luster.status.get()[0]['name']
|
||||
ioconfig.logger.info('slave[%s]> suspending %s %s' % (slave_name, vm_type, vm_id))
|
||||
ioconfig.logger.info('slave[%s]> suspending %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
|
||||
|
||||
if vm_type == 'kvm':
|
||||
result = proxobject.nodes(slave_name).qemu(vm_id).status.suspend.post()
|
||||
|
@ -228,10 +231,10 @@ def vmsuspend(cubeid):
|
|||
|
||||
def vmresume(cubeid):
|
||||
""" resume machine """
|
||||
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
|
||||
slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
|
||||
proxobject = auth(slave_name)
|
||||
#slave_name = proxobject.c:luster.status.get()[0]['name']
|
||||
ioconfig.logger.info('slave[%s]> resuming %s %s' % (slave_name, vm_type, vm_id))
|
||||
ioconfig.logger.info('slave[%s]> resuming %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
|
||||
|
||||
if vm_type == 'kvm':
|
||||
result = proxobject.nodes(slave_name).qemu(vm_id).status.resume.post()
|
||||
|
@ -243,10 +246,9 @@ def vmresume(cubeid):
|
|||
|
||||
def vmrrd(cubeid):
|
||||
""" retrieve rrd graphs (PNG) """
|
||||
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
|
||||
slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
|
||||
proxobject = auth(slave_name)
|
||||
proxobject.cluster.status.get()[0]['name']
|
||||
ioconfig.logger.info('slave[%s]> query rrd of %s %s' % (slave_name, vm_type, vm_id))
|
||||
|
||||
result = {}
|
||||
if vm_type == 'kvm':
|
||||
|
@ -263,7 +265,9 @@ def vmrrd(cubeid):
|
|||
rmem = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
|
||||
rnet = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
|
||||
rhdd = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
|
||||
status = star(statusquery['qmpstatus']) #TODO: maybe change this?
|
||||
status = str(statusquery['qmpstatus']) #TODO: maybe change this?
|
||||
|
||||
ioconfig.logger.info('slave[%s]> rrd of %s %s (%s). status: %s' % (slave_name, vm_type, vm_id, vm_host, status))
|
||||
|
||||
response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
|
||||
return response
|
||||
|
@ -271,10 +275,10 @@ def vmrrd(cubeid):
|
|||
|
||||
def vmvnc(cubeid):
|
||||
""" invoke vnc ticket """
|
||||
slave_name, vm_type, vm_id = grid.queryvm(cubeid)
|
||||
slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
|
||||
proxobject = auth(slave_name)
|
||||
#slave_name = proxobject.c:luster.status.get()[0]['name']
|
||||
ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s' % (slave_name, vm_type, vm_id))
|
||||
ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
|
||||
|
||||
if vm_type == 'kvm':
|
||||
ticket = proxobject.nodes(slave_name).qemu(vm_id).vncproxy.post(websocket=1)
|
||||
|
|
|
@ -2,8 +2,8 @@ uwsgi
|
|||
pyOpenSSL
|
||||
requests
|
||||
falcon
|
||||
urllib
|
||||
netaddr
|
||||
proxmoxer
|
||||
proxmox-deploy
|
||||
websockify
|
||||
unidecode
|
||||
|
|
Loading…
Reference in a new issue