add vm_host to system log
parent ae3f806036
commit 1b6e7e629e
4 changed files with 38 additions and 32 deletions
@@ -20,7 +20,7 @@ start:
 2. @reboot /usr/bin/screen -dmS proxmaster /home/master/proxmaster/start.sh

 ```
-##Proxmox permissions:
+##Proxmox slave install instructions:
 1. Datacenter -> Permissions -> Add -> User Permission
 2. Path: / User: masteradmin@pve / Role: PVEAdmin
 3. $IPT -A tcp_inbound -p TCP -s $PROXIP -j ACCEPT #enable proxmaster
@@ -28,3 +28,5 @@ start:
 5. ID: lvm / Volume Group: vm / Content: Disk image, Container
 6. Datacenter -> Storage -> Add -> NFS
 7. ID: skyblue / Server: 1.2.3.5 / Export: /srv/proxmox/slavename / Content: ISO Image, VZDump backup site
+
+8. echo 1 > /proc/sys/kernel/dmesg_restrict

grid.py: 2 changes

@@ -23,7 +23,7 @@ def queryvm(req_cube_id):
     """ returns slave_name, vm_id and vm_type for the requested cubeid """
     data = querydb(req_cube_id)
     #print(data)
-    return data['slave'], data['type'], data['vmid']
+    return data['slave'], data['type'], data['vmid'], data['host']


 def querydb(cubeid):
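queryvm() now hands back a four-tuple, so every caller below has to unpack the extra vm_host field. The body of querydb() is outside this diff; the following is a minimal sketch, assuming it returns a dict keyed exactly as the return statement reads it (values and cubeid are placeholders):

```
def querydb(cubeid):
    # Sketch only: the real querydb() looks the cube up in the grid
    # database. Key names are assumed from queryvm()'s return statement.
    return {'slave': 'lexx',               # slave node carrying the cube
            'type': 'kvm',                 # 'kvm' or 'lxc'
            'vmid': 1234,                  # Proxmox VM id
            'host': 'vm1234.example.com'}  # new field surfaced in the logs

def queryvm(req_cube_id):
    data = querydb(req_cube_id)
    return data['slave'], data['type'], data['vmid'], data['host']

# callers now unpack four values instead of three:
slave_name, vm_type, vm_id, vm_host = queryvm('a1b2c3')  # placeholder cubeid
```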

plugin.py: 62 changes

@@ -71,8 +71,8 @@ def vmcreate(req):

     #generators
     #slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate route
-    #slave_name = 'lexx' #staic route
-    slave_name = 'warrior'
+    slave_name = 'lexx' #staic route
+    #slave_name = 'warrior'

     #vmid = str(grid.generate_vmid()) #TODO: this should be between 100 and 65000
     vm_id = random.randint(1000, 9999)
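A note on the TODO above: vm_id is still drawn from random.randint(1000, 9999) with no uniqueness guarantee. A minimal sketch of the grid.generate_vmid() the comment asks for, assuming the grid module could supply the set of ids already in use (that lookup is hypothetical, not code from this repository):

```
import random

def generate_vmid(taken_ids=frozenset()):
    # Sketch: Proxmox vmids live in the 100..65000 window per the TODO.
    # taken_ids is a hypothetical stand-in for a grid database lookup.
    while True:
        vmid = random.randint(100, 65000)
        if vmid not in taken_ids:
            return vmid

print(generate_vmid({100, 101}))  # example: skips ids already in use
```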
@@ -109,32 +109,35 @@ def vmcreate(req):

     description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientname'] + ' (' + req['clientid'] + ')\n' + 'master ip: ' + ipv4_list[0]

-    #create partition
-    image_name = 'vm-' + str(vm_id) + '-disk-1'
-    local_storage = proxobject.nodes(real_slave_name).storage('lvm')
-    local_storage.content.post(vmid=vm_id,
-                               filename=image_name,
-                               size=req['vps_hdd'] + 'G')
-
     if req['vps_type'] == 'kvm':
+        #create partition
+        image_name = 'vm-' + str(vm_id) + '-disk-1'
+        local_storage = proxobject.nodes(real_slave_name).storage('lvm')
+        local_storage.content.post(vmid=vm_id,
+                                   filename=image_name,
+                                   size=req['vps_hdd'] + 'G')
+
         create_result = proxobject.nodes(real_slave_name).qemu.post(vmid=int(vm_id),
                                                                     name=vm_name,
                                                                     sockets=1,
                                                                     cores=req['vps_cpu'],
                                                                     memory=req['vps_mem'],
                                                                     virtio0='file=lvm:' + image_name,
-                                                                    ide1='skyblue:iso/' + deploy['iso9660'] + ',media=cdrom',
+                                                                    ide1='backup:iso/' + deploy['iso9660'] + ',media=cdrom',
                                                                     net0='virtio,bridge=vmbr0,macaddr=' + macaddr,
                                                                     onboot=1,
                                                                     description=description)

     if req['vps_type'] == 'lxc':
         create_result = proxobject.nodes(real_slave_name).lxc.post(vmid=int(vm_id),
+                                                                   cpus=req['vps_cpu'],
+                                                                   memory=req['vps_mem'],
+                                                                   swap=16,
+                                                                   ostemplate='backup:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz',
                                                                    hostname=vm_name,
                                                                    password=vm_pass,
-                                                                   sockets=1,
-                                                                   cores=req['vps_cpu'],
-                                                                   memory=req['vps_mem'],
+                                                                   rootfs='lvm:' + req['vps_hdd'],
                                                                    virtio0='file=lvm:' + image_name,
                                                                    ip_address=ipv4_list[0],
                                                                    onboot=1,
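Both branches go through proxobject, which comes from auth(slave_name). That helper is outside this diff; a minimal sketch of how such a helper is typically built with proxmoxer (listed in the requirements below), where the host suffix, password source, and verify_ssl choice are assumptions:

```
from proxmoxer import ProxmoxAPI

def auth(slave_name):
    # Sketch: masteradmin@pve is the user created in the install
    # instructions above; host pattern and password are placeholders.
    return ProxmoxAPI(slave_name + '.example.com',
                      user='masteradmin@pve',
                      password='secret',
                      verify_ssl=False)

proxobject = auth('lexx')
print(proxobject.cluster.status.get()[0]['name'])  # e.g. the node name
```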
@@ -154,10 +157,10 @@ def vmcreate(req):

 def vmstatus(cubeid):
     """ returns the status of the machine """
-    slave_name, vm_type, vm_id = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('slave[%s]> get status of %s %s' % (slave_name, vm_type, vm_id))
+    ioconfig.logger.info('slave[%s]> get status of %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
     if vm_type == 'kvm':
         result = proxobject.nodes(slave_name).qemu(vm_id).status.current.get()
     if vm_type == 'lxc':
@@ -167,10 +170,10 @@ def vmstatus(cubeid):

 def vmstart(cubeid):
     """ starts a machine """
-    slave_name, vm_type, vm_id = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('slave[%s]> starting %s %s' % (slave_name, vm_type, vm_id))
+    ioconfig.logger.info('slave[%s]> starting %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))
     if vm_type == 'kvm':
         result = proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
     if vm_type == 'lxc':
@@ -181,10 +184,10 @@ def vmstart(cubeid):

 def vmshutdown(cubeid):
     """ acpi shutdown the machine.. """
-    slave_name, vm_type, vm_id = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('slave[%s]> acpi shutdown %s %s' % (slave_name, vm_type, vm_id))
+    ioconfig.logger.info('slave[%s]> acpi shutdown %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))

     if vm_type == 'kvm':
         result = proxobject.nodes(slave_name).qemu(vm_id).status.shutdown.post()
@@ -197,10 +200,10 @@ def vmshutdown(cubeid):

 def vmstop(cubeid):
     """ poweroff the machine.. """
-    slave_name, vm_type, vm_id = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('slave[%s]> power off %s %s' % (slave_name, vm_type, vm_id))
+    ioconfig.logger.info('slave[%s]> power off %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))

     if vm_type == 'kvm':
         result = proxobject.nodes(slave_name).qemu(vm_id).status.stop.post()
@@ -213,10 +216,10 @@ def vmstop(cubeid):

 def vmsuspend(cubeid):
     """ suspend machine """
-    slave_name, vm_type, vm_id = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('slave[%s]> suspending %s %s' % (slave_name, vm_type, vm_id))
+    ioconfig.logger.info('slave[%s]> suspending %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))

     if vm_type == 'kvm':
         result = proxobject.nodes(slave_name).qemu(vm_id).status.suspend.post()
@@ -228,10 +231,10 @@ def vmsuspend(cubeid):

 def vmresume(cubeid):
     """ resume machine """
-    slave_name, vm_type, vm_id = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('slave[%s]> resuming %s %s' % (slave_name, vm_type, vm_id))
+    ioconfig.logger.info('slave[%s]> resuming %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))

     if vm_type == 'kvm':
         result = proxobject.nodes(slave_name).qemu(vm_id).status.resume.post()
@@ -243,10 +246,9 @@ def vmresume(cubeid):

 def vmrrd(cubeid):
     """ retrieve rrd graphs (PNG) """
-    slave_name, vm_type, vm_id = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     proxobject.cluster.status.get()[0]['name']
-    ioconfig.logger.info('slave[%s]> query rrd of %s %s' % (slave_name, vm_type, vm_id))

     result = {}
     if vm_type == 'kvm':
@@ -263,7 +265,9 @@ def vmrrd(cubeid):
         rmem = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
         rnet = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')
         rhdd = proxobject.nodes(slave_name).lxc(vm_id).rrd.get(timeframe='day', cf='AVERAGE', ds='diskread,diskwrite')
-        status = star(statusquery['qmpstatus']) #TODO: maybe change this?
+        status = str(statusquery['qmpstatus']) #TODO: maybe change this?

+    ioconfig.logger.info('slave[%s]> rrd of %s %s (%s). status: %s' % (slave_name, vm_type, vm_id, vm_host, status))
+
     response = { 'status':status, 'cpu':rcpu, 'mem':rmem, 'net':rnet, 'hdd':rhdd }
     return response
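vmrrd() bundles the qmpstatus string with the four RRD series it fetched above. A short sketch of a hypothetical caller, with key names taken from the response dict in the hunk (the cubeid is a placeholder and a live Proxmox connection is assumed):

```
from plugin import vmrrd  # this module

graphs = vmrrd('a1b2c3')  # placeholder cubeid
print('status:', graphs['status'])
for series in ('cpu', 'mem', 'net', 'hdd'):
    # each entry holds the raw rrd.get() result for that metric pair
    print(series, '->', graphs[series])
```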
@@ -271,10 +275,10 @@ def vmrrd(cubeid):

 def vmvnc(cubeid):
     """ invoke vnc ticket """
-    slave_name, vm_type, vm_id = grid.queryvm(cubeid)
+    slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
     proxobject = auth(slave_name)
     #slave_name = proxobject.c:luster.status.get()[0]['name']
-    ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s' % (slave_name, vm_type, vm_id))
+    ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s (%s)' % (slave_name, vm_type, vm_id, vm_host))

     if vm_type == 'kvm':
         ticket = proxobject.nodes(slave_name).qemu(vm_id).vncproxy.post(websocket=1)
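Every handler touched by this commit repeats the same preamble: unpack the four-tuple, authenticate against the slave, log with vm_host. If more handlers accumulate, that pattern could be factored into a decorator; a sketch only, assuming this module's grid, auth, and ioconfig names (the decorator itself is not part of the commit):

```
import functools

def with_vm(fn):
    # Sketch: resolve cubeid once and hand the connection plus
    # identifiers to the wrapped handler. grid, auth and ioconfig
    # are assumed to be the names already imported in plugin.py.
    @functools.wraps(fn)
    def wrapper(cubeid):
        slave_name, vm_type, vm_id, vm_host = grid.queryvm(cubeid)
        proxobject = auth(slave_name)
        ioconfig.logger.info('slave[%s]> %s %s %s (%s)' % (slave_name, fn.__name__, vm_type, vm_id, vm_host))
        return fn(proxobject, slave_name, vm_type, vm_id)
    return wrapper

@with_vm
def vmstart(proxobject, slave_name, vm_type, vm_id):  # illustrative handler
    if vm_type == 'kvm':
        return proxobject.nodes(slave_name).qemu(vm_id).status.start.post()
```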

@@ -2,8 +2,8 @@ uwsgi
 pyOpenSSL
 requests
 falcon
-urllib
 netaddr
 proxmoxer
+proxmox-deploy
 websockify
 unidecode