fixing vmcreate
parent 0f968b8fb6
commit 87faee1287
3 changed files with 16 additions and 16 deletions
@@ -13,3 +13,9 @@ Python RESTful API for managing a grid of vm slaves
 iptables -A tcp_inbound -p TCP --match multiport --dports 6900:8000 -j ACCEPT #vnc range
 6. o/
 ```
+##Proxmox permissions:
+1. Datacenter -> Permissions -> Add -> User Permission
+2. Path: / User: masteradmin@pve / Role: PVEAdmin
+3. $IPT -A tcp_inbound -p TCP -s $PROXIP -j ACCEPT #enable proxmaster
+4. Datacenter -> Storage -> Add -> LVM
+5. ID: lvm / Volume Group: vm / Content: Disk image, Container
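The added README steps can be sanity-checked from Python. A minimal sketch, assuming a proxmoxer-style client (the `proxobject.nodes(...)` calls in plugin.py follow its API); host and password are placeholders, not values from the repo:

```python
# Sketch: confirm the masteradmin@pve grant and the 'lvm' storage exist.
from proxmoxer import ProxmoxAPI

proxmox = ProxmoxAPI('proxmox.example.com', user='masteradmin@pve',
                     password='changeme', verify_ssl=False)

print(proxmox.nodes.get())    # non-empty if the PVEAdmin role on path / works
print(proxmox.storage.get())  # should list the 'lvm' storage (volume group 'vm')
```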

grid.py (1 changed line)
@@ -22,6 +22,7 @@ config = ioconfig.parser
 def query_vm(req_cube_id):
     """ returns slave_name, vm_id and vm_type for the requested cubeid """
     data = querydb(req_cube_id)
+    print(data)
     return data['slave'], data['type'], data['vmid']
 
 
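For context, `query_vm` unpacks a record coming from `querydb`; the shape below is inferred from the subscripts in the diff, with invented values. Note the function returns slave, type, vmid in that order, which matches how the plugin.py callers unpack it, even though the docstring lists `vm_id` before `vm_type`:

```python
# Hypothetical querydb() record; field names from the diff, values invented.
data = {
    'slave': 'lexx',   # node hosting the machine
    'type': 'kvm',     # machine type: 'kvm' or 'lxc'
    'vmid': 4242,      # Proxmox-side numeric id
}
slave_name, vm_type, vm_id = data['slave'], data['type'], data['vmid']
```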

plugin.py (25 changed lines)
@@ -72,9 +72,9 @@ def vmcreate(req):
 
     #generators
     #slave_name = str(grid.query_happiness(region_id, weight)) #TODO: provide weight parameters here and calculate route
-    slave_name = 'warrior' #static route
+    slave_name = 'lexx' #static route
     #vmid = str(grid.generate_vmid()) #TODO: this should be between 100 and 65000
-    vm_id = random.randint(200, 62000)
+    vm_id = random.randint(200, 10200)
     cubeid = int(time.time() * 10000 * 10000)
 
     ipv4_list = grid.generate_ipv4(req['region'], req['vps_ipv4'])
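The TODO above says the vmid should be unique between 100 and 65000, and a bare `random.randint` can collide with an id already in use. One possible shape for the missing `grid.generate_vmid()`, assuming a proxmoxer-style client; the function name comes from the TODO, everything else is an assumption:

```python
import random

def generate_vmid(proxobject, slave_name, low=100, high=65000):
    """Sketch: pick a random vmid not already taken on the slave."""
    # Collect vmids of existing KVM and LXC machines on this node.
    used = {int(vm['vmid']) for vm in proxobject.nodes(slave_name).qemu.get()}
    used |= {int(vm['vmid']) for vm in proxobject.nodes(slave_name).lxc.get()}
    while True:
        candidate = random.randint(low, high)
        if candidate not in used:
            return candidate
```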
@@ -104,7 +104,7 @@ def vmcreate(req):
         ipv4_dict[str(ipidx)] = str(ip)
         ipidx += 1
 
-    response = { 'status':'CREATE', 'cube':vm_id, 'name':vm_name, 'password':vm_pass, 'ipv4_0':ipv4_list[0] }
+    response = { 'status':'CREATE', 'cube':cubeid, 'name':vm_name, 'password':vm_pass, 'ipv4_0':ipv4_list[0] }
     description = vm_name + ' (' + str(cubeid) + '/' + str(vm_id) + ')\n' + 'owned by ' + req['clientname'] + ' (' + req['clientid'] + ')\n' + 'master ip: ' + ipv4_list[0]
 
     #create partition
@@ -114,7 +114,7 @@ def vmcreate(req):
                                                            filename=image_name,
                                                            size=req['vps_hdd'] + 'G')
 
-    if req['vps_type'] == 'KVM':
+    if req['vps_type'] == 'kvm':
         create_result = proxobject.nodes(slave_name).qemu.post(vmid=vm_id,
                                                                name=vm_name,
                                                                sockets=1,
@@ -126,7 +126,7 @@ def vmcreate(req):
                                                                onboot=1,
                                                                description=description)
 
-    if req['vps_type'] == 'LXC':
+    if req['vps_type'] == 'lxc':
         create_result = proxobject.nodes(slave_name).lxc.post(vmid=vm_id,
                                                               hostname=vm_name,
                                                               password=vm_pass,
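Both branches now compare against lowercase literals; normalizing the request field once would also tolerate mixed-case input. A sketch using the names from `vmcreate`'s scope (not code from this commit):

```python
# Sketch: normalize once instead of matching exact-case literals twice.
vps_type = req['vps_type'].lower()
if vps_type == 'kvm':
    endpoint = proxobject.nodes(slave_name).qemu
elif vps_type == 'lxc':
    endpoint = proxobject.nodes(slave_name).lxc
else:
    raise ValueError('unsupported vps_type: ' + req['vps_type'])
# The create call would then go through `endpoint.post(...)`.
```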
@@ -139,8 +139,9 @@ def vmcreate(req):
                                                               description=description)
 
     #start the machine
-    time.sleep(7) #wait a few seconds for the slave to prepare the machine for initial run
-    vmstart(cubeid)
+    #time.sleep(7) #wait a few seconds for the slave to prepare the machine for initial run
+    #vmstart(cubeid)
+    print(str(create_result))
     return response
 
 
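With the fixed 7-second sleep and the immediate `vmstart` commented out, the creation task could instead be awaited explicitly. A sketch, assuming `create_result` is the task UPID string that the Proxmox API returns from `qemu.post()`/`lxc.post()`:

```python
import time

def wait_for_task(proxobject, slave_name, upid, timeout=60):
    """Sketch: poll /nodes/{node}/tasks/{upid}/status until it finishes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = proxobject.nodes(slave_name).tasks(upid).status.get()
        if status.get('status') != 'running':
            return status  # contains 'exitstatus' once the task ends
        time.sleep(1)
    raise TimeoutError('task %s still running after %ss' % (upid, timeout))
```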
@@ -148,7 +149,6 @@ def vmstatus(cubeid):
     """ returns the status of the machine """
     slave_name, vm_type, vm_id = grid.query_vm(cubeid)
     proxobject = auth(slave_name)
-    vm_type = vm_type.lower()
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> get status of %s %s' % (slave_name, vm_type, vm_id))
     if vm_type == 'kvm':
@@ -162,7 +162,6 @@ def vmstart(cubeid):
     """ starts a machine """
     slave_name, vm_type, vm_id = grid.query_vm(cubeid)
     proxobject = auth(slave_name)
-    vm_type = vm_type.lower()
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> starting %s %s' % (slave_name, vm_type, vm_id))
     if vm_type == 'kvm':
@@ -177,7 +176,6 @@ def vmshutdown(cubeid):
     """ acpi shutdown the machine.. """
     slave_name, vm_type, vm_id = grid.query_vm(cubeid)
     proxobject = auth(slave_name)
-    vm_type = vm_type.lower()
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> acpi shutdown %s %s' % (slave_name, vm_type, vm_id))
 
@@ -194,7 +192,6 @@ def vmstop(cubeid):
     """ poweroff the machine.. """
     slave_name, vm_type, vm_id = grid.query_vm(cubeid)
     proxobject = auth(slave_name)
-    vm_type = vm_type.lower()
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> power off %s %s' % (slave_name, vm_type, vm_id))
 
@@ -211,7 +208,6 @@ def vmsuspend(cubeid):
     """ suspend machine """
     slave_name, vm_type, vm_id = grid.query_vm(cubeid)
     proxobject = auth(slave_name)
-    vm_type = vm_type.lower()
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> suspending %s %s' % (slave_name, vm_type, vm_id))
 
@@ -227,7 +223,6 @@ def vmresume(cubeid):
     """ resume machine """
     slave_name, vm_type, vm_id = grid.query_vm(cubeid)
     proxobject = auth(slave_name)
-    vm_type = vm_type.lower()
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> resuming %s %s' % (slave_name, vm_type, vm_id))
 
@@ -243,8 +238,7 @@ def vmrrd(cubeid):
     """ retrieve rrd graphs (PNG) """
     slave_name, vm_type, vm_id = grid.query_vm(cubeid)
     proxobject = auth(slave_name)
-    vm_type = vm_type.lower()
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> query rrd of %s %s' % (slave_name, vm_type, vm_id))
 
     result = {}
@@ -267,7 +261,6 @@ def vmvnc(vm_id):
     """ invoke vnc ticket """
     slave_name, vm_type, vm_id = grid.query_vm(cubeid)
     proxobject = auth(slave_name)
-    vm_type = vm_type.lower()
     #slave_name = proxobject.cluster.status.get()[0]['name']
     ioconfig.logger.info('slave[%s]> invoking vnc ticket for %s %s' % (slave_name, vm_type, vm_id))
 
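Untouched by this commit, but worth flagging: `vmvnc` takes `vm_id` yet queries with `cubeid`, which would raise a NameError when called. The presumable fix, mirroring the other `vm*` functions:

```python
# Presumable fix (not in this commit): accept cubeid like the other vm* calls.
def vmvnc(cubeid):
    """ invoke vnc ticket """
    slave_name, vm_type, vm_id = grid.query_vm(cubeid)
    ...
```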