create-activate phase 2

This commit is contained in:
deflax 2017-11-05 22:21:06 +02:00
parent 4f43446eef
commit c5440e311e
4 changed files with 32 additions and 277 deletions

View file

@ -24,7 +24,7 @@ start:
``` ```
##Proxmox slave install instructions: ##Proxmox slave install instructions:
1. Datacenter -> Permissions -> Add -> User Permission 1. Datacenter -> Permissions -> Add -> User Permission
2. Path: / User: masteradmin@pve / Role: PVEAdmin 2. Path: / User: masteradmin@pve / Role: Administrator
3. $IPT -A tcp_inbound -p TCP -s $PROXIP -j ACCEPT #enable proxmaster 3. $IPT -A tcp_inbound -p TCP -s $PROXIP -j ACCEPT #enable proxmaster
4. Datacenter -> Storage -> Add -> LVM 4. Datacenter -> Storage -> Add -> LVM
5. ID: lvm / Volume Group: vm / Content: Disk image, Container 5. ID: lvm / Volume Group: vm / Content: Disk image, Container

278
grid.py
View file

@ -1,6 +1,6 @@
#. -*- coding: utf-8 #. -*- coding: utf-8
# #
# vdc # vdc grid
#import site packages #import site packages
import base64 import base64
@ -21,41 +21,40 @@ config = ioconfig.parser
def query(json): def query(json):
data = read(json) data = read(json)
if json['type'] == 'kvm' or json['type'] == 'lxc': if json['type'] == 'kvm' or json['type'] == 'lxc':
return data['slave'], data['phyid'], data['hostname'], data['clientemail'] return data['slave'], data['phy_id'], data['hostname'], data['clientemail']
if json['type'] == 'vmbr': if json['type'] == 'br':
return data['slave'], data['phyid'], data['clientemail'] return data['slave'], data['phy_id'], data['clientemail']
def read(data): def read(data):
""" open a metadata file """ """ read a metadata file """
try: try:
dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id']) dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id'])
dbf = open(dbfile, 'r') dbf = open(dbfile, 'r')
data = json.load(dbf) data = json.load(dbf)
dbf.close() dbf.close()
logger.info('grid> --> {}'.format(dbfile)) #logger.info('grid> {}'.format(dbfile))
data['status'] = 'query_success'
return data return data
except Exception as e: except Exception as e:
logger.critical('{}> '.format(e)) logger.critical('grid> read error: {}'.format(e))
pass pass
return None return None
def create(json): def create(data):
""" create new metadata file """ """ write new metadata file """
try: try:
dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id']) dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id'])
logger.info('{}'.format(data))
dbf = open(dbfile, 'w') dbf = open(dbfile, 'w')
json.dump(data, dbf) json.dump(data, dbf)
dbf.close() dbf.close()
logger.info('grid> <-- {}'.format(data)) logger.info('grid> {} successfully writen.'.format(dbfile))
return data
except Exception as e: except Exception as e:
logger.critical('grid> {}'.format(e)) logger.critical('grid> write error: {}'.format(e))
pass
return None
def delete(unit_type, unit_id): def delete(data):
""" remove metadata file """ """ remove metadata file """
dbfile = 'db/{}.{}.json'.format(unit_type, unit_id) dbfile = 'db/{}.{}.json'.format(data['type'], data['unit_id'])
#TODO: perhaps just move the datafile to an archive directory #TODO: perhaps just move the datafile to an archive directory
os.remove(dbfile) os.remove(dbfile)
return None return None
@ -72,15 +71,15 @@ def phyidgen(slave_name, unit_type):
dbf = open(db_fullpath, 'r') dbf = open(db_fullpath, 'r')
data = json.load(dbf) data = json.load(dbf)
if data['slave'] == str(slave_name): if data['slave'] == str(slave_name):
exclude_list.append(data['phyid']) exclude_list.append(data['phy_id'])
dbf.close() dbf.close()
valid_list = list(set(full_list) - set(exclude_list)) valid_list = list(set(full_list) - set(exclude_list))
if len(valid_list) > 1: if len(valid_list) > 1:
choice = random.choice(valid_list) choice = random.choice(valid_list)
logger.info('grid> physical id generated: {}'.format(choice)) logger.info('{}> physical id generated: {}'.format(slave_name, choice))
return choice return choice
else: else:
logger.critical('grid> no free physical ids!') logger.critical('{}> no free physical ids!'.format(slave_name))
return none return none
def analyze_happiness(region_id): def analyze_happiness(region_id):
@ -113,248 +112,7 @@ def analyze_happiness(region_id):
return happy_slave return happy_slave
### DEPRECATED
def generate_vmid():
    """ analyzes cached grid data and return proposed vmid for new machines """
    grid_data = readcache()
    seen_ids = []  # vmids already tried during this run
    vmid_floor = grid_data['vmid_min']
    vmid_ceil = grid_data['vmid_max']
    # TODO: see generate_ipv4 remark — no recursive vmid harvesting yet,
    # so the exclusion list is currently always empty
    existing_ids = []
    existing_ids = [int(value) for value in existing_ids]
    attempts = 0
    while attempts != 50:
        attempts += 1
        candidate = random.randint(int(vmid_floor), int(vmid_ceil))  # max 90k machines
        if candidate in seen_ids:
            logger.warning('grid> vmid ' + str(candidate) + ' already tested. cache:' + str(seen_ids))
            continue
        if candidate in existing_ids:
            position = existing_ids.index(candidate)
            logger.warning('grid> vmid ' + str(candidate) + ' already exist. location:' + str(position))
            seen_ids.append(candidate)
            continue
        # free id found — reset the probe cache and hand it back
        seen_ids = []
        logger.info('grid> vmid ' + str(candidate) + ' selected')
        return candidate
    # 50 probes exhausted without a free id
    logger.error('grid> vmid range full')
    return None
def findDiff(d1, d2, path=""):
    """Recursively log differences between two nested dicts.

    Keys present in d1 but missing from d2 are reported, nested dicts are
    descended into, and unequal leaf values are logged as [-]/[+] pairs.
    Logs only — returns None and mutates nothing.
    """
    for k in d1.keys():
        if k not in d2.keys():
            logger.warning('cache> ' + str(k) + ' as key not in d2')
        elif isinstance(d1[k], dict):
            # bugfix: the original reassigned `path` itself here, so every
            # sibling key processed after a nested dict inherited the
            # elongated path; build the child path in a local instead
            subpath = k if path == "" else path + "->" + k
            findDiff(d1[k], d2[k], subpath)
        elif d1[k] != d2[k]:
            logger.warning('cache> ' + str(k) + ' ' + str(d1[k]) + ' [-]')
            logger.warning('cache> ' + str(k) + ' ' + str(d2[k]) + ' [+]')
def generate_ipv4(region_name, how_many=1):
    """ this function should check the range, exclude deployed machines and return a list of available ips """
    # configured ipv4 boundaries for this region
    ip_range_min = ioconfig.parser.get(str(region_name), 'ipv4_min')
    ip_range_max = ioconfig.parser.get(str(region_name), 'ipv4_max')
    # bugfix: this assignment was commented out, so the loop below raised
    # NameError on `region_ipset`; restore the range materialization
    region_ipset = netaddr.IPSet(netaddr.IPRange(ip_range_min, ip_range_max))
    region_ips = []
    for ip in region_ipset:
        region_ips.append(ip)
    ip_min = 0
    ip_max = len(region_ips) - 1
    tested_ips = []     # addresses already probed for the current pick
    requested_ips = []  # result accumulator
    # TODO: replace with db searching function — deployed addresses are not
    # trackable per-VM yet, so the exclusion list is currently always empty
    all_ips = []
    for ips in range(int(how_many)):
        counter = 0
        while True:
            if counter == 50:
                logger.error('region[{}]> ip range full'.format(str(region_name)))
                return None
            else:
                counter += 1
            requested_ip_index = random.randint(ip_min, ip_max)
            requested_ip = str(region_ips[requested_ip_index])
            if requested_ip in tested_ips:
                logger.warning('region[{}]> ip addres {} already tested. cache: {}'.format(str(region_name), str(requested_ip), str(tested_ips)))
                continue
            if requested_ip in requested_ips:
                logger.warning('region[{}]> ip address {} already generated.'.format(str(region_name), str(requested_ip)))
                tested_ips.append(requested_ip)
                continue
            if requested_ip in all_ips:
                # bugfix: was `used_ips.index(...)` (undefined name), and the
                # log format had three placeholders but only two arguments
                position = all_ips.index(requested_ip)
                logger.warning('region[{}]> ip address {} already exist. location: {}'.format(str(region_name), str(requested_ip), str(position)))
                tested_ips.append(requested_ip)
                continue
            else:
                tested_ips = []  # clear ip cache
                break
        logger.info('region[{}]> ip address {} selected.'.format(str(region_name), str(requested_ip)))
        requested_ips.append(requested_ip)
    logger.info('region[{}]> ip addresses {} selected.'.format(str(region_name), str(requested_ips)))
    return requested_ips
def readreal():
    """Read the current grid state from grid-real.json.

    Returns the parsed dict on success, or an empty dict when the file is
    missing, unparsable, or lacks the 'synctime' key.
    """
    try:
        with open('grid-real.json') as gridfile:
            grid_data = json.load(gridfile)
        # the `with` block closes the file; the original also called
        # gridfile.close() redundantly inside it
        resulttime = grid_data['synctime']
        logger.info('grid> sync for ' + resulttime)
    except (OSError, ValueError, KeyError):
        # narrowed from a bare `except:` — only missing/corrupt file or a
        # state dict without 'synctime' should fall back to empty state
        grid_data = {}
        logger.error('cache> cannot read temp file')
    return grid_data
def sync(cached=True):
    """ calls slave objects and mix their nodes in a common cluster grid """
    # Builds two parallel grid dicts keyed by region id, each region keyed by
    # slave id: `real_grid` reflects what the slaves report right now,
    # `cache_grid` is the reconciled per-slave cache persisted between runs.
    # Both are dumped to json at the end; `cached` selects which one to return.
    a = datetime.datetime.now()  # start timestamp for the synctime measurement
    grid_vmid_min = config.get('general', 'vmid_min')
    grid_vmid_max = config.get('general', 'vmid_max')
    real_grid = {'name':'real', "vmid_min":grid_vmid_min, "vmid_max":grid_vmid_max }
    cache_grid = {'name':'cache', "vmid_min":grid_vmid_min, "vmid_max":grid_vmid_max }
    # config sections whose name contains 'region' (e.g. region_0); the id is
    # taken from the part after the underscore
    regionselector = [i for i, x in enumerate(config.sections()) if re.match(r'\W*' + 'region' + r'\W*', x)]
    for ri in range(len(regionselector)):
        region_section = config.sections()[int(regionselector[ri])]
        region_id = region_section.split("_")[1]
        region_name = config.get(region_section, 'name')
        region_range_min = config.get(region_section, 'ipv4_min')
        region_range_max = config.get(region_section, 'ipv4_max')
        # config sections whose name contains 'slave'; matched per region below
        slaveselector = [i for i, x in enumerate(config.sections()) if re.match(r'\W*' + 'slave' + r'\W*', x)]
        real_region = { "id":region_id, "region":region_name, "ipv4_min":region_range_min, "ipv4_max":region_range_max }
        cache_region = real_region.copy() #same region metadata both in cache and status
        for si in range(len(slaveselector)):
            slave_section = config.sections()[int(slaveselector[si])]
            slave_id = slave_section.split("_")[1]
            slave_name = config.get(slave_section, 'name')
            slave_masterip = config.get(slave_section, 'masterip')
            slave_password = config.get(slave_section, 'password')
            slave_regionid = config.get(slave_section, 'regionid')
            # the grid stores the password base64-encoded, not in clear text
            enc_slave_password = base64.b64encode(slave_password.encode('ascii')) #encode base64 in grid
            decoded_password = enc_slave_password.decode('ascii')
            real_slave = { "id":slave_id, "slave":slave_name, "masterip":slave_masterip, "password":decoded_password }
            optional_slave = {}  # volatile per-vm data (uptime) kept out of the cache
            cache_file = 'cache-slave-' + slave_id + '.json'
            prefix = 'slave[' + slave_name + ']> '
            # check if slave is in current region and include it in current dict if it is
            if slave_regionid == region_id:
                try:
                    #trying to connect to slave host
                    #vmlist = plugin.vmlist(slave_id, slave_masterip, enc_slave_password.decode('utf-8'))
                    proxobject = plugin.auth(slave_id, slave_masterip, enc_slave_password)
                    vmlist = plugin.vmlist(proxobject)
                    real_slave['alive'] = 'up'
                    logger.info(prefix + 'is online')
                except:
                    #raise
                    #slave cant be read so it will be marked down.
                    real_slave['alive'] = 'down'
                    logger.warning(prefix + 'is down')
                if real_slave['alive'] == 'up':
                    #populate grid with vms then
                    # NOTE(review): assumes each vmlist entry carries 'vmid',
                    # 'name', 'vmtype' and 'uptime' keys — confirm against
                    # plugin.vmlist
                    for vm in vmlist:
                        #static parameters that CAN go to to cache:
                        vm_id = vm['vmid']
                        vm_name = vm['name']
                        #vm_owner = clientsdb.vmowner(vm_id, vm_name, cached) #read clientsdb cache
                        #static_vm = { "vmid":str(vm_id), "hostname":vm_name, 'type':vm['vmtype'], 'owner':vm_owner }
                        static_vm = { "vmid":str(vm_id), "hostname":vm_name, 'type':vm['vmtype'] }
                        real_slave[str(vm_id)] = static_vm
                        #dynamic parameters that SHOULD NOT go to the cache:
                        dynamic_vm = { "uptime":vm['uptime'] }
                        optional_slave[str(vm_id)] = dynamic_vm
                    #check current cache
                    cache_slave = real_slave.copy() #fallback to the current state
                    try:
                        with open(cache_file) as fcr:
                            cache_slave = json.load(fcr)
                            fcr.close()
                    except:
                        logger.info(prefix + 'does not exist in cache. Initializing...')
                    if cache_slave['alive'] == 'up':
                        #slave was not down so it must be up...
                        cache_slave = updatedb(real_slave, cache_file, prefix, 'up')
                        logger.info(prefix + 'sync success o/')
                    else:
                        #if the slave was down before, compare the state before overwriting the cache
                        cache_slave['alive'] = 'up' #even if alive status in cache is still down we ignore it by forcing it to up
                        logger.info(prefix + 'was down')
                        #show the differences in log for manual (or maybe automatic at some point fixing)
                        findDiff(cache_slave, real_slave)
                        if cache_slave != real_slave:
                            logger.warning(prefix + 'cache != current status. please restore host!')
                            cache_slave = updatedb(cache_slave, cache_file, prefix, 'down')
                        else:
                            logger.info(prefix + 'cache == current status. host restored. o/')
                            cache_slave = updatedb(cache_slave, cache_file, prefix, 'up')
                #what to do with cache if host is down
                if real_slave['alive'] == 'down':
                    try:
                        # serve the last known good state from the cache file
                        logger.warning(prefix + 'loading cache...')
                        with open(cache_file) as fscr:
                            cache_slave = json.load(fscr)
                            fscr.close()
                        logger.warning(prefix + '...done')
                        cache_slave = updatedb(cache_slave, cache_file, prefix, 'down')
                    except:
                        # no usable cache either — fall back to the bare
                        # (down-marked) live record
                        logger.error(prefix + 'sync failure!')
                        cache_slave = real_slave.copy()
                        #raise
                #we safely mix the dynamic ids now that we dont deal with cache anymore
                mergedslave = utils.dict_merge({}, real_slave, optional_slave)
                real_region[slave_id] = mergedslave
                cache_region[slave_id] = cache_slave
        #the region is finally included in the grid
        real_grid[region_id] = real_region
        cache_grid[region_id] = cache_region
    b = datetime.datetime.now()
    real_grid["synctime"] = str(b-a)  # wall-clock duration of this sync pass
    #dump all data to json
    WriteCache(cache_grid, 'grid-cache.json')
    WriteCache(real_grid, 'grid-real.json')
    if cached == True:
        return cache_grid
    else:
        return real_grid
if __name__ == '__main__': if __name__ == '__main__':
#print(query_happiness(0)) #print(query_happiness(0))
#print(generate_ipv4(0,3))
#print(generate_vmid())
print(phyidgen('warrior', 'kvm')) print(phyidgen('warrior', 'kvm'))

View file

@ -1,6 +1,4 @@
#. -*- coding: utf-8 - #. -*- coding: utf-8 -
# required proxmox permissions: PVEAdmin
#
# site # site
from proxmoxer import ProxmoxAPI from proxmoxer import ProxmoxAPI
@ -45,7 +43,7 @@ def create(json):
#slave_name = str(grid.query_happiness(region_id, weight)) #slave_name = str(grid.query_happiness(region_id, weight))
#slave_name = 'lexx' #slave_name = 'lexx'
slave_name = 'warrior' slave_name = 'warrior'
ioconfig.logger.info('grid> slave [{}] selected'.format(slave_name)) ioconfig.logger.info('{}> slave selected'.format(slave_name))
proxobject = auth(slave_name) proxobject = auth(slave_name)
real_slave_name = proxobject.cluster.status.get()[0]['name'] real_slave_name = proxobject.cluster.status.get()[0]['name']
@ -76,7 +74,8 @@ def create(json):
scsi0='file=lvm:' + image_name + ',discard=on', scsi0='file=lvm:' + image_name + ',discard=on',
description=description) description=description)
except: except:
return { 'status': 'kvm_create_failed' } response = { 'status': 'kvm_create_failed' }
return response
data = { 'unit_id': int(unit_id), data = { 'unit_id': int(unit_id),
'type': 'kvm', 'type': 'kvm',
'clientid': json['clientid'], 'clientid': json['clientid'],
@ -84,7 +83,7 @@ def create(json):
'hostname': vm_name, 'hostname': vm_name,
'region': region_name, 'region': region_name,
'slave': slave_name, 'slave': slave_name,
'phyid': phy_id, 'phy_id': phy_id,
'net0if': json['net0if'] 'net0if': json['net0if']
} }
grid.create(data) grid.create(data)
@ -128,7 +127,7 @@ def create(json):
'hostname': vm_name, 'hostname': vm_name,
'region': region_name, 'region': region_name,
'slave': slave_name, 'slave': slave_name,
'phyid': phy_id, 'phy_id': phy_id,
'net0if': json['net0if'] 'net0if': json['net0if']
} }
grid.create(data) grid.create(data)
@ -136,10 +135,11 @@ def create(json):
if json['type'] == 'br': if json['type'] == 'br':
try: try:
create_result = proxobject.nodes(real_slave_name).network.post(iface='vmbr' + int(phy_id), create_result = proxobject.nodes(real_slave_name).network.post(iface='vmbr' + str(phy_id),
type='bridge', type='bridge',
autostart=1) autostart=1)
except: except Exception as e:
print(e)
return { 'status': 'br_create_failed' } return { 'status': 'br_create_failed' }
data = { 'unit_id': int(unit_id), data = { 'unit_id': int(unit_id),
'type': 'br', 'type': 'br',
@ -147,10 +147,10 @@ def create(json):
'clientemail': json['clientemail'], 'clientemail': json['clientemail'],
'region': region_name, 'region': region_name,
'slave': slave_name, 'slave': slave_name,
'phyid': phy_id 'phy_id': phy_id
} }
grid.create(data) grid.create(data)
response = { 'status': 'bridge_created', 'unit_id': unit_id, 'region': region_name, 'slave': real_slave_name, 'phyid': phy_id } response = { 'status': 'bridge_created', 'unit_id': unit_id, 'region': region_name, 'slave': real_slave_name, 'phy_id': phy_id }
return response return response
@ -284,7 +284,7 @@ def vmrrd(json):
status = str(statusquery['qmpstatus']) status = str(statusquery['qmpstatus'])
if unit_type == 'lxc': if unit_type == 'lxc':
status = proxobject.nodes(slave_name).lxc(phy_id).status.current.get() statusquery = proxobject.nodes(slave_name).lxc(phy_id).status.current.get()
rcpu = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu') rcpu = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='cpu')
rmem = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem') rmem = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='mem,maxmem')
rnet = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout') rnet = proxobject.nodes(slave_name).lxc(phy_id).rrd.get(timeframe='day', cf='AVERAGE', ds='netin,netout')

View file

@ -1,7 +1,4 @@
#. -*- coding: utf-8 - #. -*- coding: utf-8 -
# required proxmox permissions: PVESysAdmin, PVEVMAdmin
#
# afx 2015-2016
# import site packages # import site packages
import logging import logging
@ -42,7 +39,7 @@ def selector(fn, req):
elif fn == 'status': elif fn == 'status':
body = plugin.status(json) body = plugin.status(json)
elif fn == 'query': elif fn == 'query':
body = grid.query(json) body = grid.read(json)
elif fn == 'start': elif fn == 'start':
body = plugin.start(json) body = plugin.start(json)