From eb5e774f31ed3c183052d7a18d63c48cd9f75889 Mon Sep 17 00:00:00 2001
From: Louis PERDEREAU
Date: Tue, 29 Oct 2024 23:07:54 +0100
Subject: [PATCH] fix(clusterstatus): Update cli output

---
 src/pvecontrol/actions/cluster.py | 55 ++++++++++++++++++++++-------
 src/pvecontrol/cluster.py         | 56 +++++++++++++++++++++++++++++++
 src/pvecontrol/node.py            |  5 ++-
 src/pvecontrol/vm.py              | 10 ++++--
 4 files changed, 110 insertions(+), 16 deletions(-)

diff --git a/src/pvecontrol/actions/cluster.py b/src/pvecontrol/actions/cluster.py
index 9e6d120..81b8016 100644
--- a/src/pvecontrol/actions/cluster.py
+++ b/src/pvecontrol/actions/cluster.py
@@ -1,20 +1,51 @@
-import logging
+from humanize import naturalsize
 
 from pvecontrol.config import get_config
-from pvecontrol.utils import filter_keys, print_tableoutput
+from pvecontrol.node import NodeStatus
 
 
 def action_clusterstatus(proxmox, args):
-    logging.debug(proxmox.status)
-    logging.debug(proxmox.resources)
-    status = filter_keys(proxmox.status[0], ['name', 'nodes', 'quorate'])
-    nodes = [ filter_keys(n, ['name', 'ip', 'online']) for n in proxmox.status[1:] ]
-# FIXME get cluster maxmem
-# FIXME get cluster maxcpu
-# FIXME get cluster allocatedmem
-# FIXME get cluster allocatedcpu
-    print_tableoutput([status])
-    print_tableoutput(nodes)
+    status = "healthy" if proxmox.is_healthy() else "not healthy"
+
+    templates = sum(len(node.templates()) for node in proxmox.nodes)
+    vms = sum(len(node.vms) for node in proxmox.nodes)
+    metrics = proxmox.metrics()
+
+    metrics_cpu_output = "{:.2f}/{} ({:.1f}%), allocated: {}".format(
+        metrics['cpu']['usage'],
+        metrics['cpu']['total'],
+        metrics['cpu']['percent'],
+        metrics['cpu']['allocated']
+    )
+
+    metrics_memory_output = "{}/{} ({:.1f}%), allocated: {}".format(
+        naturalsize(metrics['memory']['usage'], binary=True, format="%.2f"),
+        naturalsize(metrics['memory']['total'], binary=True, format="%.2f"),
+        metrics['memory']['percent'],
+        naturalsize(metrics['memory']['allocated'], binary=True, format="%.2f"),
+    )
+
+    metrics_disk_output = "{}/{} ({:.1f}%)".format(
+        naturalsize(metrics['disk']['usage'], binary=True, format="%.2f"),
+        naturalsize(metrics['disk']['total'], binary=True, format="%.2f"),
+        metrics['disk']['percent']
+    )
+
+    output = f"""
+    Status: {status}
+    VMs: {vms - templates}
+    Templates: {templates}
+    Metrics:
+      CPU: {metrics_cpu_output}
+      Memory: {metrics_memory_output}
+      Disk: {metrics_disk_output}
+    Nodes:
+      Offline: {len([node for node in proxmox.nodes if node.status == NodeStatus.offline])}
+      Online: {len([node for node in proxmox.nodes if node.status == NodeStatus.online])}
+      Unknown: {len([node for node in proxmox.nodes if node.status == NodeStatus.unknown])}
+    """
+
+    print(output)
 
 def action_sanitycheck(proxmox, args):
   """Check status of proxmox Cluster"""
diff --git a/src/pvecontrol/cluster.py b/src/pvecontrol/cluster.py
index 05b3fb9..5188dbc 100644
--- a/src/pvecontrol/cluster.py
+++ b/src/pvecontrol/cluster.py
@@ -57,3 +57,59 @@ def find_task(self, upid):
             if task.upid == upid:
                 return task
         return False
+
+    def is_healthy(self):
+        return any(item.get('quorate') for item in self.status if item.get('type') == 'cluster')
+
+    def get_resources_nodes(self):
+        return [resource for resource in self.resources if resource["type"] == "node"]
+
+    def get_resources_storages(self):
+        return [resource for resource in self.resources if resource["type"] == "storage"]
+
+    def cpu_metrics(self):
+        nodes = self.get_resources_nodes()
+        total_cpu = sum(node['maxcpu'] for node in nodes)
+        total_cpu_usage = sum(node['cpu'] for node in nodes)
+        total_cpu_allocated = sum(node.allocatedcpu for node in self.nodes)
+        cpu_percent = total_cpu_usage / total_cpu * 100 if total_cpu else 0.0
+
+        return {
+            "total": total_cpu,
+            "usage": total_cpu_usage,
+            "allocated": total_cpu_allocated,
+            "percent": cpu_percent,
+        }
+
+    def memory_metrics(self):
+        nodes = self.get_resources_nodes()
+        total_memory = sum(node['maxmem'] for node in nodes)
+        total_memory_usage = sum(node['mem'] for node in nodes)
+        total_memory_allocated = sum(node.allocatedmem for node in self.nodes)
+        memory_percent = total_memory_usage / total_memory * 100 if total_memory else 0.0
+
+        return {
+            "total": total_memory,
+            "usage": total_memory_usage,
+            "allocated": total_memory_allocated,
+            "percent": memory_percent,
+        }
+
+    def disk_metrics(self):
+        storages = self.get_resources_storages()
+        total_disk = sum(node['maxdisk'] for node in storages)
+        total_disk_usage = sum(node['disk'] for node in storages)
+        disk_percent = total_disk_usage / total_disk * 100 if total_disk else 0.0
+
+        return {
+            "total": total_disk,
+            "usage": total_disk_usage,
+            "percent": disk_percent,
+        }
+
+    def metrics(self):
+        return {
+            "cpu": self.cpu_metrics(),
+            "memory": self.memory_metrics(),
+            "disk": self.disk_metrics()
+        }
diff --git a/src/pvecontrol/node.py b/src/pvecontrol/node.py
index ae58be1..a815c64 100644
--- a/src/pvecontrol/node.py
+++ b/src/pvecontrol/node.py
@@ -11,7 +11,7 @@ class NodeStatus(Enum):
 class PVENode:
     """A proxmox VE Node"""
     _api = None
-    
+
     def __init__(self, api, node, status, input = {}):
         self.node = node
         self.status = NodeStatus[status]
@@ -83,3 +83,6 @@ def _init_allocatedcpu(self):
 #        if vm.vmid == item:
 #            return True
 #    return False
+
+    def templates(self):
+        return [vm for vm in self.vms if vm.template]
diff --git a/src/pvecontrol/vm.py b/src/pvecontrol/vm.py
index 3f84fcb..e94bd99 100644
--- a/src/pvecontrol/vm.py
+++ b/src/pvecontrol/vm.py
@@ -22,6 +22,8 @@ def __init__(self, api, node, vmid, status, input = {}):
         self.maxmem = 0
         self.uptime = 0
         self.tags = ""
+        self.template = 0
+
         for k in input:
             if k == "name":
                 self.name = input["name"]
@@ -37,12 +39,14 @@
                 self.uptime = input["uptime"]
             elif k == "tags":
                 self.tags = input["tags"]
-            
+            elif k == "template":
+                self.template = input["template"]
+
         self.config = self._api.nodes(self.node).qemu(vmid).config.get()
 
     def __str__(self):
-        return("vmid: {}, status: {}, name: {}, lock: {}, cpus: {}, maxdisk: {}, maxmem: {}, uptime: {}, tags: {}"
-            .format(self.vmid, self.status, self.name, self.lock, self.cpus, self.maxdisk, self.maxmem, self.uptime, self.tags))
+        return("vmid: {}, status: {}, name: {}, lock: {}, cpus: {}, maxdisk: {}, maxmem: {}, uptime: {}, tags: {}, template: {}"
+            .format(self.vmid, self.status, self.name, self.lock, self.cpus, self.maxdisk, self.maxmem, self.uptime, self.tags, self.template))
 
     def migrate(self, target, online = False):
         options = {}