Created
January 22, 2026 13:29
-
-
Save greyhoundforty/b35f81f227d4891fa7712d4c8983fe38 to your computer and use it in GitHub Desktop.
Gather system information ahead of a Proxmox install
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/usr/bin/env python3 | |
| """ | |
| Proxmox Deployment Information Gatherer | |
| ======================================== | |
| This script collects detailed system information needed to create a custom | |
| Proxmox installation and configuration script for IBM Cloud Classic bare metal servers. | |
| Focuses on: | |
| - Network configuration (bonded interfaces, static IPs) | |
| - Disk layout and storage options | |
| - CPU virtualization capabilities | |
| - Memory configuration | |
| - Current OS details | |
| Output: JSON file with all gathered information | |
| Author: Ryan | |
| Usage: sudo python3 proxmox-system-info.py | |
| """ | |
import json
import os
import re
import shutil
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional
| class SystemInfoGatherer: | |
| """ | |
| Collects comprehensive system information for Proxmox deployment planning. | |
| This class runs various system commands to gather network, storage, CPU, | |
| and memory information that will be used to generate a custom Proxmox | |
| installation script. | |
| """ | |
| def __init__(self): | |
| """Initialize the gatherer with empty data structures.""" | |
| self.data: Dict[str, Any] = { | |
| "collection_timestamp": datetime.now().isoformat(), | |
| "hostname": self._get_hostname(), | |
| "network": {}, | |
| "storage": {}, | |
| "cpu": {}, | |
| "memory": {}, | |
| "os": {}, | |
| "virtualization": {} | |
| } | |
| def _run_command(self, cmd: List[str], check: bool = True) -> Optional[str]: | |
| """ | |
| Execute a shell command and return its output. | |
| Args: | |
| cmd: Command and arguments as a list | |
| check: Whether to raise exception on failure | |
| Returns: | |
| Command output as string, or None if command failed | |
| """ | |
| try: | |
| result = subprocess.run( | |
| cmd, | |
| capture_output=True, | |
| text=True, | |
| check=check | |
| ) | |
| return result.stdout.strip() | |
| except subprocess.CalledProcessError as e: | |
| print(f"Warning: Command failed: {' '.join(cmd)}", file=sys.stderr) | |
| print(f"Error: {e.stderr}", file=sys.stderr) | |
| return None | |
| except FileNotFoundError: | |
| print(f"Warning: Command not found: {cmd[0]}", file=sys.stderr) | |
| return None | |
| def _get_hostname(self) -> str: | |
| """Get the system hostname.""" | |
| output = self._run_command(["hostname"]) | |
| return output if output else "unknown" | |
    def gather_network_info(self) -> None:
        """
        Collect detailed network configuration information.

        Gathers:
        - All network interfaces and their states
        - IP addresses (IPv4 and IPv6)
        - Bonding configuration (critical for IBM Classic)
        - Routes and gateways
        - DNS configuration

        Results are stored under self.data["network"].
        """
        print("Gathering network information...")
        network_data = {
            "interfaces": {},
            "bonds": {},
            "routes": {},
            "dns": {}
        }
        # Get all network interfaces using the ip command.
        # This is more reliable than parsing /proc/net/dev.
        ip_addr_output = self._run_command(["ip", "-details", "addr", "show"])
        if ip_addr_output:
            # Keep the raw output alongside the parsed form so anything the
            # parser misses can still be inspected later.
            network_data["raw_ip_addr"] = ip_addr_output
            network_data["interfaces"] = self._parse_ip_addr(ip_addr_output)
        # Get link information (includes bond details).
        ip_link_output = self._run_command(["ip", "-details", "link", "show"])
        if ip_link_output:
            network_data["raw_ip_link"] = ip_link_output
        # Parse bonding information from /proc/net/bonding.
        # IBM Classic typically uses bond0 (private) and bond1 (public).
        network_data["bonds"] = self._get_bonding_info()
        # Get the IPv4 routing table — important for identifying the
        # default gateway.
        ip_route_output = self._run_command(["ip", "route", "show"])
        if ip_route_output:
            network_data["routes"]["ipv4"] = ip_route_output
            network_data["routes"]["parsed"] = self._parse_routes(ip_route_output)
        # Get IPv6 routes if present.
        ip_route6_output = self._run_command(["ip", "-6", "route", "show"])
        if ip_route6_output:
            network_data["routes"]["ipv6"] = ip_route6_output
        # Get DNS configuration from the classic resolver file.
        resolv_conf = Path("/etc/resolv.conf")
        if resolv_conf.exists():
            network_data["dns"]["resolv_conf"] = resolv_conf.read_text()
            network_data["dns"]["nameservers"] = self._parse_resolv_conf(
                network_data["dns"]["resolv_conf"]
            )
        # Check for systemd-resolved (common in modern systems).
        if Path("/etc/systemd/resolved.conf").exists():
            network_data["dns"]["systemd_resolved"] = True
            # NOTE(review): `systemd-resolve --status` is deprecated in favor
            # of `resolvectl status` on newer distros; check=False keeps this
            # probe from aborting the run either way.
            resolved_status = self._run_command(
                ["systemd-resolve", "--status"],
                check=False
            )
            if resolved_status:
                network_data["dns"]["resolved_status"] = resolved_status
        # Identify the active network manager
        # (NetworkManager, systemd-networkd, or ifupdown).
        network_data["network_manager"] = self._detect_network_manager()
        # Capture existing network configuration files for later reference.
        network_data["config_files"] = self._find_network_configs()
        self.data["network"] = network_data
| def _parse_ip_addr(self, output: str) -> Dict[str, Any]: | |
| """ | |
| Parse 'ip addr' output to extract interface details. | |
| Args: | |
| output: Raw output from 'ip addr show' command | |
| Returns: | |
| Dictionary mapping interface names to their properties | |
| """ | |
| interfaces = {} | |
| current_iface = None | |
| for line in output.split('\n'): | |
| # Match interface line (e.g., "2: eth0: <BROADCAST,MULTICAST,UP>") | |
| iface_match = re.match(r'^\d+:\s+(\S+):\s+<([^>]+)>', line) | |
| if iface_match: | |
| iface_name = iface_match.group(1).rstrip(':') | |
| flags = iface_match.group(2).split(',') | |
| current_iface = iface_name | |
| interfaces[current_iface] = { | |
| "name": iface_name, | |
| "flags": flags, | |
| "state": "UP" if "UP" in flags else "DOWN", | |
| "ipv4_addresses": [], | |
| "ipv6_addresses": [], | |
| "mac_address": None | |
| } | |
| # Match MAC address line | |
| if current_iface and "link/ether" in line: | |
| mac_match = re.search(r'link/ether\s+([0-9a-f:]+)', line) | |
| if mac_match: | |
| interfaces[current_iface]["mac_address"] = mac_match.group(1) | |
| # Match IPv4 address | |
| if current_iface and "inet " in line: | |
| ipv4_match = re.search(r'inet\s+(\S+)', line) | |
| if ipv4_match: | |
| interfaces[current_iface]["ipv4_addresses"].append( | |
| ipv4_match.group(1) | |
| ) | |
| # Match IPv6 address | |
| if current_iface and "inet6" in line: | |
| ipv6_match = re.search(r'inet6\s+(\S+)', line) | |
| if ipv6_match: | |
| interfaces[current_iface]["ipv6_addresses"].append( | |
| ipv6_match.group(1) | |
| ) | |
| return interfaces | |
| def _get_bonding_info(self) -> Dict[str, Any]: | |
| """ | |
| Extract bonding configuration from /proc/net/bonding. | |
| IBM Classic typically uses: | |
| - bond0: Private network (eth0 + eth2) | |
| - bond1: Public network (eth1 + eth3) | |
| Returns: | |
| Dictionary with bonding configuration for each bond interface | |
| """ | |
| bonds = {} | |
| bonding_dir = Path("/proc/net/bonding") | |
| if not bonding_dir.exists(): | |
| return bonds | |
| # Iterate through each bond interface | |
| for bond_file in bonding_dir.iterdir(): | |
| bond_name = bond_file.name | |
| bond_content = bond_file.read_text() | |
| bonds[bond_name] = { | |
| "raw_config": bond_content, | |
| "mode": None, | |
| "slaves": [], | |
| "active_slave": None, | |
| "mii_status": None | |
| } | |
| # Parse bonding mode (e.g., "mode: active-backup (1)") | |
| mode_match = re.search(r'Bonding Mode:\s*(.+)', bond_content) | |
| if mode_match: | |
| bonds[bond_name]["mode"] = mode_match.group(1) | |
| # Parse slave interfaces | |
| # Look for "Slave Interface: eth0" lines | |
| for slave_match in re.finditer(r'Slave Interface:\s*(\S+)', bond_content): | |
| slave_name = slave_match.group(1) | |
| bonds[bond_name]["slaves"].append(slave_name) | |
| # Parse active slave in active-backup mode | |
| active_match = re.search(r'Currently Active Slave:\s*(\S+)', bond_content) | |
| if active_match: | |
| bonds[bond_name]["active_slave"] = active_match.group(1) | |
| # Parse MII status (link monitoring) | |
| mii_match = re.search(r'MII Status:\s*(\S+)', bond_content) | |
| if mii_match: | |
| bonds[bond_name]["mii_status"] = mii_match.group(1) | |
| return bonds | |
| def _parse_routes(self, output: str) -> List[Dict[str, str]]: | |
| """ | |
| Parse routing table to identify default gateway and routes. | |
| Args: | |
| output: Raw output from 'ip route show' command | |
| Returns: | |
| List of route dictionaries with destination, gateway, interface | |
| """ | |
| routes = [] | |
| for line in output.split('\n'): | |
| if not line.strip(): | |
| continue | |
| route = {"raw": line} | |
| # Parse default route | |
| if line.startswith("default"): | |
| route["type"] = "default" | |
| # Extract gateway: "default via 10.x.x.x dev bond0" | |
| via_match = re.search(r'via\s+(\S+)', line) | |
| if via_match: | |
| route["gateway"] = via_match.group(1) | |
| dev_match = re.search(r'dev\s+(\S+)', line) | |
| if dev_match: | |
| route["interface"] = dev_match.group(1) | |
| else: | |
| # Parse specific network routes | |
| route["type"] = "network" | |
| parts = line.split() | |
| if parts: | |
| route["destination"] = parts[0] | |
| if "via" in line: | |
| via_match = re.search(r'via\s+(\S+)', line) | |
| if via_match: | |
| route["gateway"] = via_match.group(1) | |
| dev_match = re.search(r'dev\s+(\S+)', line) | |
| if dev_match: | |
| route["interface"] = dev_match.group(1) | |
| routes.append(route) | |
| return routes | |
| def _parse_resolv_conf(self, content: str) -> List[str]: | |
| """ | |
| Extract nameserver IPs from resolv.conf. | |
| Args: | |
| content: Contents of /etc/resolv.conf | |
| Returns: | |
| List of nameserver IP addresses | |
| """ | |
| nameservers = [] | |
| for line in content.split('\n'): | |
| if line.strip().startswith('nameserver'): | |
| parts = line.split() | |
| if len(parts) >= 2: | |
| nameservers.append(parts[1]) | |
| return nameservers | |
| def _detect_network_manager(self) -> Dict[str, Any]: | |
| """ | |
| Detect which network management system is in use. | |
| Returns: | |
| Dictionary with detected network manager and status | |
| """ | |
| manager_info = { | |
| "detected": [], | |
| "active": None | |
| } | |
| # Check for NetworkManager | |
| nm_status = self._run_command( | |
| ["systemctl", "is-active", "NetworkManager"], | |
| check=False | |
| ) | |
| if nm_status == "active": | |
| manager_info["detected"].append("NetworkManager") | |
| manager_info["active"] = "NetworkManager" | |
| # Check for systemd-networkd | |
| networkd_status = self._run_command( | |
| ["systemctl", "is-active", "systemd-networkd"], | |
| check=False | |
| ) | |
| if networkd_status == "active": | |
| manager_info["detected"].append("systemd-networkd") | |
| if not manager_info["active"]: | |
| manager_info["active"] = "systemd-networkd" | |
| # Check for traditional ifupdown (Debian/Ubuntu) | |
| if Path("/etc/network/interfaces").exists(): | |
| manager_info["detected"].append("ifupdown") | |
| if not manager_info["active"]: | |
| manager_info["active"] = "ifupdown" | |
| return manager_info | |
| def _find_network_configs(self) -> Dict[str, Any]: | |
| """ | |
| Locate and read existing network configuration files. | |
| Returns: | |
| Dictionary mapping config file paths to their contents | |
| """ | |
| configs = {} | |
| # Common network configuration locations | |
| config_paths = [ | |
| "/etc/network/interfaces", | |
| "/etc/netplan", | |
| "/etc/sysconfig/network-scripts", | |
| "/etc/NetworkManager/system-connections" | |
| ] | |
| for path_str in config_paths: | |
| path = Path(path_str) | |
| if path.is_file(): | |
| # Single config file | |
| try: | |
| configs[path_str] = path.read_text() | |
| except PermissionError: | |
| configs[path_str] = "Permission denied" | |
| elif path.is_dir(): | |
| # Directory of config files | |
| configs[path_str] = {} | |
| try: | |
| for config_file in path.iterdir(): | |
| if config_file.is_file(): | |
| try: | |
| configs[path_str][config_file.name] = config_file.read_text() | |
| except PermissionError: | |
| configs[path_str][config_file.name] = "Permission denied" | |
| except PermissionError: | |
| configs[path_str] = "Permission denied" | |
| return configs | |
    def gather_storage_info(self) -> None:
        """
        Collect detailed storage and disk information.

        Gathers:
        - Physical disks and their sizes (lsblk)
        - Partition identifiers (blkid)
        - Filesystem usage (df)
        - Software RAID configuration (/proc/mdstat, mdadm)
        - LVM configuration (pvs/vgs/lvs), if present
        - SMART health data, if smartmontools is installed

        Results are stored under self.data["storage"].
        """
        print("Gathering storage information...")
        storage_data = {
            "disks": {},
            "partitions": {},
            "filesystems": {},
            "raid": {},
            "lvm": {}
        }
        # Get block device information using lsblk.
        # -b: sizes in bytes, -o: explicit output columns.
        lsblk_output = self._run_command([
            "lsblk", "-b", "-o",
            "NAME,SIZE,TYPE,MOUNTPOINT,FSTYPE,MODEL,SERIAL,STATE,ROTA"
        ])
        if lsblk_output:
            storage_data["raw_lsblk"] = lsblk_output
            storage_data["disks"] = self._parse_lsblk(lsblk_output)
        # Get detailed partition information (UUIDs, filesystem types, ...).
        # check=False: blkid exits non-zero when it finds nothing / lacks root.
        blkid_output = self._run_command(["blkid"], check=False)
        if blkid_output:
            storage_data["raw_blkid"] = blkid_output
        # Get mounted-filesystem usage.
        df_output = self._run_command(["df", "-h"])
        if df_output:
            storage_data["filesystems"]["usage"] = df_output
        # Check for software RAID (mdadm / md driver).
        mdstat_path = Path("/proc/mdstat")
        if mdstat_path.exists():
            storage_data["raid"]["mdstat"] = mdstat_path.read_text()
        # Get RAID array details if mdadm is installed (best-effort).
        mdadm_detail = self._run_command(
            ["mdadm", "--detail", "--scan"],
            check=False
        )
        if mdadm_detail:
            storage_data["raid"]["mdadm_detail"] = mdadm_detail
        # Check for LVM (Logical Volume Manager); each of these tools may be
        # absent or require root, hence check=False throughout.
        # Physical Volumes:
        pvs_output = self._run_command(["pvs"], check=False)
        if pvs_output:
            storage_data["lvm"]["pvs"] = pvs_output
        # Volume Groups:
        vgs_output = self._run_command(["vgs"], check=False)
        if vgs_output:
            storage_data["lvm"]["vgs"] = vgs_output
        # Logical Volumes:
        lvs_output = self._run_command(["lvs"], check=False)
        if lvs_output:
            storage_data["lvm"]["lvs"] = lvs_output
        # SMART data is useful for judging disk health before committing to a
        # storage layout.
        storage_data["smart"] = self._get_smart_info()
        self.data["storage"] = storage_data
| def _parse_lsblk(self, output: str) -> Dict[str, Any]: | |
| """ | |
| Parse lsblk output to extract disk information. | |
| Args: | |
| output: Raw output from lsblk command | |
| Returns: | |
| Dictionary mapping disk names to their properties | |
| """ | |
| disks = {} | |
| lines = output.split('\n') | |
| # Skip header line | |
| for line in lines[1:]: | |
| if not line.strip(): | |
| continue | |
| parts = line.split() | |
| if len(parts) >= 2: | |
| name = parts[0] | |
| # Clean up the name (remove tree characters like ├─) | |
| clean_name = re.sub(r'[├─└│]', '', name).strip() | |
| disk_info = { | |
| "name": clean_name, | |
| "raw_line": line | |
| } | |
| # Try to parse additional fields if available | |
| if len(parts) > 1: | |
| disk_info["size_bytes"] = parts[1] if parts[1].isdigit() else None | |
| if len(parts) > 2: | |
| disk_info["type"] = parts[2] | |
| disks[clean_name] = disk_info | |
| return disks | |
| def _get_smart_info(self) -> Dict[str, Any]: | |
| """ | |
| Get SMART data for physical disks if smartmontools is installed. | |
| Returns: | |
| Dictionary mapping disk devices to their SMART information | |
| """ | |
| smart_data = {} | |
| # Check if smartctl is available | |
| if not self._run_command(["which", "smartctl"], check=False): | |
| smart_data["available"] = False | |
| return smart_data | |
| smart_data["available"] = True | |
| # Get list of disks | |
| # Look for /dev/sd* and /dev/nvme* devices | |
| dev_path = Path("/dev") | |
| disk_patterns = ["sd[a-z]", "nvme[0-9]n[0-9]"] | |
| for pattern in disk_patterns: | |
| for disk in dev_path.glob(pattern): | |
| disk_name = disk.name | |
| smart_output = self._run_command( | |
| ["smartctl", "-a", str(disk)], | |
| check=False | |
| ) | |
| if smart_output: | |
| smart_data[disk_name] = smart_output | |
| return smart_data | |
| def gather_cpu_info(self) -> None: | |
| """ | |
| Collect CPU information and virtualization capabilities. | |
| Gathers: | |
| - CPU model and specifications | |
| - Number of cores/threads | |
| - Virtualization support (VMX/SVM flags) | |
| - CPU frequency | |
| """ | |
| print("Gathering CPU information...") | |
| cpu_data = {} | |
| # Get CPU info from /proc/cpuinfo | |
| cpuinfo_path = Path("/proc/cpuinfo") | |
| if cpuinfo_path.exists(): | |
| cpuinfo_content = cpuinfo_path.read_text() | |
| cpu_data["raw_cpuinfo"] = cpuinfo_content | |
| cpu_data["parsed"] = self._parse_cpuinfo(cpuinfo_content) | |
| # Get CPU details using lscpu | |
| lscpu_output = self._run_command(["lscpu"]) | |
| if lscpu_output: | |
| cpu_data["lscpu"] = lscpu_output | |
| cpu_data["lscpu_parsed"] = self._parse_lscpu(lscpu_output) | |
| # Check for virtualization support | |
| # Intel: vmx flag, AMD: svm flag | |
| if "raw_cpuinfo" in cpu_data: | |
| flags = re.findall(r'flags\s*:\s*(.+)', cpu_data["raw_cpuinfo"]) | |
| if flags: | |
| flag_list = flags[0].split() | |
| cpu_data["virtualization"] = { | |
| "vmx": "vmx" in flag_list, # Intel VT-x | |
| "svm": "svm" in flag_list, # AMD-V | |
| "supported": ("vmx" in flag_list) or ("svm" in flag_list) | |
| } | |
| self.data["cpu"] = cpu_data | |
| def _parse_cpuinfo(self, content: str) -> Dict[str, Any]: | |
| """ | |
| Parse /proc/cpuinfo to extract CPU details. | |
| Args: | |
| content: Contents of /proc/cpuinfo | |
| Returns: | |
| Dictionary with CPU specifications | |
| """ | |
| cpu_info = { | |
| "model_name": None, | |
| "cores": 0, | |
| "threads": 0, | |
| "flags": [] | |
| } | |
| # Count processor entries to get thread count | |
| processor_count = len(re.findall(r'^processor\s*:', content, re.MULTILINE)) | |
| cpu_info["threads"] = processor_count | |
| # Get model name (all CPUs should have same model) | |
| model_match = re.search(r'model name\s*:\s*(.+)', content) | |
| if model_match: | |
| cpu_info["model_name"] = model_match.group(1) | |
| # Get CPU cores (physical cores) | |
| cpu_cores_match = re.search(r'cpu cores\s*:\s*(\d+)', content) | |
| if cpu_cores_match: | |
| cpu_info["cores"] = int(cpu_cores_match.group(1)) | |
| # Get flags from first processor entry | |
| flags_match = re.search(r'flags\s*:\s*(.+)', content) | |
| if flags_match: | |
| cpu_info["flags"] = flags_match.group(1).split() | |
| return cpu_info | |
| def _parse_lscpu(self, output: str) -> Dict[str, str]: | |
| """ | |
| Parse lscpu output into key-value pairs. | |
| Args: | |
| output: Raw output from lscpu command | |
| Returns: | |
| Dictionary mapping lscpu fields to values | |
| """ | |
| lscpu_data = {} | |
| for line in output.split('\n'): | |
| if ':' in line: | |
| key, value = line.split(':', 1) | |
| lscpu_data[key.strip()] = value.strip() | |
| return lscpu_data | |
| def gather_memory_info(self) -> None: | |
| """ | |
| Collect memory (RAM) information. | |
| Gathers: | |
| - Total memory | |
| - Available memory | |
| - Memory type and speed (from dmidecode if available) | |
| """ | |
| print("Gathering memory information...") | |
| memory_data = {} | |
| # Get memory info from /proc/meminfo | |
| meminfo_path = Path("/proc/meminfo") | |
| if meminfo_path.exists(): | |
| meminfo_content = meminfo_path.read_text() | |
| memory_data["raw_meminfo"] = meminfo_content | |
| memory_data["parsed"] = self._parse_meminfo(meminfo_content) | |
| # Get detailed memory info using dmidecode (requires root) | |
| dmidecode_output = self._run_command( | |
| ["dmidecode", "-t", "memory"], | |
| check=False | |
| ) | |
| if dmidecode_output: | |
| memory_data["dmidecode"] = dmidecode_output | |
| # Get memory usage using free command | |
| free_output = self._run_command(["free", "-h"]) | |
| if free_output: | |
| memory_data["free"] = free_output | |
| self.data["memory"] = memory_data | |
| def _parse_meminfo(self, content: str) -> Dict[str, str]: | |
| """ | |
| Parse /proc/meminfo to extract memory statistics. | |
| Args: | |
| content: Contents of /proc/meminfo | |
| Returns: | |
| Dictionary mapping memory fields to values | |
| """ | |
| meminfo = {} | |
| for line in content.split('\n'): | |
| if ':' in line: | |
| key, value = line.split(':', 1) | |
| meminfo[key.strip()] = value.strip() | |
| return meminfo | |
| def gather_os_info(self) -> None: | |
| """ | |
| Collect operating system information. | |
| Gathers: | |
| - Distribution name and version | |
| - Kernel version | |
| - System architecture | |
| """ | |
| print("Gathering OS information...") | |
| os_data = {} | |
| # Get OS release information | |
| os_release_path = Path("/etc/os-release") | |
| if os_release_path.exists(): | |
| os_release_content = os_release_path.read_text() | |
| os_data["os_release"] = os_release_content | |
| os_data["parsed"] = self._parse_os_release(os_release_content) | |
| # Get kernel version | |
| uname_output = self._run_command(["uname", "-a"]) | |
| if uname_output: | |
| os_data["uname"] = uname_output | |
| # Get specific kernel version | |
| kernel_version = self._run_command(["uname", "-r"]) | |
| if kernel_version: | |
| os_data["kernel_version"] = kernel_version | |
| # Get architecture | |
| arch = self._run_command(["uname", "-m"]) | |
| if arch: | |
| os_data["architecture"] = arch | |
| self.data["os"] = os_data | |
| def _parse_os_release(self, content: str) -> Dict[str, str]: | |
| """ | |
| Parse /etc/os-release file. | |
| Args: | |
| content: Contents of /etc/os-release | |
| Returns: | |
| Dictionary mapping OS release fields to values | |
| """ | |
| os_info = {} | |
| for line in content.split('\n'): | |
| if '=' in line: | |
| key, value = line.split('=', 1) | |
| # Remove quotes from value | |
| value = value.strip('"') | |
| os_info[key] = value | |
| return os_info | |
| def gather_virtualization_info(self) -> None: | |
| """ | |
| Check current virtualization status and capabilities. | |
| Gathers: | |
| - Whether system is already virtualized | |
| - KVM module status | |
| - IOMMU support (for PCIe passthrough) | |
| """ | |
| print("Gathering virtualization information...") | |
| virt_data = {} | |
| # Check if running in a VM | |
| systemd_detect = self._run_command( | |
| ["systemd-detect-virt"], | |
| check=False | |
| ) | |
| if systemd_detect: | |
| virt_data["running_in_vm"] = systemd_detect != "none" | |
| virt_data["vm_type"] = systemd_detect if systemd_detect != "none" else None | |
| # Check KVM module status | |
| lsmod_output = self._run_command(["lsmod"]) | |
| if lsmod_output: | |
| virt_data["kvm_module_loaded"] = "kvm" in lsmod_output | |
| virt_data["kvm_intel_loaded"] = "kvm_intel" in lsmod_output | |
| virt_data["kvm_amd_loaded"] = "kvm_amd" in lsmod_output | |
| # Check IOMMU support (for PCIe passthrough) | |
| dmesg_iommu = self._run_command( | |
| ["dmesg", "|", "grep", "-i", "iommu"], | |
| check=False | |
| ) | |
| if dmesg_iommu: | |
| virt_data["iommu_detected"] = True | |
| virt_data["iommu_info"] = dmesg_iommu | |
| # Check /dev/kvm existence | |
| virt_data["kvm_device_exists"] = Path("/dev/kvm").exists() | |
| self.data["virtualization"] = virt_data | |
| def gather_all(self) -> None: | |
| """Run all information gathering methods.""" | |
| print("\n=== Proxmox System Information Gatherer ===\n") | |
| self.gather_os_info() | |
| self.gather_cpu_info() | |
| self.gather_memory_info() | |
| self.gather_network_info() | |
| self.gather_storage_info() | |
| self.gather_virtualization_info() | |
| print("\n=== Information gathering complete ===\n") | |
| def save_to_file(self, filename: str = "proxmox-system-info.json") -> None: | |
| """ | |
| Save gathered information to a JSON file. | |
| Args: | |
| filename: Output filename (default: proxmox-system-info.json) | |
| """ | |
| output_path = Path(filename) | |
| print(f"Saving information to {output_path}...") | |
| with open(output_path, 'w') as f: | |
| json.dump(self.data, f, indent=2) | |
| print(f"✓ Saved to {output_path}") | |
| print(f" File size: {output_path.stat().st_size} bytes") | |
    def print_summary(self) -> None:
        """
        Print a human-readable summary of gathered information.

        Reads only from self.data; sections whose data was not gathered
        (or whose parse step produced nothing) are silently skipped.
        """
        print("\n=== System Information Summary ===\n")
        # OS Information (requires _parse_os_release to have run).
        if "os" in self.data and "parsed" in self.data["os"]:
            os_info = self.data["os"]["parsed"]
            print(f"OS: {os_info.get('PRETTY_NAME', 'Unknown')}")
            print(f"Kernel: {self.data['os'].get('kernel_version', 'Unknown')}")
            print(f"Architecture: {self.data['os'].get('architecture', 'Unknown')}")
            print()
        # CPU Information (requires _parse_cpuinfo to have run).
        if "cpu" in self.data and "parsed" in self.data["cpu"]:
            cpu = self.data["cpu"]["parsed"]
            print(f"CPU: {cpu.get('model_name', 'Unknown')}")
            print(f"Cores: {cpu.get('cores', 'Unknown')}")
            print(f"Threads: {cpu.get('threads', 'Unknown')}")
            if "virtualization" in self.data["cpu"]:
                virt_support = "Yes" if self.data["cpu"]["virtualization"].get("supported") else "No"
                print(f"Virtualization Support: {virt_support}")
            print()
        # Memory Information (MemTotal straight from parsed /proc/meminfo).
        if "memory" in self.data and "parsed" in self.data["memory"]:
            mem = self.data["memory"]["parsed"]
            total_mem = mem.get("MemTotal", "Unknown")
            print(f"Memory: {total_mem}")
            print()
        # Network Information: one line per interface, then bonds if any.
        if "network" in self.data:
            print("Network Interfaces:")
            if "interfaces" in self.data["network"]:
                for iface_name, iface_data in self.data["network"]["interfaces"].items():
                    state = iface_data.get("state", "UNKNOWN")
                    ipv4 = iface_data.get("ipv4_addresses", [])
                    # Show only the first IPv4 address in the summary line.
                    ipv4_str = ipv4[0] if ipv4 else "No IP"
                    print(f" - {iface_name}: {state} ({ipv4_str})")
            if "bonds" in self.data["network"] and self.data["network"]["bonds"]:
                print("\nBonded Interfaces:")
                for bond_name, bond_data in self.data["network"]["bonds"].items():
                    mode = bond_data.get("mode", "Unknown")
                    slaves = ", ".join(bond_data.get("slaves", []))
                    print(f" - {bond_name}: {mode}")
                    print(f" Slaves: {slaves}")
            print()
        # Storage Information: count only whole disks, not partitions.
        if "storage" in self.data and "disks" in self.data["storage"]:
            print("Storage Devices:")
            disk_count = len([
                d for d in self.data["storage"]["disks"].values()
                if d.get("type") == "disk"
            ])
            print(f" Total disks: {disk_count}")
            print()
def main():
    """Entry point: warn when not root, gather everything, summarize, save."""
    # Without root, dmidecode/smartctl/protected config reads come back
    # incomplete — give the user a chance to bail out and re-run with sudo.
    if os.geteuid() != 0:
        print("Warning: Some information requires root privileges.")
        print("Run with sudo for complete information gathering.")
        print()
        response = input("Continue anyway? (y/n): ")
        if response.lower() != 'y':
            print("Exiting.")
            sys.exit(0)

    gatherer = SystemInfoGatherer()
    gatherer.gather_all()
    gatherer.print_summary()

    output_file = "proxmox-system-info.json"
    gatherer.save_to_file(output_file)

    print("\n=== Next Steps ===")
    print(f"1. Review the generated file: {output_file}")
    print("2. Share this file with the AI agent for Proxmox deployment planning")
    print("3. The agent will use this data to create a custom installation script")
    print()


if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment