# Source code for revive.utils.sys_utils

import io
import subprocess
import psutil
try:
    import pynvml
except ImportError:
    # pynvml is optional; without it get_allocated_cuda_memory falls back to
    # NaN. Narrowed from a bare ``except:`` so real errors are not swallowed.
    pass
import torch
import pandas as pd
import numpy as np


def get_memory_usage():
    """Return the amount of system RAM currently in use, in GiB."""
    vm = psutil.virtual_memory()
    return vm.used / (1024 ** 3)
def get_cpu_stats():
    """Return the total %CPU across all processes as reported by ``top``.

    Runs ``top -bn1`` (batch mode, one iteration), parses the per-process
    table with pandas and sums the %CPU column.

    Returns:
        float: sum of the per-process %CPU values (may exceed 100 on
        multi-core machines); NaN entries from unparsable rows are skipped.
    """
    # Run top in batch mode for a single iteration and capture its output.
    top_output = subprocess.check_output(['top', '-bn1'], universal_newlines=True)
    # Parse the process table into a DataFrame; the first 6 lines are the
    # summary header, and the first parsed row is the column-title row.
    df = pd.read_csv(
        io.StringIO(top_output),
        skiprows=6,
        header=None,
        sep=r'\s+',  # ``delim_whitespace=True`` is deprecated in modern pandas
        usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        names=['PID', 'USER', 'PR', 'NI', 'VIRT', 'RES', 'SHR', 'S', '%CPU', '%MEM'],
    ).iloc[1:]
    # Depending on locale/format, pandas may parse %CPU as floats or as
    # strings (possibly with a trailing '%'). The original called
    # ``.str.rstrip('%')`` unconditionally, which raises AttributeError on a
    # numeric column; coerce through str -> numeric to handle both cases.
    cpu = pd.to_numeric(df['%CPU'].astype(str).str.rstrip('%'), errors='coerce')
    return cpu.sum()
def get_gpu_stats():
    """Return the peak GPU memory allocated by torch on ``cuda:0``, in GiB.

    Returns:
        float: ``torch.cuda.max_memory_allocated`` for the first CUDA
        device converted to GiB, or ``numpy.nan`` when no CUDA device is
        visible (the original raised KeyError on the empty DataFrame there).
    """
    device_count = torch.cuda.device_count()
    if device_count == 0:
        # No GPU available: report NaN instead of indexing an empty frame.
        return np.nan
    rows = []
    for idx in range(device_count):
        device = torch.device(f"cuda:{idx}")
        total_memory = torch.cuda.get_device_properties(device).total_memory
        # Peak memory ever allocated by torch tensors on this device.
        peak_allocated = torch.cuda.max_memory_allocated(device)
        rows.append({
            "device": str(device),
            "memory_used(GB)": peak_allocated / 1073741824,  # bytes -> GiB
            "total_memory(GB)": total_memory / 1073741824,
        })
    result = pd.DataFrame(rows)
    # Only the first device's usage is reported, matching the original API.
    return result["memory_used(GB)"].values[0]
def get_allocated_cuda_memory():
    """Return the used memory of GPU 0 as reported by NVML, in GiB.

    Returns:
        float: used device memory in GiB, or ``numpy.nan`` when NVML is
        unavailable -- including when the optional ``pynvml`` import at the
        top of the module failed.
    """
    try:
        pynvml.nvmlInit()
        try:
            handle = pynvml.nvmlDeviceGetHandleByIndex(0)  # here, 0 is the GPU id
            meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
            return meminfo.used / 1024 / 1024 / 1024  # convert to GB
        finally:
            # Release the NVML session opened by nvmlInit; the original
            # leaked it on every call.
            pynvml.nvmlShutdown()
    except Exception:
        # Narrowed from a bare ``except:``; still covers NameError (pynvml
        # never imported) and NVMLError (no driver / no device).
        return np.nan
def get_sys_stats():
    """Collect CPU, GPU and RAM usage into a single-row DataFrame.

    Columns: ``cpu_stats`` (summed %CPU from top), ``gpu_stats`` (NVML used
    GPU memory in GiB, NaN if unavailable), ``memory_stats`` (used RAM, GiB).
    """
    return pd.DataFrame({
        "cpu_stats": [get_cpu_stats()],
        "gpu_stats": [get_allocated_cuda_memory()],
        "memory_stats": [get_memory_usage()],
    })
if __name__ == "__main__":
    # Quick manual check: dump the current system stats table.
    stats = get_sys_stats()
    print(stats)