
Get/View Memory & CPU usage via NodeJS

I see there are several node packages that allow you to look up a specific process's usage, such as https://www.npmjs.com/package/usage

I am trying to get the overall server usage/stats (CPU and memory), not just those of one specific process. Maybe even disk space usage.

I am currently unable to find anything like this. Is this possible?

The native os module can give you some memory and CPU usage statistics.

var os = require('os');

console.log(os.cpus());
console.log(os.totalmem());
console.log(os.freemem());

The cpus() function gives you cumulative CPU times per core rather than a live percentage, but you can calculate the current usage by sampling it twice over an interval and applying a formula, as mentioned in this answer.

There is also a package that does this for you, called os-utils.

Taken from the example on GitHub:

var os = require('os-utils');

os.cpuUsage(function(v){
    console.log( 'CPU Usage (%): ' + v );
});

For information about the disk you can use the diskspace package.
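A minimal sketch based on my recollection of the diskspace README (treat the check() signature and the result fields as assumptions):

var diskspace = require('diskspace');

// On Windows pass a drive letter such as 'C'; on Linux/macOS pass a mount path such as '/'.
diskspace.check('/', function (err, result) {
    if (err) return console.error(err);
    // result.total, result.used and result.free are reported in bytes (assumption).
    console.log('Total: ' + result.total + ', free: ' + result.free);
});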

Check out node-os-utils. It covers:

  • CPU average usage
  • Free and used drive space
  • Free and used memory space
  • Operating System
  • All processes running
  • TTY/SSH opened
  • Total opened files
  • Network speed (input and output)
var osu = require('node-os-utils');
var cpu = osu.cpu;

cpu.usage()
    .then(info => {
        console.log(info);
    });
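The same package can also report drive and memory usage in the same promise-based style (method names recalled from its README, so treat them as assumptions):

const osu = require('node-os-utils');

// Free and used drive space (assumed API: osu.drive.info()).
osu.drive.info()
    .then(info => console.log(info));

// Free and used memory space (assumed API: osu.mem.info()).
osu.mem.info()
    .then(info => console.log(info));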

As I found no code to solve this problem and don't want to rely on other packages just for a few lines of code, I wrote a function that calculates the average CPU load between two successive function calls. I am assuming t_idle + t_user + t_sys = total CPU time, and the results are roughly similar to those of my Windows Task Manager; however, the usage seems a little more sensitive to me (e.g. music playback increases the CPU load more than in the Windows Task Manager). Please correct me if my assumptions are wrong.

const os = require('os');

// Initial snapshot; wait a little before taking the first measurement.
let timesBefore = os.cpus().map(c => c.times);

// Call this function periodically, e.g. using setInterval (see the usage sketch below).
function getAverageUsage() {
    let timesAfter = os.cpus().map(c => c.times);
    let timeDeltas = timesAfter.map((t, i) => ({
        user: t.user - timesBefore[i].user,
        sys: t.sys - timesBefore[i].sys,
        idle: t.idle - timesBefore[i].idle
    }));

    timesBefore = timesAfter;

    return timeDeltas
        .map(times => 1 - times.idle / (times.user + times.sys + times.idle))
        .reduce((l1, l2) => l1 + l2) / timeDeltas.length;
}
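A minimal usage sketch for the function above (the interval length is arbitrary):

setInterval(() => {
    const usage = getAverageUsage();
    console.log('Average CPU usage since last call: ' + (usage * 100).toFixed(1) + '%');
}, 1000);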

You can also use process.cpuUsage() to get the user and system CPU time of the current process in microseconds. Passing the result of a previous call returns the difference since that call.

https://nodejs.org/api/process.html#process_process_cpuusage_previousvalue
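A minimal sketch of the diffing behaviour (note that this measures the current Node.js process only, not the whole machine):

let previousUsage = process.cpuUsage();

setInterval(() => {
    // Passing the previous snapshot returns the delta since that snapshot, in microseconds.
    const delta = process.cpuUsage(previousUsage);
    previousUsage = process.cpuUsage();
    console.log('user: ' + delta.user + ' µs, system: ' + delta.system + ' µs');
}, 1000);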

Node.js has the os.loadavg() method:

// Require os module
const os = require('os');

// Printing os.loadavg() value
var avg_load = os.loadavg();

console.log("Load average (1 minute): "
            + String(avg_load[0]));

console.log("Load average (5 minutes): "
            + String(avg_load[1]));

console.log("Load average (15 minutes): "
            + String(avg_load[2]));

More info here
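Since load average is a count of runnable processes rather than a percentage, one rough approximation is to divide it by the number of cores; a minimal sketch:

const os = require('os');

// 1-minute load average divided by core count gives a rough utilization estimate.
// Note: os.loadavg() returns [0, 0, 0] on Windows, where load average is not available.
const [oneMinute] = os.loadavg();
const approxUsagePercent = (oneMinute / os.cpus().length) * 100;

console.log('Approximate CPU utilization (1 min): ' + approxUsagePercent.toFixed(1) + '%');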

You can also use the systeminformation package:

const si = require('systeminformation');

async function getinfo() {
 const cpu = await si.cpu();
 const disk = (await si.diskLayout())[0];
 const os = await si.osInfo();
 const versions = await si.versions();
 const ram = await si.mem();

 // CPU Info
 let info = `CPU: ${cpu.manufacturer} ${cpu.brand} ${cpu.speed}GHz\n`;
 info += `Cores: ${cpu.cores} (${cpu.physicalCores} Physical)\n`;

 // RAM Info
 const totalRam = Math.round(ram.total / 1024 / 1024 / 1024);
 info += `RAM: ${totalRam}GB\n`;

 // Disk Info
 const size = Math.round(disk.size / 1024 / 1024 / 1024);
 info += `Disk: ${disk.vendor} ${disk.name} ${size}GB ${disk.type} (${disk.interfaceType})\n`;

 //OS Info
 info += `OS: ${os.distro} ${os.codename} (${os.platform})\n`;
 info += `Kernel: ${os.kernel} ${os.arch}\n`;

 // Node Info
 info += `Node: v${versions.node}\n`;
 info += `V8: ${versions.v8}`;
 return info;
}
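A minimal usage example for the function above (assuming it is defined in the same file as the require):

getinfo().then(info => console.log(info));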

Of course it is possible, but you'll need a C++ native module to do that. And keep in mind that every OS has its own way of querying system resource usage.

For example, if you're on Windows (which might be what you're looking for since usage doesn't support Windows), you could do something like

performance.cpp

#include <node.h>
#include "performance_algorithm.hpp"

using namespace v8;

void InitAll(Handle<Object> exports) {
    PerformanceAlgorithm::Initialize();
    PerformanceAlgorithm::RegisterMethod(exports);
}

NODE_MODULE(Performance, InitAll)

performance_algorithm.cpp

#include <algorithm>

#include "baton.hpp"
#include "structs.hpp"
#include "performance_algorithm.hpp"

void PerformanceAlgorithm::Initialize() {
    PdhOpenQuery(NULL, NULL, &cpuQuery);
    PdhAddCounter(cpuQuery, "\\Processor(_Total)\\% Processor Time", NULL, &cpuTotal);
    PdhCollectQueryData(cpuQuery);
}

void PerformanceAlgorithm::RegisterMethod(Handle<Object> exports) {
    NODE_SET_METHOD(exports, "getPerformanceData", PerformanceAlgorithm::GetPerformanceDataAsync);
}

void PerformanceAlgorithm::GetPerformanceDataAsync(const FunctionCallbackInfo<Value>& args) {
    Isolate* isolate = Isolate::GetCurrent();
    HandleScope scope(isolate);

    if (args.Length() != 1) {
        isolate->ThrowException(Exception::TypeError(String::NewFromUtf8(isolate, "Wrong number of arguments")));
    } else {
        if (!args[0]->IsFunction()) {
            isolate->ThrowException(Exception::TypeError(String::NewFromUtf8(isolate, "Wrong arguments type")));
        } else {
            Local<Function> callbackFunction = Local<Function>::Cast(args[0]);

            Baton<string, PerformanceData>* baton = new Baton<string, PerformanceData>();
            baton->request.data = baton;
            baton->callbackFunction.Reset(isolate, callbackFunction);

            uv_queue_work(uv_default_loop(), &baton->request, PerformanceAlgorithm::GetPerformanceDataWork, PerformanceAlgorithm::GetPerformanceDataAsyncAfter);
        }
    }
}

void PerformanceAlgorithm::GetPerformanceDataWork(uv_work_t* request) {
    Baton<string, PerformanceData>* baton = static_cast<Baton<string, PerformanceData>*>(request->data);

    baton->result.memory_info.dwLength = sizeof(MEMORYSTATUSEX);
    GlobalMemoryStatusEx(&baton->result.memory_info);

    PDH_FMT_COUNTERVALUE counterVal;
    PdhCollectQueryData(cpuQuery);
    PdhGetFormattedCounterValue(cpuTotal, PDH_FMT_DOUBLE, NULL, &counterVal);
    baton->result.cpu_usage = counterVal.doubleValue;

    DWORD processIDs[1024], bytesReturned;
    EnumProcesses(processIDs, sizeof(processIDs), &bytesReturned);

    DWORD numberOfProcesses = bytesReturned / sizeof(DWORD);
    for (int i = 0; i < numberOfProcesses; i++) {
        HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, processIDs[i]);

        HMODULE hMods[1024];
        DWORD cbNeeded;
        if (EnumProcessModules(hProcess, hMods, sizeof(hMods), &cbNeeded)) {
            for (int j = 0; j < (cbNeeded / sizeof(HMODULE)); j++) {
                TCHAR szModName[MAX_PATH];
                GetModuleFileNameEx(hProcess, hMods[j], szModName, sizeof(szModName) / sizeof(TCHAR));

                ProcessInfo info;
                info.process_id = processIDs[i];
                info.path = string(szModName);

                baton->result.processes.push_back(info);

                break;
            }
        }

        CloseHandle(hProcess);
    }

    sort(baton->result.processes.begin(), baton->result.processes.end(), [](ProcessInfo a, ProcessInfo b) -> bool {
        return a.process_id < b.process_id;
    });

    GetPerformanceInfo(&baton->result.performance_info, sizeof(PERFORMANCE_INFORMATION));
}

void PerformanceAlgorithm::GetPerformanceDataAsyncAfter(uv_work_t* request, int status) {
    Isolate* isolate = Isolate::GetCurrent();
    HandleScope scope(isolate);
    EscapableHandleScope escapableHandleScope(isolate);

    Baton<string, PerformanceData>* baton = static_cast<Baton<string, PerformanceData>*>(request->data);
    Local<Function> callbackFunction = Local<Function>::New(isolate, baton->callbackFunction);

    Local<Object> returnValue = Object::New(isolate);
    returnValue->Set(String::NewFromUtf8(isolate, "cpu_usage"), Number::New(isolate, baton->result.cpu_usage));
    returnValue->Set(String::NewFromUtf8(isolate, "ram_usage"), Number::New(isolate, baton->result.memory_info.dwMemoryLoad));
    returnValue->Set(String::NewFromUtf8(isolate, "total_physical_memory"), Number::New(isolate, baton->result.memory_info.ullTotalPhys));
    returnValue->Set(String::NewFromUtf8(isolate, "available_physical_memory"), Number::New(isolate, baton->result.memory_info.ullAvailPhys));
    returnValue->Set(String::NewFromUtf8(isolate, "total_page_file"), Number::New(isolate, baton->result.memory_info.ullTotalPageFile));
    returnValue->Set(String::NewFromUtf8(isolate, "available_page_file"), Number::New(isolate, baton->result.memory_info.ullAvailPageFile));
    returnValue->Set(String::NewFromUtf8(isolate, "total_virtual"), Number::New(isolate, baton->result.memory_info.ullTotalVirtual));
    returnValue->Set(String::NewFromUtf8(isolate, "available_virtual"), Number::New(isolate, baton->result.memory_info.ullAvailVirtual));

    Local<Array> processes = Array::New(isolate, baton->result.processes.size());
    for (int i = 0; i < baton->result.processes.size(); i++) {
        Local<Object> processInfo = Object::New(isolate);
        processInfo->Set(String::NewFromUtf8(isolate, "process_id"), Number::New(isolate, baton->result.processes[i].process_id));
        processInfo->Set(String::NewFromUtf8(isolate, "path"), String::NewFromUtf8(isolate, baton->result.processes[i].path.c_str()));

        processes->Set(i, processInfo);
    }
    returnValue->Set(String::NewFromUtf8(isolate, "running_processes"), processes);

    const unsigned int argc = 1;
    Handle<Value> argv[argc] = { escapableHandleScope.Escape(returnValue) };
    callbackFunction->Call(isolate->GetCurrentContext()->Global(), argc, argv);

    baton->callbackFunction.Reset();
    delete baton;
}
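A hypothetical JavaScript call site for the compiled addon; the require path assumes node-gyp's default Release output directory and the module name from NODE_MODULE above:

// Both the path and the shape of the callback argument follow the C++ code above.
const performance = require('./build/Release/Performance');

performance.getPerformanceData(function (data) {
    console.log('CPU usage: ' + data.cpu_usage + '%');
    console.log('RAM usage: ' + data.ram_usage + '%');
    console.log('Running processes: ' + data.running_processes.length);
});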
