bringing back the embed

Raven Scott 2023-05-20 15:11:58 +02:00
parent 2ac55922d2
commit 51a41292e6


@@ -5,6 +5,7 @@ import { resetResponses, userResetMessages } from './assets/resetMessages.js';
import { errorMessages, busyResponses } from './assets/errorMessages.js';
import cpuStat from 'cpu-stat';
import os from 'os';
import smi from 'node-nvidia-smi';
import {
Client,
@@ -270,8 +271,9 @@ async function generateResponse(conversation, message) {
const freeMemory = os.freemem() / 1024 / 1024 / 1024;
const totalMemory = os.totalmem() / 1024 / 1024 / 1024;
const usedMemory = totalMemory - freeMemory;
let embedData;
if (process.env.GPU == 0) {
embedData = {
color: 0x0099ff,
title: 'Please wait.. I am thinking...',
fields: [
@@ -289,7 +291,6 @@ async function generateResponse(conversation, message) {
},
],
};
// if the message object doesn't exist, create it
if (!botMessage) {
(async () => {
@@ -298,6 +299,62 @@ async function generateResponse(conversation, message) {
} else {
botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
}
} else {
smi(function (err, data) {
if (err) {
// Handle error if smi function fails
console.error(err);
return;
}
let utilization = data.nvidia_smi_log.gpu.utilization;
let gpuUtilization = utilization.gpu_util;
let memoryUtilization = utilization.memory_util;
let gpuTemp = data.nvidia_smi_log.gpu.temperature.gpu_temp;
// These are not used until nvidia-docker fixes their support
let gpuTarget = data.nvidia_smi_log.gpu.temperature.gpu_target_temperature;
let gpuFanSpeed = data.nvidia_smi_log.gpu.fan_speed;
embedData = {
color: 0x0099ff,
title: 'Please wait.. I am thinking...',
fields: [
{
name: 'System Load',
value: `${systemLoad.toFixed(2)}%`,
},
{
name: 'Memory Usage',
value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
},
{
name: 'GPU Utilization',
value: `${gpuUtilization}`,
},
{
name: 'Memory Utilization',
value: `${memoryUtilization}`,
},
{
name: 'GPU Temperature',
value: `${gpuTemp}`,
},
{
name: 'Time',
value: `~${time} seconds.`,
},
],
};
// if the message object doesn't exist, create it
if (!botMessage) {
(async () => {
botMessage = await message.channel.send({ embeds: [embedData] });
})();
} else {
botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
}
})
}
});
};
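
For context on the new GPU branch: node-nvidia-smi shells out to nvidia-smi and passes the parsed report to a callback. A minimal standalone sketch of reading the same fields the embed uses, assuming the nvidia_smi_log data shape shown in the diff above:

import smi from 'node-nvidia-smi';

// Minimal sketch (assumed data shape, matching the diff above):
// node-nvidia-smi runs `nvidia-smi -q -x` and parses it into data.nvidia_smi_log.
smi(function (err, data) {
  if (err) {
    // nvidia-smi is missing or no GPU is visible to the process
    console.error(err);
    return;
  }
  const gpu = data.nvidia_smi_log.gpu;
  console.log('GPU utilization:', gpu.utilization.gpu_util);
  console.log('Memory utilization:', gpu.utilization.memory_util);
  console.log('GPU temperature:', gpu.temperature.gpu_temp);
});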