From 51a41292e68014d13702ef2a3528d56d9d9f0010 Mon Sep 17 00:00:00 2001
From: Raven Scott
Date: Sat, 20 May 2023 15:11:58 +0200
Subject: [PATCH] bringing back the embed

---
 llamabot.js | 115 +++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 86 insertions(+), 29 deletions(-)

diff --git a/llamabot.js b/llamabot.js
index 893d7cb..f716887 100644
--- a/llamabot.js
+++ b/llamabot.js
@@ -5,6 +5,7 @@ import { resetResponses, userResetMessages } from './assets/resetMessages.js';
 import { errorMessages, busyResponses } from './assets/errorMessages.js';
 import cpuStat from 'cpu-stat';
 import os from 'os';
+import smi from 'node-nvidia-smi';
 
 import {
   Client,
@@ -154,9 +155,9 @@ client.on('messageCreate', async (message) => {
 
       // if we are over the discord char limit we need chunks...
       if (response.length > limit) {
-
+        
         const chunks = response.match(new RegExp(`.{1,${limit}}`, "g"));
-        if (chunks.length >= 15) return await message.channel.send("Response chunks too large. Try again");
+        if (chunks.length >= 15) return await message.channel.send("Response chunks too large. Try again");
 
         for (let i = 0; i < chunks.length; i++) {
 
@@ -240,7 +241,7 @@ async function generateResponse(conversation, message) {
 
       // Append a new line and the new content to the existing content of the last message
       conversation.messages[lastMessageIndex].content += "\n" + response;
-
+      
       console.log("A URL was provided, response: " + response)
 
     } catch (err) {
@@ -270,33 +271,89 @@ async function generateResponse(conversation, message) {
     const freeMemory = os.freemem() / 1024 / 1024 / 1024;
     const totalMemory = os.totalmem() / 1024 / 1024 / 1024;
     const usedMemory = totalMemory - freeMemory;
-
-    const embedData = {
-      color: 0x0099ff,
-      title: 'Please wait.. I am thinking...',
-      fields: [
-        {
-          name: 'System Load',
-          value: `${systemLoad.toFixed(2)}%`,
-        },
-        {
-          name: 'Memory Usage',
-          value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
-        },
-        {
-          name: 'Time',
-          value: `~${time} seconds.`,
-        },
-      ],
-    };
-
-    // if the message object doesn't exist, create it
-    if (!botMessage) {
-      (async () => {
-        botMessage = await message.channel.send({ embeds: [embedData] });
-      })();
+    let embedData;
+    if (process.env.GPU == 0) {
+      embedData = {
+        color: 0x0099ff,
+        title: 'Please wait.. I am thinking...',
+        fields: [
+          {
+            name: 'System Load',
+            value: `${systemLoad.toFixed(2)}%`,
+          },
+          {
+            name: 'Memory Usage',
+            value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
+          },
+          {
+            name: 'Time',
+            value: `~${time} seconds.`,
+          },
+        ],
+      };
+      // if the message object doesn't exist, create it
+      if (!botMessage) {
+        (async () => {
+          botMessage = await message.channel.send({ embeds: [embedData] });
+        })();
+      } else {
+        botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
+      }
     } else {
-      botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
+      smi(function (err, data) {
+        if (err) {
+          // Handle error if smi function fails
+          console.error(err);
+          return;
+        }
+
+        let utilization = data.nvidia_smi_log.gpu.utilization;
+        let gpuUtilization = utilization.gpu_util;
+        let memoryUtilization = utilization.memory_util;
+        let gpuTemp = data.nvidia_smi_log.gpu.temperature.gpu_temp;
+
+        // These are not used until nvidia-docker fixes their support
+        let gpuTarget = data.nvidia_smi_log.gpu.temperature.gpu_target_temperature;
+        let gpuFanSpeed = data.nvidia_smi_log.gpu.fan_speed;
+        embedData = {
+          color: 0x0099ff,
+          title: 'Please wait.. I am thinking...',
+          fields: [
+            {
+              name: 'System Load',
+              value: `${systemLoad.toFixed(2)}%`,
+            },
+            {
+              name: 'Memory Usage',
+              value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
+            },
+            {
+              name: 'GPU Utilization',
+              value: `${gpuUtilization}`,
+            },
+            {
+              name: 'Memory Utilization',
+              value: `${memoryUtilization}`,
+            },
+            {
+              name: 'GPU Temperature',
+              value: `${gpuTemp}`,
+            },
+            {
+              name: 'Time',
+              value: `~${time} seconds.`,
+            },
+          ],
+        };
+        // if the message object doesn't exist, create it
+        if (!botMessage) {
+          (async () => {
+            botMessage = await message.channel.send({ embeds: [embedData] });
+          })();
+        } else {
+          botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
+        }
+      })
     }
   });
 };
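
Note (not part of the commit): the new GPU branch relies on node-nvidia-smi, which shells out to nvidia-smi and hands back the query output as a plain JS object rooted at nvidia_smi_log. A minimal standalone sketch of that callback shape follows; the field paths are copied from the patch above, while the sample values in comments are illustrative assumptions that vary by driver and GPU.

// sketch.mjs - probe the same node-nvidia-smi fields the patch reads
import smi from 'node-nvidia-smi';

smi(function (err, data) {
  if (err) {
    // e.g. nvidia-smi not installed or no NVIDIA GPU present
    console.error(err);
    return;
  }
  const gpu = data.nvidia_smi_log.gpu;                     // same root path as the patch
  console.log('GPU util:    ' + gpu.utilization.gpu_util);    // e.g. "37 %" (assumed sample)
  console.log('Memory util: ' + gpu.utilization.memory_util); // e.g. "12 %" (assumed sample)
  console.log('Temperature: ' + gpu.temperature.gpu_temp);    // e.g. "54 C" (assumed sample)
});

Because the values arrive as preformatted strings rather than numbers, the patch interpolates them into the embed fields as-is instead of calling toFixed() on them the way it does for the cpu-stat and os readings.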