bringing back the embed

parent 2ac55922d2
commit 51a41292e6

llamabot.js (115 lines changed)
@@ -5,6 +5,7 @@ import { resetResponses, userResetMessages } from './assets/resetMessages.js';
 import { errorMessages, busyResponses } from './assets/errorMessages.js';
 import cpuStat from 'cpu-stat';
 import os from 'os';
+import smi from 'node-nvidia-smi';
 
 import {
     Client,
@@ -154,9 +155,9 @@ client.on('messageCreate', async (message) => {
         // if we are over the discord char limit we need chunks...
         if (response.length > limit) {
 
 
             const chunks = response.match(new RegExp(`.{1,${limit}}`, "g"));
-            if (chunks.length >= 15) return await message.channel.send("Response chunks too large. Try again");
+            if (chunks.length >= 15) return await message.channel.send("Response chunks too large. Try again");
 
 
             for (let i = 0; i < chunks.length; i++) {
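For context on the chunking kept by the hunk above: the regex splits the model's reply into pieces no longer than the Discord message limit, and the bot refuses replies that would need 15 or more pieces. A standalone sketch, not part of this commit (the 2000-character limit and the sample string are assumptions, and `limit` here stands in for whatever llamabot.js actually configures):

// Sketch only – not from the commit. Assumes Discord's 2000-character message limit.
const limit = 2000;
const response = "x".repeat(4500);                                  // stand-in for a long model reply
const chunks = response.match(new RegExp(`.{1,${limit}}`, "g")) || [];
console.log(chunks.length);                                         // 3 chunks: 2000 + 2000 + 500 chars

Note that `.` does not match newlines, so a multi-line reply also splits at line boundaries and the newline characters themselves are not included in any chunk.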
@@ -240,7 +241,7 @@ async function generateResponse(conversation, message) {
 
         // Append a new line and the new content to the existing content of the last message
         conversation.messages[lastMessageIndex].content += "\n" + response;
 
-
+        console.log("A URL was provided, response: " + response)
 
     } catch (err) {
@@ -270,33 +271,89 @@ async function generateResponse(conversation, message) {
     const freeMemory = os.freemem() / 1024 / 1024 / 1024;
     const totalMemory = os.totalmem() / 1024 / 1024 / 1024;
     const usedMemory = totalMemory - freeMemory;
-
-    const embedData = {
-        color: 0x0099ff,
-        title: 'Please wait.. I am thinking...',
-        fields: [
-            {
-                name: 'System Load',
-                value: `${systemLoad.toFixed(2)}%`,
-            },
-            {
-                name: 'Memory Usage',
-                value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
-            },
-            {
-                name: 'Time',
-                value: `~${time} seconds.`,
-            },
-        ],
-    };
-
-    // if the message object doesn't exist, create it
-    if (!botMessage) {
-        (async () => {
-            botMessage = await message.channel.send({ embeds: [embedData] });
-        })();
+    let embedData;
+    if (process.env.GPU == 0) {
+        embedData = {
+            color: 0x0099ff,
+            title: 'Please wait.. I am thinking...',
+            fields: [
+                {
+                    name: 'System Load',
+                    value: `${systemLoad.toFixed(2)}%`,
+                },
+                {
+                    name: 'Memory Usage',
+                    value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
+                },
+                {
+                    name: 'Time',
+                    value: `~${time} seconds.`,
+                },
+            ],
+        };
+        // if the message object doesn't exist, create it
+        if (!botMessage) {
+            (async () => {
+                botMessage = await message.channel.send({ embeds: [embedData] });
+            })();
+        } else {
+            botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
+        }
     } else {
-        botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
+        smi(function (err, data) {
+            if (err) {
+                // Handle error if smi function fails
+                console.error(err);
+                return;
+            }
+
+            let utilization = data.nvidia_smi_log.gpu.utilization;
+            let gpuUtilization = utilization.gpu_util;
+            let memoryUtilization = utilization.memory_util;
+            let gpuTemp = data.nvidia_smi_log.gpu.temperature.gpu_temp;
+
+            // These are not used until nvidia-docker fixes their support
+            let gpuTarget = data.nvidia_smi_log.gpu.temperature.gpu_target_temperature;
+            let gpuFanSpeed = data.nvidia_smi_log.gpu.fan_speed;
+            embedData = {
+                color: 0x0099ff,
+                title: 'Please wait.. I am thinking...',
+                fields: [
+                    {
+                        name: 'System Load',
+                        value: `${systemLoad.toFixed(2)}%`,
+                    },
+                    {
+                        name: 'Memory Usage',
+                        value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
+                    },
+                    {
+                        name: 'GPU Utilization',
+                        value: `${gpuUtilization}`,
+                    },
+                    {
+                        name: 'Memory Utilization',
+                        value: `${memoryUtilization}`,
+                    },
+                    {
+                        name: 'GPU Temperature',
+                        value: `${gpuTemp}`,
+                    },
+                    {
+                        name: 'Time',
+                        value: `~${time} seconds.`,
+                    },
+                ],
+            };
+            // if the message object doesn't exist, create it
+            if (!botMessage) {
+                (async () => {
+                    botMessage = await message.channel.send({ embeds: [embedData] });
+                })();
+            } else {
+                botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
+            }
+        })
     }
 });
 };
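The CPU-only and GPU branches above build nearly identical embeds, differing only in the GPU fields taken from the node-nvidia-smi callback. Purely as an illustration of that shared shape, and not part of this commit (`buildThinkingEmbed` and its `gpu` argument are hypothetical), the duplication could be collapsed into one helper:

// Hypothetical helper mirroring the embed shape used in both branches of the diff.
// `gpu` is an optional object like { gpuUtilization, memoryUtilization, gpuTemp }
// filled from the node-nvidia-smi callback data.
function buildThinkingEmbed(systemLoad, usedMemory, totalMemory, time, gpu) {
    const fields = [
        { name: 'System Load', value: `${systemLoad.toFixed(2)}%` },
        { name: 'Memory Usage', value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB` },
    ];
    if (gpu) {
        fields.push(
            { name: 'GPU Utilization', value: `${gpu.gpuUtilization}` },
            { name: 'Memory Utilization', value: `${gpu.memoryUtilization}` },
            { name: 'GPU Temperature', value: `${gpu.gpuTemp}` },
        );
    }
    fields.push({ name: 'Time', value: `~${time} seconds.` });
    return { color: 0x0099ff, title: 'Please wait.. I am thinking...', fields };
}

Each branch would then only gather its stats and pass the result to `message.channel.send({ embeds: [...] })` the first time or `botMessage.edit({ embeds: [...] })` afterwards, exactly as the diff already does with the inline objects.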