revert back to older style of workflow until new logic is written

Raven Scott 2023-05-20 04:08:27 +02:00
parent 73636804a5
commit 2ac55922d2
1 changed file with 20 additions and 102 deletions


@@ -5,8 +5,6 @@ import { resetResponses, userResetMessages } from './assets/resetMessages.js';
 import { errorMessages, busyResponses } from './assets/errorMessages.js';
 import cpuStat from 'cpu-stat';
 import os from 'os';
-import smi from 'node-nvidia-smi';
 import {
   Client,
@@ -15,8 +13,6 @@ import {
   Partials
 } from 'discord.js';
-let botMessage; // define a variable to hold the message object
 const client = new Client({
   intents: [
     GatewayIntentBits.DirectMessages,
@@ -145,7 +141,6 @@ client.on('messageCreate', async (message) => {
     const response = await generateResponse(conversation, message);
     // Append bot message to conversation history
     conversation.messages.push({
       role: 'assistant',
@@ -159,9 +154,9 @@ client.on('messageCreate', async (message) => {
     // if we are over the discord char limit we need chunks...
     if (response.length > limit) {
       const chunks = response.match(new RegExp(`.{1,${limit}}`, "g"));
       if (chunks.length >= 15) return await message.channel.send("Response chunks too large. Try again");
       for (let i = 0; i < chunks.length; i++) {
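
Note on the chunking logic this hunk keeps: Discord rejects messages over its per-message character cap, so the response is greedily sliced into pieces of at most `limit` characters, and anything that would take 15 or more chunks is refused outright. A standalone sketch of that slicing (the cap of 2000 is an assumption; this file defines its own `limit` elsewhere):

const limit = 2000; // assumed value of Discord's per-message cap

function chunkResponse(response) {
  if (response.length <= limit) return [response];
  // Greedy slice into runs of at most `limit` characters. The pattern in
  // the diff uses only the "g" flag, so "." skips newlines and they get
  // dropped from the chunks; the "s" flag added here preserves them.
  return response.match(new RegExp(`.{1,${limit}}`, "gs")) ?? [];
}

// chunkResponse("a".repeat(4100)) yields chunks of 2000, 2000 and 100 chars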
@@ -245,7 +240,7 @@ async function generateResponse(conversation, message) {
       // Append a new line and the new content to the existing content of the last message
       conversation.messages[lastMessageIndex].content += "\n" + response;
       console.log("A URL was provided, response: " + response)
     } catch (err) {
@@ -261,6 +256,7 @@ async function generateResponse(conversation, message) {
   const messagesCopy = [...conversation.messages]; // create a copy of the messages array
+  let botMessage; // define a variable to hold the message object
   let time = 0
   // define a function that shows the system load percentage and updates the message
   const showSystemLoad = async () => {
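
showSystemLoad, restored below in its older form, re-renders a status embed from CPU and memory readings; this file imports cpu-stat and os for exactly that. A sketch of how those numbers are plausibly gathered (the getSystemLoad wrapper and its promise shape are illustrative, not code from this repo):

import cpuStat from 'cpu-stat';
import os from 'os';

function getSystemLoad() {
  return new Promise((resolve, reject) => {
    // cpu-stat samples CPU usage over a short window and reports a percentage
    cpuStat.usagePercent((err, percent) => {
      if (err) return reject(err);
      const freeMemory = os.freemem() / 1024 / 1024 / 1024;   // GB
      const totalMemory = os.totalmem() / 1024 / 1024 / 1024; // GB
      resolve({ systemLoad: percent, usedMemory: totalMemory - freeMemory, totalMemory });
    });
  });
}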
@@ -275,39 +271,10 @@ async function generateResponse(conversation, message) {
       const totalMemory = os.totalmem() / 1024 / 1024 / 1024;
       const usedMemory = totalMemory - freeMemory;
-      let filedsData = [
-        {
-          name: 'System Load',
-          value: `${systemLoad.toFixed(2)}%`,
-        },
-        {
-          name: 'Memory Usage',
-          value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
-        },
-        {
-          name: 'Time',
-          value: `~${time} seconds.`,
-        },
-      ]
-      if (process.env.GPU == 1) {
-        smi(function (err, data) {
-          if (err) {
-            // Handle error if smi function fails
-            console.error(err);
-            return;
-          }
-          let utilization = data.nvidia_smi_log.gpu.utilization;
-          let gpuUtilization = utilization.gpu_util;
-          let memoryUtilization = utilization.memory_util;
-          let gpuTemp = data.nvidia_smi_log.gpu.temperature.gpu_temp;
-          // These are not used until nvidia-docker fixes their support
-          let gpuTarget = data.nvidia_smi_log.gpu.temperature.gpu_target_temperature;
-          let gpuFanSpeed = data.nvidia_smi_log.gpu.fan_speed;
-          let filedsData = [
+      const embedData = {
+        color: 0x0099ff,
+        title: 'Please wait.. I am thinking...',
+        fields: [
           {
             name: 'System Load',
             value: `${systemLoad.toFixed(2)}%`,
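
The branch deleted above polled the GPU through node-nvidia-smi, which hands its callback the parsed output of nvidia-smi as a nested object. A sketch of that read using only the property paths visible in the removed code (the promise wrapper is illustrative; values arrive as strings, e.g. "37 %", which is why the deleted embed fields interpolated them directly):

import smi from 'node-nvidia-smi';

function readGpuStats() {
  return new Promise((resolve, reject) => {
    smi((err, data) => {
      if (err) return reject(err); // same failure path as the deleted code
      const gpu = data.nvidia_smi_log.gpu;
      resolve({
        gpuUtilization: gpu.utilization.gpu_util,
        memoryUtilization: gpu.utilization.memory_util,
        gpuTemp: gpu.temperature.gpu_temp,
      });
    });
  });
}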
@@ -316,68 +283,21 @@ async function generateResponse(conversation, message) {
             name: 'Memory Usage',
             value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
           },
-          {
-            name: 'GPU Utilization',
-            value: `${gpuUtilization}`,
-          },
-          {
-            name: 'Memory Utilization',
-            value: `${memoryUtilization}`,
-          },
-          {
-            name: 'GPU Temperature',
-            value: `${gpuTemp}`,
-          },
           {
             name: 'Time',
             value: `~${time} seconds.`,
           },
-          ];
-          const embedData = {
-            color: 0x0099ff,
-            title: 'Please wait.. I am thinking...',
-            fields: filedsData,
-          };
-          // if the message object doesn't exist, create it
-          if (botMessage == null) {
-            (async () => {
-              botMessage = await message.channel.send({ embeds: [embedData] });
-            })();
-          } else {
-            try {
-              message.channel.messages.fetch(botMessage.id)
-                .then(message => console.log(message.content)) //it fetched the message - good
-              botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
-            } catch (error) {
-              return; //the message no longer exists and will be ignored
-            }
-          }
-        });
-      } else {
-        const embedData = {
-          color: 0x0099ff,
-          title: 'Please wait.. I am thinking...',
-          fields: filedsData,
-        };
-        // if the message object doesn't exist, create it
-        if (botMessage == null) {
+        ],
+      };
+      // if the message object doesn't exist, create it
+      if (!botMessage) {
         (async () => {
           botMessage = await message.channel.send({ embeds: [embedData] });
         })();
       } else {
-        try {
-          message.channel.messages.fetch(botMessage.id)
-            .then(message => console.log(message.content)) //it fetched the message - good
-          botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
-        } catch (error) {
-          return; //the message no longer exists and will be ignored
-        }
-      }
-      }
+        botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
+      }
     });
   };
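
The net effect of this hunk: one embed payload, one code path, and a send-once-then-edit update cycle in place of the duplicated GPU/non-GPU branches. A compact sketch of the restored pattern (in the actual file botMessage is scoped inside generateResponse; it is module-level here only to keep the sketch short):

let botMessage; // cached "please wait" message, as in the diff

async function upsertStatusEmbed(channel, fields) {
  const embedData = {
    color: 0x0099ff,
    title: 'Please wait.. I am thinking...',
    fields, // e.g. [{ name: 'System Load', value: '42.00%' }]
  };
  if (!botMessage) {
    // first tick: post the embed and keep the Message object
    botMessage = await channel.send({ embeds: [embedData] });
  } else {
    // later ticks: edit the same message instead of flooding the channel
    await botMessage.edit({ embeds: [embedData] });
  }
}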
@@ -409,18 +329,16 @@ async function generateResponse(conversation, message) {
     const responseText = choice.message.content;
-    message.channel.messages.fetch(botMessage.id).then(message => message.delete())
     // clear the interval, replace the "please wait" message with the response, and update the message
-    await clearInterval(refreshInterval);
+    clearInterval(refreshInterval);
     console.log(responseText);
-    botMessage = null;
+    botMessage.delete()
     return responseText;
   } catch (err) {
     throw err;
   } finally {
-    botMessage = null;
     clearTimeout(timeout);
     time = 0
   }
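
Two details worth noting in this final hunk: clearInterval is synchronous and returns undefined, so the removed `await clearInterval(...)` awaited nothing, and the revert deletes the placeholder with botMessage.delete() instead of re-fetching it first. A sketch of the restored teardown (requestCompletion, the guard timer and the 1000 ms tick are assumptions; showSystemLoad, botMessage, timeout and time are the diff's own names):

async function finishResponse(requestCompletion) {
  const refreshInterval = setInterval(showSystemLoad, 1000); // assumed tick rate
  const timeout = setTimeout(() => console.warn('model is slow'), 60_000); // illustrative guard
  try {
    const responseText = await requestCompletion(); // hypothetical model call
    clearInterval(refreshInterval); // synchronous, so no await is needed
    botMessage.delete();            // drop the "thinking" placeholder
    return responseText;
  } finally {
    clearTimeout(timeout);
    time = 0; // reset the elapsed-seconds counter shown in the embed
  }
}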