From 2ac55922d2a30528a7fff349343ea99ae869f01a Mon Sep 17 00:00:00 2001 From: Raven Scott Date: Sat, 20 May 2023 04:08:27 +0200 Subject: [PATCH] revert back to older style of workflow until new logic is written --- llamabot.js | 122 +++++++++------------------------------- 1 file changed, 20 insertions(+), 102 deletions(-) diff --git a/llamabot.js b/llamabot.js index a994690..893d7cb 100644 --- a/llamabot.js +++ b/llamabot.js @@ -5,8 +5,6 @@ import { resetResponses, userResetMessages } from './assets/resetMessages.js'; import { errorMessages, busyResponses } from './assets/errorMessages.js'; import cpuStat from 'cpu-stat'; import os from 'os'; -import smi from 'node-nvidia-smi'; - import { Client, @@ -15,8 +13,6 @@ import { Partials } from 'discord.js'; -let botMessage; // define a variable to hold the message object - const client = new Client({ intents: [ GatewayIntentBits.DirectMessages, @@ -145,7 +141,6 @@ client.on('messageCreate', async (message) => { const response = await generateResponse(conversation, message); - // Append bot message to conversation history conversation.messages.push({ role: 'assistant', @@ -159,9 +154,9 @@ client.on('messageCreate', async (message) => { // if we are over the discord char limit we need chunks... if (response.length > limit) { - + const chunks = response.match(new RegExp(`.{1,${limit}}`, "g")); - if (chunks.length >= 15) return await message.channel.send("Response chunks too large. Try again"); + if (chunks.length >= 15) return await message.channel.send("Response chunks too large. Try again"); for (let i = 0; i < chunks.length; i++) { @@ -245,7 +240,7 @@ async function generateResponse(conversation, message) { // Append a new line and the new content to the existing content of the last message conversation.messages[lastMessageIndex].content += "\n" + response; - + console.log("A URL was provided, response: " + response) } catch (err) { @@ -261,6 +256,7 @@ async function generateResponse(conversation, message) { const messagesCopy = [...conversation.messages]; // create a copy of the messages array + let botMessage; // define a variable to hold the message object let time = 0 // define a function that shows the system load percentage and updates the message const showSystemLoad = async () => { @@ -275,39 +271,10 @@ async function generateResponse(conversation, message) { const totalMemory = os.totalmem() / 1024 / 1024 / 1024; const usedMemory = totalMemory - freeMemory; - let filedsData = [ - { - name: 'System Load', - value: `${systemLoad.toFixed(2)}%`, - }, - { - name: 'Memory Usage', - value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`, - }, - { - name: 'Time', - value: `~${time} seconds.`, - }, - ] - - if (process.env.GPU == 1) { - smi(function (err, data) { - if (err) { - // Handle error if smi function fails - console.error(err); - return; - } - - let utilization = data.nvidia_smi_log.gpu.utilization; - let gpuUtilization = utilization.gpu_util; - let memoryUtilization = utilization.memory_util; - let gpuTemp = data.nvidia_smi_log.gpu.temperature.gpu_temp; - - // These are not used until nvidia-docker fixes their support - let gpuTarget = data.nvidia_smi_log.gpu.temperature.gpu_target_temperature; - let gpuFanSpeed = data.nvidia_smi_log.gpu.fan_speed; - - let filedsData = [ + const embedData = { + color: 0x0099ff, + title: 'Please wait.. I am thinking...', + fields: [ { name: 'System Load', value: `${systemLoad.toFixed(2)}%`, @@ -316,68 +283,21 @@ async function generateResponse(conversation, message) { name: 'Memory Usage', value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`, }, - { - name: 'GPU Utilization', - value: `${gpuUtilization}`, - }, - { - name: 'Memory Utilization', - value: `${memoryUtilization}`, - }, - { - name: 'GPU Temperature', - value: `${gpuTemp}`, - }, { name: 'Time', value: `~${time} seconds.`, }, - ]; - - const embedData = { - color: 0x0099ff, - title: 'Please wait.. I am thinking...', - fields: filedsData, - }; - - // if the message object doesn't exist, create it - if (botMessage == null) { - (async () => { - botMessage = await message.channel.send({ embeds: [embedData] }); - })(); - } else { - try { - message.channel.messages.fetch(botMessage.id) - .then(message => console.log(message.content)) //it fetched the message - good - botMessage.edit({ embeds: [embedData] }); // otherwise, update the message - } catch (error) { - return; //the message no longer exists and will be ignored - } - } - }); - } else { - const embedData = { - color: 0x0099ff, - title: 'Please wait.. I am thinking...', - fields: filedsData, - }; - - // if the message object doesn't exist, create it - if (botMessage == null) { + ], + }; + + // if the message object doesn't exist, create it + if (!botMessage) { (async () => { botMessage = await message.channel.send({ embeds: [embedData] }); - })(); - } else { - try { - message.channel.messages.fetch(botMessage.id) - .then(message => console.log(message.content)) //it fetched the message - good - botMessage.edit({ embeds: [embedData] }); // otherwise, update the message - } catch (error) { - return; //the message no longer exists and will be ignored - } - } - } - + })(); + } else { + botMessage.edit({ embeds: [embedData] }); // otherwise, update the message + } }); }; @@ -409,18 +329,16 @@ async function generateResponse(conversation, message) { const responseText = choice.message.content; - message.channel.messages.fetch(botMessage.id).then(message => message.delete()) - // clear the interval, replace the "please wait" message with the response, and update the message - await clearInterval(refreshInterval); + clearInterval(refreshInterval); console.log(responseText); - botMessage = null; + botMessage.delete() + return responseText; } catch (err) { throw err; } finally { - botMessage = null; clearTimeout(timeout); time = 0 }