Raven Scott 2023-05-21 00:20:53 +02:00
parent 927b5c834d
commit c75926728c


@@ -28,8 +28,11 @@ const client = new Client({
// Grab ChannelIDs from the .env file
const channelIDs = process.env.CHANNEL_IDS.split(',');
// Store Conversations in a MAP
const conversations = new Map();
// setBusy function: this allows us to set our bot into busy mode,
// locking out all other tasks until the current one is complete
function setBusy(userId, isBusy) {
if (conversations.has(userId)) {
conversations.get(userId).busy = isBusy;
@@ -40,6 +43,8 @@ function setBusy(userId, isBusy) {
}
}
// General check: is any conversation busy?
// If yes, flag it and let us know
function isAnyConversationBusy() {
for (const conversation of conversations.values()) {
if (conversation.busy) {
@@ -50,6 +55,7 @@ function isAnyConversationBusy() {
return false;
}
// Setting our presence to busy within the bot's status
function setPresenceBusy() {
client.user.setPresence({
activities: [{
@@ -60,6 +66,8 @@ function setPresenceBusy() {
});
}
// Setting our presence to ready within the bot's status
function setPresenceOnline() {
client.user.setPresence({
activities: [{
@@ -71,18 +79,23 @@ function setPresenceOnline() {
}
// When we have logged in to the Discord API,
// set presence to online.
client.once('ready', () => {
console.log('Bot is ready.');
setPresenceOnline()
});
// When a message is sent within Discord, let's handle it.
client.on('messageCreate', async (message) => {
// Function to send a random message from any array
async function sendRand(array) {
const arrayChoice = array[Math.floor(Math.random() * array.length)];
await message.channel.send(arrayChoice); // give a notification of reset using a human-like response.
}
// Function to send a random Direct Message from any array
async function sendRandDM(array) {
const arrayChoice = array[Math.floor(Math.random() * array.length)];
await message.author.send(arrayChoice); // give a notification of reset using a human-like response.
@@ -93,7 +106,8 @@ client.on('messageCreate', async (message) => {
return;
}
if (message.author.bot) return; // Ignore messages from bots
// Always ignore bots!
if (message.author.bot) return;
// Check if any conversation is busy
if (isAnyConversationBusy()) {
@@ -103,12 +117,18 @@ client.on('messageCreate', async (message) => {
sendRandDM(busyResponses);
return;
}
// Set user ID and get our conversation.
const userID = message.author.id;
let conversation = conversations.get(userID) || {
messages: [],
busy: false
};
// If we do not have a conversation, let's generate one.
// This requires a chatflow for the API.
// It's better to have a default beginning conversation
// providing context for the AI model.
if (conversation.messages.length === 0) {
conversation.messages.push({
role: 'user',
@@ -124,12 +144,15 @@ client.on('messageCreate', async (message) => {
});
}
// If a user needs a reset, we delete their entry in the MAP
if (message.content === '!reset' || message.content === '!r') {
conversations.delete(userID); // Delete user's conversation map if they request reset
sendRand(userResetMessages)
return;
}
// Begin processing our conversation; this is our main workflow.
// Append user message to conversation history
conversation.messages.push({
role: 'user',
@@ -137,12 +160,18 @@ client.on('messageCreate', async (message) => {
});
try {
// Now we have our conversation set up.
// Let's set presence to busy.
// We will also set our conversation's MAP entry to busy,
// locking out all other tasks.
setPresenceBusy()
setBusy(message.author.id, true);
// Let's start generating the response
const response = await generateResponse(conversation, message);
// Append bot message to conversation history
// Append bot message to conversation history when it is ready
conversation.messages.push({
role: 'assistant',
content: response
@@ -155,22 +184,26 @@ client.on('messageCreate', async (message) => {
// If we are over the Discord char limit, we need chunks...
if (response.length > limit) {
// We are going to split the response into message chunks since it is too large for Discord.
// We can extend our message size using chunks; the issue?
// Users can abuse this feature, so we cap it at 15 chunks to avoid API abuse.
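// Split the response into chunks of at most `limit` characters each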
const chunks = response.match(new RegExp(`.{1,${limit}}`, "g"));
if (chunks.length >= 15) return await message.channel.send("Response chunks too large. Try again");
// If we do not have too many chunks, let's send each one using our overflow delay
for (let i = 0; i < chunks.length; i++) {
setTimeout(() => {
message.channel.send(chunks[i]);
}, i * (process.env.OVERFLOW_DELAY || 3) * 1000); // delay (OVERFLOW_DELAY, default 3 seconds) between each chunk to save on API requests
}
} else {
// We are good to go, send the response
// We are good to go, the message is not too large for Discord; send the response
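// Strip an "@" before sending so the reply is less likely to trigger mentions (note: .replace removes only the first occurrence)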
await message.channel.send(response.replace("@", ""));
}
// We have completed our task, let's go back online
setPresenceOnline()
// Set our conversation MAP entry to not busy
setBusy(message.author.id, false);
} else {
// Handle empty response here
@@ -180,24 +213,30 @@ client.on('messageCreate', async (message) => {
setPresenceOnline()
conversation.busy = false;
}
conversations.set(userID, conversation); // Update user's conversation map in memory
console.log(conversation)
conversations.set(userID, conversation); // Update user's conversation map in memory
// Print the current conversation as it stands
console.log(conversation)
} catch (err) {
// If we have any errors, let's send a response
console.error(err);
return sendRand(errorMessages)
} finally {
// We are done! Let's finish up by going back online
setPresenceOnline()
setBusy(message.author.id, false);
}
});
// Import cheerio for scraping
import cheerio from 'cheerio';
async function generateResponse(conversation, message) {
// Begin web scraping if an https:// or http:// URL is detected
// Check if message contains a URL
const urlRegex = /(https?:\/\/[^\s]+)/g;
// Match our REGEX
const urls = message.content.match(urlRegex);
if (urls) {
@@ -219,15 +258,17 @@ async function generateResponse(conversation, message) {
response += `Description: ${pageDescription}\n`;
}
if (pageContent) {
// Let's check for content and grab only the configured amount.
const MAX_CONTENT_LENGTH = process.env.MAX_CONTENT_LENGTH;
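// Convert the scraped HTML to plain text and collapse newlines/tabs into single spaces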
let plainTextContent = $('<div>').html(pageContent).text().trim().replace(/[\r\n\t]+/g, ' ');
// Clean up code: remove it from processing
const codePattern = /\/\/|\/\*|\*\/|\{|\}|\[|\]|\bfunction\b|\bclass\b|\b0x[0-9A-Fa-f]+\b|\b0b[01]+\b/;
const isCode = codePattern.test(plainTextContent);
if (isCode) {
plainTextContent = plainTextContent.replace(codePattern, '');
}
// Remove anything enclosed in brackets
// Remove anything enclosed in brackets (junk data)
plainTextContent = plainTextContent.replace(/ *\([^)]*\) */g, '');
if (plainTextContent.length > MAX_CONTENT_LENGTH) {
plainTextContent = plainTextContent.substring(0, MAX_CONTENT_LENGTH) + '...';
@@ -250,28 +291,38 @@ async function generateResponse(conversation, message) {
}
}
}
// We need an abort controller to stop our progress message editor
const controller = new AbortController();
// Set our timeout for the controller (900000 ms = 15 minutes)
const timeout = setTimeout(() => {
controller.abort();
}, 900000);
// Copy our messages from the MAP
const messagesCopy = [...conversation.messages]; // create a copy of the messages array
let botMessage; // define a variable to hold the message object
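// Track elapsed time; incremented by REFRESH_INTERVAL on each refresh tick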
let time = 0
// define a function that shows the system load percentage and updates the message
const showSystemLoad = async () => {
// Advance our elapsed time counter
time = Number(time) + Number(process.env.REFRESH_INTERVAL);
// Get system stats
cpuStat.usagePercent(function (err, percent, seconds) {
if (err) {
return console.log(err);
}
// Setting our system stat vars
const systemLoad = percent;
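// Convert memory figures from bytes to gigabytes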
const freeMemory = os.freemem() / 1024 / 1024 / 1024;
const totalMemory = os.totalmem() / 1024 / 1024 / 1024;
const usedMemory = totalMemory - freeMemory;
// Let's build some embed data
let embedData;
// If we have NO GPU configured, let's send system stats only
if (process.env.GPU == 0) {
embedData = {
color: 0x0099ff,
@@ -300,6 +351,7 @@ async function generateResponse(conversation, message) {
botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
}
} else {
// If we do have GPU=1, let's send some card info too!
smi(function (err, data) {
if (err) {
// Handle error if smi function fails
@@ -365,6 +417,7 @@ async function generateResponse(conversation, message) {
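// Kick off the progress updater, refreshing every REFRESH_INTERVAL seconds (default 7)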
const refreshInterval = setInterval(showSystemLoad, (process.env.REFRESH_INTERVAL || 7) * 1000);
try {
// Sending request to our API
const response = await fetch(`http://${process.env.ROOT_IP}:${process.env.ROOT_PORT}/v1/chat/completions`, {
method: 'POST',
headers: {