// llamabot.js — llama-cpp-python-djs-bot
// Discord bot that relays chat messages to a llama.cpp chat-completion API
// and streams system-load status embeds while a response is generated.
import "dotenv/config.js";
import fetch from 'node-fetch';
import { emptyResponses } from './assets/emptyMessages.js';
import { resetResponses, userResetMessages } from './assets/resetMessages.js';
import { errorMessages, busyResponses } from './assets/errorMessages.js';
import cpuStat from 'cpu-stat';
import os from 'os';
import smi from 'node-nvidia-smi';
import llamaTokenizer from 'llama-tokenizer-js'
import {
Client,
GatewayIntentBits,
ActivityType,
Partials
} from 'discord.js';
// Gateway intents: guild + DM message events plus message content.
// The Channel partial is included (presumably so DM channels are
// delivered even when uncached — confirm against discord.js docs).
const botIntents = [
    GatewayIntentBits.DirectMessages,
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildModeration,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent,
];

const client = new Client({
    intents: botIntents,
    partials: [Partials.Channel],
});
// Grab ChannelIDs from the .env file.
// Fall back to an empty list instead of crashing with a TypeError when
// CHANNEL_IDS is unset; trim whitespace and drop blank entries so
// "123, 456," style configs still match real channel IDs.
const channelIDs = (process.env.CHANNEL_IDS ?? '')
    .split(',')
    .map((id) => id.trim())
    .filter((id) => id.length > 0);

// Store Conversations in a MAP, keyed by Discord user ID.
// Each entry is { messages: [...], busy: boolean }.
const conversations = new Map();

let botMessage; // holds the "thinking" status message object while generating
// Set busy function this allows us to set our bot into busy mode
// locking out all other tasks until the current one is complete.
// Fix: a freshly created entry now always carries a messages array,
// so later reads of conversation.messages never throw (the old code
// created { busy } only, which crashed the next messageCreate pass).
function setBusy(userId, isBusy) {
    const conversation = conversations.get(userId);
    if (conversation) {
        conversation.busy = isBusy;
    } else {
        conversations.set(userId, {
            messages: [],
            busy: isBusy
        });
    }
}
// General check: is ANY tracked conversation currently mid-request?
// If so, flip our presence to busy as a side effect and report true.
function isAnyConversationBusy() {
    const anyBusy = [...conversations.values()].some((entry) => entry.busy);
    if (anyBusy) {
        setPresenceBusy();
    }
    return anyBusy;
}
// Flip the bot's Discord presence to Do-Not-Disturb while a
// request is being processed.
function setPresenceBusy() {
    const busyActivity = {
        name: `Processing a Request`,
        type: ActivityType.Playing,
    };
    client.user.setPresence({
        activities: [busyActivity],
        status: 'dnd',
    });
}
// Flip the bot's Discord presence back to online / ready for work.
function setPresenceOnline() {
    const readyActivity = {
        name: `Ready for Request`,
        type: ActivityType.Playing,
    };
    client.user.setPresence({
        activities: [readyActivity],
        status: 'online',
    });
}
// Once the Discord session is established, log it and advertise
// ourselves as available.
const handleReady = () => {
    console.log('Bot is ready.');
    setPresenceOnline();
};
client.once('ready', handleReady);
// When a message is sent within discord, lets handle it.
// Fixes over the previous revision:
//  - promises (message.delete, DM sends) are awaited / caught instead
//    of floating, so rejections (e.g. missing permissions) can't crash;
//  - the empty-response reset branch now returns early: previously it
//    deleted the conversation and then fell through to
//    conversations.set(...), silently re-adding it;
//  - all '@' characters are stripped from single-message replies (not
//    just the first) so the model can never ping users or @everyone;
//  - the busy lock is released in one place only (finally).
client.on('messageCreate', async (message) => {

    // Send a random human-like response from `array` to the channel.
    async function sendRand(array) {
        const arrayChoice = array[Math.floor(Math.random() * array.length)];
        await message.channel.send(arrayChoice);
    }

    // Send a random human-like response from `array` as a Direct Message.
    async function sendRandDM(array) {
        const arrayChoice = array[Math.floor(Math.random() * array.length)];
        await message.author.send(arrayChoice);
    }

    // Only respond in the specified channels
    if (!channelIDs.includes(message.channel.id)) {
        return;
    }

    // Always ignore bots!
    if (message.author.bot) return;

    // We single-task: if any conversation is mid-request, flag busy,
    // drop this message and tell the author privately.
    if (isAnyConversationBusy()) {
        setPresenceBusy();
        // Deleting can fail (missing permissions, DM channel) — never crash on it.
        await message.delete().catch(() => {});
        await sendRandDM(busyResponses).catch(() => {});
        return;
    }

    // Set user ID and get our conversation.
    const userID = message.author.id;
    let conversation = conversations.get(userID) || {
        messages: [],
        busy: false
    };

    // A brand-new conversation is seeded with the configured system
    // prompt plus a short greeting exchange, giving the model context.
    if (conversation.messages.length === 0) {
        conversation.messages.push({
            role: 'user',
            content: ` ${process.env.INIT_PROMPT}`
        });
        conversation.messages.push({
            role: 'user',
            content: ` User name: ${message.author.username}.`
        });
        conversation.messages.push({
            role: 'assistant',
            content: ` Hello, ${message.author.username}, how may I help you?`
        });
    }

    // If a user requests a reset, drop their conversation entry.
    if (message.content === '!reset' || message.content === '!r') {
        conversations.delete(userID);
        await sendRand(userResetMessages); // notify with a human-like response
        return;
    }

    // Begin processing our conversation, this is our main work flow.
    // Append user message to conversation history.
    conversation.messages.push({
        role: 'user',
        content: ` ${message.cleanContent}`
    });

    try {
        // Lock everything: presence to busy and this conversation flagged,
        // so no other request is processed until we finish (see finally).
        setPresenceBusy();
        setBusy(userID, true);

        // Lets start generating the response
        const response = await generateResponse(conversation, message);

        // Append bot message to conversation history when it is ready
        conversation.messages.push({
            role: 'assistant',
            content: response
        });

        if (response && response.trim()) {
            // Send response to user if it's not empty
            const limit = 1980; // stay under Discord's 2000-char message cap

            if (response.length > limit) {
                // Too large for one Discord message: split into chunks.
                // Cap the chunk count (40) so one request cannot abuse the API.
                const chunks = response.match(new RegExp(`.{1,${limit}}`, "g"));
                if (chunks.length >= 40) return await message.channel.send("Response chunks too large. Try again");

                // Send each chunk with a delay between them to save on API requests.
                for (let i = 0; i < chunks.length; i++) {
                    setTimeout(() => {
                        message.channel.send(chunks[i]).catch(console.error);
                    }, i * (process.env.OVERFLOW_DELAY || 3) * 1000);
                }
            } else {
                // Message fits in one send. Strip every '@' (the old code
                // only removed the first) so mentions are neutered.
                await message.channel.send(response.replace(/@/g, ""));
            }
        } else {
            // Empty response: wipe the conversation, notify the user, and
            // return early so the wiped entry is NOT re-added below.
            await sendRand(emptyResponses);
            conversations.delete(userID);
            await sendRand(resetResponses);
            return;
        }

        conversations.set(userID, conversation); // Update user's conversation map in memory

        // Print the current conversation as it stands
        console.log(conversation);
    } catch (err) {
        // If we have any errors lets send a response
        console.error(err);
        return sendRand(errorMessages);
    } finally {
        // We are done (success or failure): go back online and
        // release the conversation lock.
        setPresenceOnline();
        setBusy(userID, false);
    }
});
// Import cheerio for scraping page content out of fetched HTML.
import cheerio from 'cheerio';
// Generate a model response for `conversation`, posting/updating a
// "thinking" status embed in `message`'s channel while it runs.
// Steps: (1) scrape any URLs in the message and append their text to
// the last conversation entry; (2) start a periodic system-load embed;
// (3) trim the message history to fit the token budget; (4) POST to the
// llama.cpp-compatible /v1/chat/completions endpoint and return the text.
async function generateResponse(conversation, message) {

    // Begin web scraper if a https:// OR http:// URL is detected
    // Check if message contains a URL
    const urlRegex = /(https?:\/\/[^\s]+)/g;

    // Match our REGEX
    const urls = message.content.match(urlRegex);
    if (urls) {
        // If there are multiple URLs, process them one by one
        for (const url of urls) {
            try {
                const res = await fetch(url);
                const html = await res.text();
                const $ = cheerio.load(html);
                // Extract page title, meta description and content
                const pageTitle = $('head title').text().trim();
                const pageDescription = $('head meta[name="description"]').attr('content');
                const pageContent = $('body').text().trim();
                // Construct response message with page details
                let response = `Title: ${pageTitle}\n`;
                if (pageDescription) {
                    response += `Description: ${pageDescription}\n`;
                }
                if (pageContent) {
                    // Only keep as much page text as configured.
                    // NOTE(review): env vars are strings; the > comparison
                    // below relies on implicit coercion — confirm
                    // MAX_CONTENT_LENGTH is always a numeric string.
                    const MAX_CONTENT_LENGTH = process.env.MAX_CONTENT_LENGTH;
                    let plainTextContent = $('<div>').html(pageContent).text().trim().replace(/[\r\n\t]+/g, ' ');

                    // Strip code-looking fragments from the scraped text.
                    // NOTE(review): codePattern is non-global, so .replace
                    // removes only the FIRST match — presumably a cheap
                    // best-effort cleanup; confirm this is intentional.
                    const codePattern = /\/\/|\/\*|\*\/|\{|\}|\[|\]|\bfunction\b|\bclass\b|\b0x[0-9A-Fa-f]+\b|\b0b[01]+\b/;
                    const isCode = codePattern.test(plainTextContent);
                    if (isCode) {
                        plainTextContent = plainTextContent.replace(codePattern, '');
                    }

                    // Remove anything enclosed in parentheses (junk data).
                    plainTextContent = plainTextContent.replace(/ *\([^)]*\) */g, '');
                    if (plainTextContent.length > MAX_CONTENT_LENGTH) {
                        plainTextContent = plainTextContent.substring(0, MAX_CONTENT_LENGTH) + '...';
                    }
                    response += `Content: ${plainTextContent.trim()}`;
                }
                response += `URL: ${url}`;
                // Get the index of the last message in the array
                const lastMessageIndex = conversation.messages.length - 1;
                // Append a new line and the new content to the existing content of the last message
                conversation.messages[lastMessageIndex].content += "\n" + response;
                console.log("A URL was provided, response: " + response)
            } catch (err) {
                console.error(err);
                // FIXME(review): sendRand is defined inside the
                // messageCreate handler, not in this scope — reaching
                // this line throws a ReferenceError instead of sending
                // an error message.
                return sendRand(errorMessages);
            }
        }
    }

    // We need an abort controller to stop our progress message editor
    const controller = new AbortController();

    // Abort the API request if it takes longer than 15 minutes.
    const timeout = setTimeout(() => {
        controller.abort();
    }, 900000);

    // Copy our messages from MAP
    const messagesCopy = [...conversation.messages]; // create a copy of the messages array

    // Elapsed-time counter shown in the status embed; advanced by one
    // REFRESH_INTERVAL on every tick of showSystemLoad.
    let time = 0
    // define a function that shows the system load percentage and updates the message
    const showSystemLoad = async () => {

        // Advance the elapsed-time counter by one refresh interval.
        time = Number(time) + Number(process.env.REFRESH_INTERVAL);

        // Get system stats (callback-style API from cpu-stat)
        cpuStat.usagePercent(function (err, percent, seconds) {
            if (err) {
                return console.log(err);
            }

            // Setting out system stat vars (memory converted to GB)
            const systemLoad = percent;
            const freeMemory = os.freemem() / 1024 / 1024 / 1024;
            const totalMemory = os.totalmem() / 1024 / 1024 / 1024;
            const usedMemory = totalMemory - freeMemory;

            // lets build some embed data
            let embedData;

            // If we have NO GPU config lets send system stats only
            if (process.env.GPU == 0) {
                embedData = {
                    color: 0x0099ff,
                    title: 'Please wait.. I am thinking...',
                    fields: [
                        {
                            name: 'System Load',
                            value: `${systemLoad.toFixed(2)}%`,
                        },
                        {
                            name: 'Memory Usage',
                            value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
                        },
                        {
                            name: 'Time',
                            value: `~${time} seconds.`,
                        },
                    ],
                };
                // if the message object doesn't exist, create it
                // (skipping the very first tick while time is still 0)
                if (!botMessage) {
                    (async () => {
                        if (time == 0) return
                        botMessage = await message.channel.send({ embeds: [embedData] });
                    })();
                } else {
                    // Delete the status embed once no conversation is busy;
                    // otherwise keep refreshing it in place.
                    (async () => {
                        if (!isAnyConversationBusy()) {
                            botMessage.delete()
                        } else {
                            await botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
                        }
                    })();
                }
            } else {
                // If we do have GPU=1 lets send some card info too!
                smi(function (err, data) {
                    if (err) {
                        // Handle error if smi function fails
                        console.error(err);
                        return;
                    }
                    let utilization = data.nvidia_smi_log.gpu.utilization;
                    let gpuUtilization = utilization.gpu_util;
                    let memoryUtilization = utilization.memory_util;
                    let gpuTemp = data.nvidia_smi_log.gpu.temperature.gpu_temp;
                    // These are not used until nvidia-docker fixes their support
                    let gpuTarget = data.nvidia_smi_log.gpu.temperature.gpu_target_temperature;
                    let gpuFanSpeed = data.nvidia_smi_log.gpu.fan_speed;
                    embedData = {
                        color: 0x0099ff,
                        title: 'Please wait.. I am thinking...',
                        fields: [
                            {
                                name: 'System Load',
                                value: `${systemLoad.toFixed(2)}%`,
                            },
                            {
                                name: 'Memory Usage',
                                value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
                            },
                            {
                                name: 'GPU Utilization',
                                value: `${gpuUtilization}`,
                            },
                            {
                                name: 'Memory Utilization',
                                value: `${memoryUtilization}`,
                            },
                            {
                                name: 'GPU Temperature',
                                value: `${gpuTemp}`,
                            },
                            {
                                name: 'Time',
                                value: `~${time} seconds.`,
                            },
                        ],
                    };
                    // if the message object doesn't exist, create it
                    // (skipping the very first tick while time is still 0)
                    if (!botMessage) {
                        (async () => {
                            if (time == 0) return
                            botMessage = await message.channel.send({ embeds: [embedData] });
                        })();
                    } else {
                        (async () => {
                            if (!isAnyConversationBusy()) {
                                botMessage.delete()
                            } else {
                                await botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
                            }
                        })();
                    }
                })
            }
        });
    };

    try {
        // call the function initially
        await showSystemLoad();
        // Grab the REFRESH_INTERVAL from ENV if not exist, lets use 7 (seconds)
        // NOTE(review): refreshInterval is only cleared below when
        // time > 2 — on fast responses the interval keeps firing
        // after this function returns; verify this is intended.
        const refreshInterval = setInterval(showSystemLoad, (process.env.REFRESH_INTERVAL || 7) * 1000);

        // Count llama tokens in user/assistant messages only
        // (other roles are excluded from the budget).
        function countLlamaTokens(messages) {
            let totalTokens = 0;

            for (const message of messages) {
                if (message.role === 'user' || message.role === 'assistant') {
                    const encodedTokens = llamaTokenizer.encode(message.content);
                    totalTokens += encodedTokens.length;
                }
            }

            return totalTokens;
        }

        let totalTokens = countLlamaTokens(messagesCopy);
        console.log(`Total Llama tokens: ${totalTokens}`);
        let tokenLength = totalTokens
        // Remove older conversations if necessary to fit the context window.
        const maxLength = 1800;
        const tolerance = 25; // allow for some flexibility in the token length
        if (tokenLength > maxLength + tolerance) {
            const diff = tokenLength - (maxLength + tolerance);
            let removedTokens = 0;
            // Iterate over the messages in reverse order
            // NOTE(review): this trims the NEWEST messages first —
            // confirm that dropping recent turns (rather than oldest)
            // is the intended eviction policy.
            for (let i = messagesCopy.length - 1; i >= 0; i--) {
                const message = messagesCopy[i];
                const messageTokens = countLlamaTokens([message]);

                // Check if the current message plus the tokens in the message is less than or equal to the diff
                if (removedTokens + messageTokens <= diff) {
                    messagesCopy.splice(i, 1);
                    removedTokens += messageTokens;
                    console.log(removedTokens + " removed \nAfter Resize: " + countLlamaTokens(messagesCopy));
                } else {
                    // Remove more than one message if necessary to bring the total length below the maximum allowed length
                    const messagesToRemove = Math.floor(diff / messageTokens);
                    for (let j = 0; j < messagesToRemove; j++) {
                        messagesCopy.splice(i, 1);
                        removedTokens += messageTokens;
                    }
                    break;
                }
            }
        }

        // Sending request to our llama.cpp-compatible chat-completions API
        const response = await fetch(`http://${process.env.ROOT_IP}:${process.env.ROOT_PORT}/v1/chat/completions`, {
            method: 'POST',
            headers: {
                'accept': 'application/json',
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                messages: messagesCopy,
                max_tokens: Number(process.env.MAX_TOKENS), // add the max_tokens parameter here
                repeat_penalty: Number(process.env.REPEAT_PENALTY)
            }),
            signal: controller.signal
        });

        const responseData = await response.json();
        console.log(JSON.stringify(responseData));
        const choice = responseData.choices[0];
        const responseText = choice.message.content;
        // clear the interval, replace the "please wait" message with the response, and update the message
        console.log(responseText);
        try {
            // Only tear down the status embed if it had time to appear
            // (botMessage is created on the first tick after time > 0).
            if (time > 2) {
                await botMessage.delete();
                clearInterval(refreshInterval);
                botMessage = null;
                console.log("Time limit reached. Message deleted.");
            }
        } catch (err) {
            console.log("Error deleting message: ", err);
        }

        return responseText;
    } catch (err) {
        throw err;
    } finally {
        // Always stop the abort timer and reset shared status state.
        clearTimeout(timeout);
        botMessage = null;
        time = 0
    }
}
// Authenticate with the Discord gateway using the token from .env.
client.login(process.env.THE_TOKEN); // Replace with your bot token