first commit

2024-08-03 02:19:51 -04:00
commit 907f20c34c
11 changed files with 934 additions and 0 deletions

bot/.gitignore (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
.env
package-lock.json
node_modules

bot/assets/messages.js (new file, 75 lines)

@@ -0,0 +1,75 @@
// messages.js
const userResetMessages = [
"All good, we're starting fresh! How can I assist you?",
"Got it, let's start over! How can I help you today?",
"Alright, starting anew! What can I help you with?",
"No problem, we're starting fresh! What do you need help with?",
"Understood, let's start from scratch! What do you want to talk about?",
"Sure thing, we'll start over! What can I help you with today?",
"Gotcha, we'll start fresh! What's on your mind?",
"No worries, we'll start from the beginning! What do you need help with?",
"Starting over, got it! What can I assist you with?",
"Copy that, we'll start anew! What do you want to chat about?",
"Conversation reset, check! What do you need help with?",
"All set, we'll start fresh! What can I help you with today?",
"Starting over, no problem! What can I help you with?",
"Understood, we're starting from scratch! What can I assist you with?",
"Got it, we're starting over! What do you need help with?",
"Copy that, starting anew! What do you want to talk about?",
"No worries, we'll start fresh! What's on your mind?",
"All good, we'll start from the beginning! What do you need help with?",
"Sure thing, we'll start over! What can I help you with today?",
"Conversation reset, confirmed! What do you need help with?"
];
const errorMessages = [
"Uh oh, looks like something went awry! Try !reset to start fresh.",
"Oops, we hit a bump in the road! Give !reset a try to start anew.",
"We've encountered an error, but !reset can help us out! Give it a go.",
"Looks like something went wrong, but don't worry! !reset will give us a clean slate.",
"Oh no, we've hit a snag! Try !reset to see if that solves the issue.",
"Don't panic, but something went wrong. !reset can help us get back on track.",
"Sorry about that! Give !reset a try and we'll start over.",
"An error occurred, but we can fix it! Try !reset to start a fresh session.",
"Whoops! Something went wrong, but !reset can help us get back on track.",
"Looks like we hit a bump in the road. Give !reset a try to get us back on track.",
"We've encountered an issue, but don't worry! Try !reset to start anew.",
"Oh dear, something's not quite right. Give !reset a go to start over.",
"Oops, something went wrong. But don't worry, !reset will get us back on track!",
"Looks like we've encountered an error. Give !reset a try to start a new session.",
"Sorry about that! Give !reset a go and we'll start over.",
"An error occurred, but we can fix it! Try !reset to start over.",
"Uh oh, something went wrong. But don't worry, !reset can help us out.",
"Looks like we hit a roadblock, but !reset can get us back on track!",
"We've encountered a problem, but don't fret! Give !reset a try to start anew.",
"Oopsie daisy! Give !reset a try and we'll start over."
];
const busyResponses = [
"Sorry about that! Looks like I'm tied up at the moment. Please try again later.",
"Oops, I'm currently busy with something else. Please try again later.",
"Looks like I'm already working on something. Can you try again later?",
"I'm currently occupied with another process. Can you try again later?",
"I'm currently unavailable. Can you try again in a bit?",
"Looks like I'm currently busy. Can you check back later?",
"I'm currently engaged with another process. Please try again later.",
"I'm afraid I'm currently occupied with another request. Can you try again later?",
"Sorry, I'm currently busy with another task. Can you try again later?",
"I'm currently tied up with another request. Please try again later.",
"Looks like I'm currently busy with something else. Can you try again later?",
"I'm currently engaged with another task. Please try again later.",
"Sorry, I'm currently occupied with another process. Can you try again later?",
"I'm currently occupied with another task. Can you try again later?",
"I'm currently in the middle of another process. Can you try again later?",
"Sorry, I'm currently engaged with another task. Please try again later.",
"I'm currently in the middle of something else. Please try again later.",
"I'm afraid I'm busy with something else at the moment. Can you try again later?",
"Looks like I'm currently engaged with something else. Please try again later.",
"I'm currently unavailable. Can you try again later?"
];
module.exports = {
userResetMessages,
errorMessages,
busyResponses
};
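
Note: discord-bot.js below imports only userResetMessages; errorMessages and busyResponses are exported here but not yet consumed anywhere in this commit. A minimal sketch of how any of these arrays can be used, mirroring the bot's sendRand helper (the pick() name is illustrative, not part of the commit):

const { errorMessages, busyResponses } = require('./assets/messages.js');

// Pick one entry at random so repeated replies feel less robotic.
function pick(array) {
  return array[Math.floor(Math.random() * array.length)];
}

// e.g. await message.channel.send(pick(busyResponses));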

bot/default.env (new file, 63 lines)

@@ -0,0 +1,63 @@
# Discord Token
THE_TOKEN = ""
# The channel IDs the bot will operate in, separated by commas
CHANNEL_IDS =
INIT_PROMPT="You are an assistant"
# Key for AbuseDB
ABUSE_KEY=
# When a message is too large for Discord we chunk the response into separate messages.
# To ensure we do not rate-limit the bot, we send these at a delay interval.
# DEFAULT: 3. A good setting is between 3 and 7 seconds.
OVERFLOW_DELAY=3
# Max Content to fetch from given URLs
MAX_CONTENT_LENGTH=8000
# Max tokens for Generations
MAX_TOKENS = 4000
# ROOT_IP is only used when running the bot without docker compose
ROOT_IP = 192.168.0.8
# ROOT_PORT is only used when running the bot without docker compose
ROOT_PORT = 3000
# Directory to your models (llama.cpp-specific settings)
DATA_DIR = /models
# Enable experimental message caches (limited to a single session)
# The cache will use ~1.4 GB or MORE of RAM. ONLY ENABLE IF YOUR SYSTEM CAN HANDLE THIS.
CACHE = 1
CACHE_TYPE = "ram"
# Set the number of threads to use; currently a standard thread will utilize 1 whole core.
# I usually set this to all the cores I physically have, OR 2 cores fewer to allow for other processes.
N_THREADS = 8
# Always use MMAP unless you know what you are doing
USE_MMAP=1
# Only use MLOCK if you know what it does!
USE_MLOCK=0
# The higher the number, the more strongly repeated tokens are penalized.
REPEAT_PENALTY=1
# GPU SPECIFIC SETTINGS BELOW
GPU=1
N_GPU_LAYERS=35
PYTHONUNBUFFERED=1
# Custom settings internal to my use cases.
PATH_KEY=""
API_KEY=""
API_PATH=""
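
How these values reach the code: the bot loads them with dotenv (presumably after copying default.env to .env, since dotenv reads .env and .env is gitignored above) and coerces the numeric ones itself, because every environment variable arrives as a string. A minimal sketch under those assumptions (variable names match this file; the config object is illustrative, not part of the commit):

require('dotenv').config(); // reads .env from the working directory

const config = {
  channelIDs: (process.env.CHANNEL_IDS || '').split(','),
  maxTokens: Number(process.env.MAX_TOKENS) || 4000,
  overflowDelay: Number(process.env.OVERFLOW_DELAY) || 3, // seconds between chunked replies
  repeatPenalty: Number(process.env.REPEAT_PENALTY) || 1,
};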

bot/discord-bot.js (new file, 177 lines)

@@ -0,0 +1,177 @@
const {Client, GatewayIntentBits, EmbedBuilder} = require('discord.js');
const axios = require('axios');
const he = require('he');
const fetch = (...args) => import('node-fetch').then(({ default: fetch }) => fetch(...args));
const { userResetMessages } = require('./assets/messages.js');
const cheerio = require('cheerio');
require('dotenv').config();
const channelIDs = process.env.CHANNEL_IDS.split(',');
const client = new Client({
  intents: [GatewayIntentBits.Guilds, GatewayIntentBits.GuildMessages, GatewayIntentBits.MessageContent]
});
const MAX_CONTENT_LENGTH = process.env.MAX_CONTENT_LENGTH || 8000;
client.on('ready', () => {
  console.log(`Logged in as ${client.user.tag}!`);
});
client.on('messageCreate', async message => {
  // Function to send a random message from any array
  async function sendRand(array) {
    const arrayChoice = array[Math.floor(Math.random() * array.length)];
    await message.channel.send(arrayChoice); // reply with a randomly chosen, human-like response
  }
  if (message.author.bot) return;
  // Only respond in the specified channels
  if (!channelIDs.includes(message.channel.id)) {
    return;
  }
  const content = message.content.trim();
  let additionalContent = '';
  if (content === '!r' || content === '!reset') {
    // Handle conversation reset
    return await sendRand(userResetMessages);
  }
  if (content === '!restartCore') {
    // Handle core restart
    return await restartCore(message);
  }
  await handleUserMessage(message, content, additionalContent);
});
async function handleUserMessage(message, content, additionalContent) {
  const encodedMessage = he.encode(content + additionalContent);
  // Start typing indicator
  const typingInterval = setInterval(() => {
    message.channel.sendTyping();
  }, 9000);
  message.channel.sendTyping(); // Initial typing indicator
  try {
    const response = await axios.post(`http://${process.env.ROOT_IP}:${process.env.ROOT_PORT}/api/v1/chat`, {
      message: encodedMessage,
      max_tokens: Number(process.env.MAX_TOKENS),
      repeat_penalty: Number(process.env.REPEAT_PENALTY)
    }, {
      headers: {
        'Content-Type': 'application/json',
        'x-forwarded-for-id': message.author.id,
        'x-forwarded-for-name': message.author.username,
        'x-forwarded-for-guild': message.guild.name
      }
    });
    clearInterval(typingInterval); // Stop typing indicator
    const data = response.data;
    await sendLongMessage(message, data.content);
  } catch (error) {
    clearInterval(typingInterval); // Stop typing indicator
    if (error.response && error.response.status === 429) {
      try {
        await message.author.send('I am currently busy. Please try again later.');
      } catch (dmError) {
        console.error('Failed to send DM:', dmError);
        message.reply('I am currently busy. Please try again later.');
      }
    } else {
      message.reply('Error: ' + error.message);
    }
  }
}
// Currently unused: the !reset command is handled directly in the messageCreate handler above.
async function resetConversation(message) {
  try {
    const response = await axios.post(
      `${process.env.API_PATH}/reset-conversation`, {}, {
        headers: {
          'x-forwarded-for-id': message.author.id,
        }
      }
    );
    console.log(response.status);
    if (response.status === 200) {
      // sendRand is scoped to the messageCreate handler, so pick a random reset message here instead
      const reply = userResetMessages[Math.floor(Math.random() * userResetMessages.length)];
      return await message.channel.send(reply);
    } else {
      message.reply('Error clearing message history.');
    }
  } catch (error) {
    message.reply('Error clearing message history.');
  }
}
async function restartCore(message) {
  try {
    const response = await axios.post(`${process.env.API_PATH}/restart-core`);
    if (response.status === 200) {
      message.reply('The core server was restarted.');
    } else {
      message.reply('Error restarting the core.');
    }
  } catch (error) {
    message.reply('Error restarting the core.');
  }
}
async function sendLongMessage(message, responseText) {
  const limit = 4096; // Discord embed descriptions are capped at 4096 characters
  if (responseText.length > limit) {
    const lines = responseText.split('\n');
    const chunks = [];
    let currentChunk = '';
    for (const line of lines) {
      if (currentChunk.length + line.length > limit) {
        chunks.push(currentChunk);
        currentChunk = '';
      }
      currentChunk += line + '\n';
    }
    if (currentChunk.trim() !== '') {
      chunks.push(currentChunk.trim());
    }
    if (chunks.length >= 80) return await message.channel.send("Response chunks too large. Try again");
    for (let i = 0; i < chunks.length; i++) {
      const chunk = chunks[i];
      const embed = new EmbedBuilder()
        .setDescription(chunk) // Each chunk is sent as its own embed
        .setColor("#3498DB")
        .setTimestamp();
      setTimeout(() => {
        message.channel.send({
          embeds: [embed]
        });
      }, i * (process.env.OVERFLOW_DELAY || 3) * 1000);
    }
  } else {
    const embed = new EmbedBuilder()
      .setDescription(responseText) // The whole response fits in a single embed
      .setColor("#3498DB")
      .setTimestamp();
    message.channel.send({
      embeds: [embed]
    });
  }
}
client.login(process.env.THE_TOKEN);
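
The core server this bot talks to is not among the files shown here; the bot only assumes that POST http://ROOT_IP:ROOT_PORT/api/v1/chat accepts { message, max_tokens, repeat_penalty } plus the x-forwarded-for-* headers, returns JSON with a content field, and answers 429 when busy. A hypothetical Express stub illustrating that contract, not the actual core implementation:

const express = require('express');
const app = express();
app.use(express.json());

app.post('/api/v1/chat', (req, res) => {
  const { message, max_tokens, repeat_penalty } = req.body;
  const userId = req.get('x-forwarded-for-id');
  // A real core would run the model here; this stub just echoes the request.
  res.json({ content: `echo for ${userId}: ${message}` });
});

app.listen(process.env.ROOT_PORT || 3000);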

bot/package.json (new file, 11 lines)

@@ -0,0 +1,11 @@
{
  "name": "llama-bot",
  "version": "1.0.0",
  "main": "discord-bot.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "",
  "license": "ISC",
  "description": ""
}
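
Note: this package.json declares no dependencies, but discord-bot.js requires discord.js, axios, he, node-fetch, cheerio, and dotenv; they have to be installed (for example with npm install discord.js axios he node-fetch cheerio dotenv) before the bot will run. package-lock.json is gitignored above, so nothing in this commit pins their versions.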