diff --git a/default.env b/default.env
index 34673cc..f7bd845 100644
--- a/default.env
+++ b/default.env
@@ -1,16 +1,30 @@
+# Discord Token
THE_TOKEN = "DISCORD_TOKEN_HERE"
+
+# The Channel IDs the bot will operate in separated by commas
CHANNEL_IDS = 1094494101631680653,1094628334727614605
+
+# The INIT prompt for all conversations.
INIT_PROMPT = "Assistant name: ChatAI. You code, write and provide any information without any mistakes."
-ROOT_PORT = 8000
-DATA_DIR = /home/USERNAME/weights
-CACHE = 1
-N_THREADS = 4
+
+# Loading Embed Refresh Timing
+REFRESH_INTERVAL=10
+
+# Max Content to fetch from given URLs
+MAX_CONTENT_LENGTH=2000
+
+# Max tokens for Generations
MAX_TOKENS = 1024
-# Bot specific settings
-REFRESH_INTERVAL=7
+# END BOT SPECIFIC SETTINGS
# ROOT_IP is only used when running the bot without docker compose
ROOT_IP = 192.168.0.15
+# PORT is only used when running the bot without docker compose
+ROOT_PORT = 8000
+# Directory to your models (llama.cpp specific settings)
+DATA_DIR = /home/USERNAME/weights
+CACHE = 1
+N_THREADS = 4
diff --git a/llamabot.js b/llamabot.js
index 7e159e8..8bfe977 100644
--- a/llamabot.js
+++ b/llamabot.js
@@ -153,17 +153,17 @@ client.on('messageCreate', async (message) => {
// if we are over the discord char limit we need chunks...
if (response.length > limit) {
- const chunks = response.match(new RegExp(`.{1,${limit}}`, "g"));
- for (let i = 0; i < chunks.length; i++) {
- setTimeout(() => {
- message.channel.send(chunks[i]);
- }, i * 3000); // delay of 3 seconds between each chunk to save on API requests
- }
+ const chunks = response.match(new RegExp(`.{1,${limit}}`, "g"));
+ for (let i = 0; i < chunks.length; i++) {
+ setTimeout(() => {
+ message.channel.send(chunks[i]);
+ }, i * 3000); // delay of 3 seconds between each chunk to save on API requests
+ }
} else {
- // We are good to go, send the response
- await message.channel.send(response);
+ // We are good to go, send the response
+ await message.channel.send(response);
}
-
+
setPresenceOnline()
setBusy(message.author.id, false);
} else {
@@ -175,6 +175,8 @@ client.on('messageCreate', async (message) => {
conversation.busy = false;
}
conversations.set(userID, conversation); // Update user's conversation map in memory
+ console.log(conversation)
+
} catch (err) {
console.error(err);
sendRand(errorMessages)
@@ -184,8 +186,60 @@ client.on('messageCreate', async (message) => {
}
});
+import cheerio from 'cheerio';
async function generateResponse(conversation, message) {
+
+ // Check if message contains a URL
+ const urlRegex = /(https?:\/\/[^\s]+)/g;
+ const urls = message.content.match(urlRegex);
+
+ if (urls) {
+ // If there are multiple URLs, process them one by one
+ for (const url of urls) {
+ try {
+ const res = await fetch(url);
+ const html = await res.text();
+ const $ = cheerio.load(html);
+
+ // Extract page title, meta description and content
+ const pageTitle = $('head title').text().trim();
+ const pageDescription = $('head meta[name="description"]').attr('content');
+ const pageContent = $('body').text().trim();
+
+ // Construct response message with page details
+ let response = `Title: ${pageTitle}\n`;
+ if (pageDescription) {
+ response += `Description: ${pageDescription}\n`;
+ }
+ if (pageContent) {
+ const MAX_CONTENT_LENGTH = 1900;
+        let plainTextContent = $('<div>').html(pageContent).text().trim().replace(/[\r\n]+/g, ' ');
+
+ if (plainTextContent.length > MAX_CONTENT_LENGTH) {
+ plainTextContent = plainTextContent.substring(0, MAX_CONTENT_LENGTH) + '...';
+ response += `Content: ${plainTextContent}\n`;
+ } else {
+ response += `Content: ${plainTextContent}\n`;
+ }
+ }
+
+ response += `URL: ${url}`;
+
+ // Append bot message to conversation history
+ conversation.messages.push({
+ role: 'user',
+ content: "Data from the page is: " + response,
+ });
+
+ console.log("A URL was provided, response: " + response)
+
+ } catch (err) {
+ console.error(err);
+ sendRand(errorMessages);
+ }
+ }
+ }
const controller = new AbortController();
const timeout = setTimeout(() => {
controller.abort();
@@ -198,45 +252,45 @@ async function generateResponse(conversation, message) {
// define a function that shows the system load percentage and updates the message
const showSystemLoad = async () => {
time = Number(time) + Number(process.env.REFRESH_INTERVAL);
- cpuStat.usagePercent(function(err, percent, seconds) {
- if (err) {
- return console.log(err);
- }
-
- const systemLoad = percent;
- const freeMemory = os.freemem() / 1024 / 1024 / 1024;
- const totalMemory = os.totalmem() / 1024 / 1024 / 1024;
- const usedMemory = totalMemory - freeMemory;
-
- const embedData = {
- color: 0x0099ff,
- title: 'Please wait.. I am thinking...',
- fields: [
- {
- name: 'System Load',
- value: `${systemLoad.toFixed(2)}%`,
- },
- {
- name: 'Memory Usage',
- value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
- },
- {
- name: 'Time',
- value: `~${time} seconds.`,
- },
- ],
- };
-
- // if the message object doesn't exist, create it
- if (!botMessage) {
- (async () => {
- botMessage = await message.channel.send({ embeds: [embedData] });
- })();
- } else {
- botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
- }
+ cpuStat.usagePercent(function (err, percent, seconds) {
+ if (err) {
+ return console.log(err);
+ }
+
+ const systemLoad = percent;
+ const freeMemory = os.freemem() / 1024 / 1024 / 1024;
+ const totalMemory = os.totalmem() / 1024 / 1024 / 1024;
+ const usedMemory = totalMemory - freeMemory;
+
+ const embedData = {
+ color: 0x0099ff,
+ title: 'Please wait.. I am thinking...',
+ fields: [
+ {
+ name: 'System Load',
+ value: `${systemLoad.toFixed(2)}%`,
+ },
+ {
+ name: 'Memory Usage',
+ value: `${usedMemory.toFixed(2)} GB / ${totalMemory.toFixed(2)} GB`,
+ },
+ {
+ name: 'Time',
+ value: `~${time} seconds.`,
+ },
+ ],
+ };
+
+ // if the message object doesn't exist, create it
+ if (!botMessage) {
+ (async () => {
+ botMessage = await message.channel.send({ embeds: [embedData] });
+ })();
+ } else {
+ botMessage.edit({ embeds: [embedData] }); // otherwise, update the message
+ }
});
- };
+ };
// call the function initially
await showSystemLoad();
@@ -248,15 +302,15 @@ async function generateResponse(conversation, message) {
const response = await fetch(`http://${process.env.ROOT_IP}:${process.env.ROOT_PORT}/v1/chat/completions`, {
method: 'POST',
headers: {
- 'accept': 'application/json',
- 'Content-Type': 'application/json'
+ 'accept': 'application/json',
+ 'Content-Type': 'application/json'
},
body: JSON.stringify({
- messages: messagesCopy,
- max_tokens: Number(process.env.MAX_TOKENS) // add the max_tokens parameter here
+ messages: messagesCopy,
+ max_tokens: Number(process.env.MAX_TOKENS) // add the max_tokens parameter here
}),
signal: controller.signal
- });
+ });
const responseData = await response.json();
console.log(JSON.stringify(responseData));