Compare commits

..

1 Commits

Author SHA1 Message Date
GooeyTuxedo
e12f518bd7 Containerize bot and server in one stack. 2023-04-11 19:44:11 -07:00
4 changed files with 14 additions and 31 deletions

View File

@ -32,9 +32,6 @@ Define a generateResponse function that sends a request to the GPT-3 API to gene
Call the generateResponse function within the messageCreate event listener function. Call the generateResponse function within the messageCreate event listener function.
![demo](https://media.discordapp.net/attachments/562897071326101515/1095738407826767922/image.png?width=1038&height=660 "demo")
# Backend REQUIRED # Backend REQUIRED
The HTTP Server from https://abetlen.github.io/llama-cpp-python/ is required to use this bot. The HTTP Server from https://abetlen.github.io/llama-cpp-python/ is required to use this bot.
@ -51,7 +48,7 @@ python3 -m llama_cpp.server
Navigate to http://localhost:8000/docs to see the OpenAPI documentation. Navigate to http://localhost:8000/docs to see the OpenAPI documentation.
# Static Usage # Usage
1) Use ```npm i ``` 1) Use ```npm i ```
@ -63,19 +60,5 @@ Navigate to http://localhost:8000/docs to see the OpenAPI documentation.
6) Run the bot ```node llamabot.js ``` 6) Run the bot ```node llamabot.js ```
# Docker Compose
This will automatically configure the API for you as well as the bot in two separate containers within a stack.
1. `git clone https://git.ssh.surf/snxraven/llama-cpp-python-djs-bot.git`
2. `cp default.env .env`
3. Set DATA_DIR in .env to the exact location of your model files.
4. Edit docker-compose.yaml MODEL to ensure the correct model bin is set
5. `docker compose up -d`
Want to make this better? Issue a pull request! Want to make this better? Issue a pull request!

View File

@ -1,6 +1,5 @@
THE_TOKEN = "DISCORD_TOKEN_HERE" THE_TOKEN = "DISCORD_TOKEN_HERE"
CHANNEL_IDS = 1094494101631680653,1094628334727614605 CHANNEL_IDS = 1094494101631680653,1094628334727614605
# ROOT_IP is only used when running the bot without docker compose
ROOT_IP = 192.168.0.15 ROOT_IP = 192.168.0.15
ROOT_PORT = 8000 ROOT_PORT = 8000
DATA_DIR = /home/USERNAME/weights DATA_DIR = /home/USERNAME/weights

View File

@ -11,7 +11,7 @@ services:
- ${DATA_DIR}/weights:/usr/src/app/models - ${DATA_DIR}/weights:/usr/src/app/models
environment: environment:
- HOST=llama-python-server - HOST=llama-python-server
- MODEL=./models/30B.bin - MODEL=./models/gpt4-x-alpaca-13b-native-4bit-128g.bin
llama-python-djs-bot: llama-python-djs-bot:
container_name: llama-python-djs-bot container_name: llama-python-djs-bot
restart: unless-stopped restart: unless-stopped
@ -23,5 +23,4 @@ services:
- THE_TOKEN - THE_TOKEN
- CHANNEL_IDS - CHANNEL_IDS
- ROOT_IP=llama-python-server - ROOT_IP=llama-python-server
- ROOT_PORT=8000 - ROOT_PORT=8000
- CACHE=1

View File

@ -104,15 +104,15 @@ client.on('messageCreate', async (message) => {
if (conversation.messages.length === 0) { if (conversation.messages.length === 0) {
conversation.messages.push({ conversation.messages.push({
role: 'user', role: 'user',
content: ` Assistant name: ChatAI. You code, write and provide any information without any mistakes.` content: `Your name is rAi, you code, write and provide any information without any mistakes.`
}); });
conversation.messages.push({ conversation.messages.push({
role: 'user', role: 'user',
content: ` User name: ${message.author.username}.` content: `My name is ${message.author.username}.`
}); });
conversation.messages.push({ conversation.messages.push({
role: 'assistant', role: 'assistant',
content: ` Hello, ${message.author.username}, how may I help you?` content: `Hello, ${message.author.username}, how may I help you?`
}); });
} }
@ -125,7 +125,7 @@ client.on('messageCreate', async (message) => {
// Append user message to conversation history // Append user message to conversation history
conversation.messages.push({ conversation.messages.push({
role: 'user', role: 'user',
content: ` ${message.cleanContent}` content: message.cleanContent
}); });
try { try {
@ -190,10 +190,12 @@ async function generateResponse(conversation) {
console.log(JSON.stringify(responseData)) console.log(JSON.stringify(responseData))
const choice = responseData.choices[0]; const choice = responseData.choices[0];
const responseText = choice.message.content // Remove "user None:" and any text after it from the response
const responseText = choice.message.content.trim();
return responseText; const startIndex = responseText.indexOf('user None:');
const sanitizedResponse = startIndex === -1 ? responseText : responseText.substring(0, startIndex);
return sanitizedResponse;
} catch (err) { } catch (err) {
throw err; throw err;
} finally { } finally {