Compare commits

12 commits: e12f518bd7 ... ba9aeaeb3e

Commits in this range (SHA1):

- ba9aeaeb3e
- 7b3e0c1db2
- 5f8e57d121
- cc770e617d
- 5a56251e20
- 6bb74c8020
- 56c7bfd26d
- 5793b7b4ad
- cf6e47eebc
- f98caa23cc
- d3162bce32
- ec7dbde761
README.md (19 changed lines)
@@ -32,6 +32,9 @@ Define a generateResponse function that sends a request to the GPT-3 API to generate…
 Call the generateResponse function within the messageCreate event listener function.
 
+![demo](https://media.discordapp.net/attachments/562897071326101515/1095738407826767922/image.png?width=1038&height=660 "demo")
+
 # Backend REQUIRED
 
 The HTTP Server from https://abetlen.github.io/llama-cpp-python/ is required to use this bot.
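As a point of reference, here is a minimal sketch (not the repository's exact code) of how the generateResponse call described above might be wired into the discord.js messageCreate listener. The per-channel conversation store, channel filtering, and error handling of the real llamabot.js are omitted, and the generateResponse(conversation) signature is assumed from the hunks further down.

```js
// Minimal sketch, assuming discord.js v14 and a generateResponse(conversation)
// helper like the one shown later in this compare. Illustrative only.
const { Client, GatewayIntentBits } = require('discord.js');

const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent,
  ],
});

const conversations = new Map(); // hypothetical per-channel history store

client.on('messageCreate', async (message) => {
  if (message.author.bot) return; // ignore other bots

  const conversation = conversations.get(message.channel.id) ?? { messages: [] };
  conversation.messages.push({ role: 'user', content: message.cleanContent });
  conversations.set(message.channel.id, conversation);

  const reply = await generateResponse(conversation); // see the llamabot.js hunks below
  await message.channel.send(reply);
});

client.login(process.env.THE_TOKEN); // THE_TOKEN comes from .env (see default.env below)
```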
@@ -48,7 +51,7 @@ python3 -m llama_cpp.server
 Navigate to http://localhost:8000/docs to see the OpenAPI documentation.
 
-# Usage
+# Static Usage
 
 1) Use ```npm i ```
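Since the bot depends on that local server, a quick reachability check against the /docs URL mentioned above can save a confusing startup failure. A small illustrative helper (Node 18+ built-in fetch assumed; nothing here comes from the repository):

```js
// Sanity check that the llama-cpp-python HTTP server is listening before
// starting the bot. Uses the /docs URL from the README; everything else is
// an illustrative assumption.
async function backendIsUp(base = 'http://localhost:8000') {
  try {
    const res = await fetch(`${base}/docs`);
    return res.ok; // 200 means the OpenAPI docs page (and the server) are reachable
  } catch {
    return false; // connection refused, server not started, etc.
  }
}

backendIsUp().then((ok) => console.log(ok ? 'backend reachable' : 'backend NOT reachable'));
```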
@@ -60,5 +63,19 @@ Navigate to http://localhost:8000/docs to see the OpenAPI documentation.
 6) Run the bot ```node llamabot.js ```
 
+# Docker Compose
+
+This will automatically configure both the API and the bot in two separate containers within a stack.
+
+1. `git clone https://git.ssh.surf/snxraven/llama-cpp-python-djs-bot.git`
+
+2. `cp default.env .env`
+
+3. Set DATA_DIR in .env to the exact location of your model files.
+
+4. Edit docker-compose.yaml MODEL to ensure the correct model bin is set.
+
+5. `docker compose up -d`
+
 Want to make this better? Issue a pull request!
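Note how the compose stack and the standalone setup differ only in where the backend lives: docker-compose.yaml (further down) points ROOT_IP at the llama-python-server service, while .env uses a LAN address. A hedged sketch of how the bot could resolve its backend base URL from those variables (the exact logic inside llamabot.js may differ):

```js
// Sketch only: deriving the backend base URL from the ROOT_IP / ROOT_PORT
// variables defined in default.env and overridden in docker-compose.yaml.
// Inside the compose stack ROOT_IP is the service name "llama-python-server";
// when the bot runs by hand it is a plain LAN IP such as 192.168.0.15.
const host = process.env.ROOT_IP || '127.0.0.1'; // fallback is an assumption
const port = process.env.ROOT_PORT || '8000';

const backendBaseUrl = `http://${host}:${port}`;
console.log(`Using llama-cpp-python backend at ${backendBaseUrl}`);
```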
default.env

@@ -1,5 +1,6 @@
 THE_TOKEN = "DISCORD_TOKEN_HERE"
 CHANNEL_IDS = 1094494101631680653,1094628334727614605
+# ROOT_IP is only used when running the bot without docker compose
 ROOT_IP = 192.168.0.15
 ROOT_PORT = 8000
 DATA_DIR = /home/USERNAME/weights
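CHANNEL_IDS is a comma-separated list, so the bot presumably only responds in those channels. The filtering code is not part of this diff; the sketch below is one plausible way to apply it and is not taken from llamabot.js:

```js
// Illustrative only: restricting replies to the channels listed in CHANNEL_IDS.
const allowedChannels = (process.env.CHANNEL_IDS || '')
  .split(',')
  .map((id) => id.trim())
  .filter(Boolean);

function isAllowedChannel(channelId) {
  return allowedChannels.includes(channelId);
}

// e.g. at the top of the messageCreate handler:
// if (!isAllowedChannel(message.channel.id)) return;
```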
docker-compose.yaml

@@ -11,7 +11,7 @@ services:
       - ${DATA_DIR}/weights:/usr/src/app/models
     environment:
       - HOST=llama-python-server
-      - MODEL=./models/gpt4-x-alpaca-13b-native-4bit-128g.bin
+      - MODEL=./models/30B.bin
   llama-python-djs-bot:
     container_name: llama-python-djs-bot
     restart: unless-stopped

@@ -23,4 +23,5 @@ services:
       - THE_TOKEN
       - CHANNEL_IDS
       - ROOT_IP=llama-python-server
       - ROOT_PORT=8000
+      - CACHE=1
llamabot.js (18 changed lines)
@@ -104,15 +104,15 @@ client.on('messageCreate', async (message) => {
   if (conversation.messages.length === 0) {
     conversation.messages.push({
       role: 'user',
-      content: `Your name is rAi, you code, write and provide any information without any mistakes.`
+      content: ` Assistant name: ChatAI. You code, write and provide any information without any mistakes.`
     });
     conversation.messages.push({
       role: 'user',
-      content: `My name is ${message.author.username}.`
+      content: ` User name: ${message.author.username}.`
     });
     conversation.messages.push({
       role: 'assistant',
-      content: `Hello, ${message.author.username}, how may I help you?`
+      content: ` Hello, ${message.author.username}, how may I help you?`
     });
   }
 
@@ -125,7 +125,7 @@ client.on('messageCreate', async (message) => {
   // Append user message to conversation history
   conversation.messages.push({
     role: 'user',
-    content: message.cleanContent
+    content: ` ${message.cleanContent}`
   });
 
   try {
@@ -190,12 +190,10 @@ async function generateResponse(conversation) {
     console.log(JSON.stringify(responseData))
     const choice = responseData.choices[0];
 
-    // Remove "user None:" and any text after it from the response
-    const responseText = choice.message.content
-    const startIndex = responseText.indexOf('user None:');
-    const sanitizedResponse = startIndex === -1 ? responseText : responseText.substring(0, startIndex);
+    const responseText = choice.message.content.trim();
 
-    return sanitizedResponse;
+    return responseText;
   } catch (err) {
     throw err;
   } finally {
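The hunk above only shows the tail of generateResponse. For orientation, here is a hedged sketch of what the full call plausibly looks like after this change; the /v1/chat/completions route and request body are assumptions based on the OpenAI-compatible API that llama-cpp-python's HTTP server advertises, not code copied from the repository.

```js
// Sketch of a chat-completions request against the llama-cpp-python server.
// Endpoint path and payload shape are assumptions (OpenAI-compatible API);
// only the response handling mirrors the diff above. Node 18+ fetch assumed.
async function generateResponseSketch(conversation, baseUrl = 'http://localhost:8000') {
  const response = await fetch(`${baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages: conversation.messages }),
  });

  const responseData = await response.json();
  console.log(JSON.stringify(responseData));

  const choice = responseData.choices[0];

  // After this change the reply is simply trimmed; the old "user None:"
  // truncation logic has been removed.
  const responseText = choice.message.content.trim();

  return responseText;
}
```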