Compare commits


9 Commits
main ... main

Author SHA1 Message Date
GooeyTuxedo e2571841c1 Merge branch 'upstream' 2023-04-11 10:25:57 -07:00
GooeyTuxedo f8c43e4b92 Update "/about" for my instance 2023-04-08 13:22:05 -07:00
GooeyTuxedo dd3e8561b0 Downloaded some other models 2023-04-08 11:42:24 -07:00
GooeyTuxedo 802a95a118 Restart container on exit from unhandled api error 2023-04-08 11:42:03 -07:00
GooeyTuxedo 339ae68ae9 downloaded another model 2023-04-07 17:28:32 -07:00
GooeyTuxedo 55739e255c Fix .env value passthrough 2023-04-07 15:14:11 -07:00
GooeyTuxedo 2991c4f17c my models 2023-04-07 15:08:29 -07:00
GooeyTuxedo 4b5d5f3b63 One liner to get around llama max length bug 2023-04-07 14:38:00 -07:00
GooeyTuxedo 4518df8312 Add docker build files and set chat default settings via exposed env vars 2023-04-07 14:37:34 -07:00
4 changed files with 49 additions and 11 deletions


@@ -7,30 +7,34 @@ module.exports = {
const specsFields = [
{
name: "Processor",
value: "Intel i7-1065G7 (8) @ 3.900GHz"
value: "AMD Ryzen 7 1700X (16) @ 3.950GHz"
},
{
name: "Memory",
value: "11 GB RAM"
value: "32 GB RAM"
},
{
name: "Chat Threads",
value: "7"
value: "8"
},
{
name: "Memory Speed",
value: "3733 MT/s"
value: "2933 MT/s"
},
+ {
+ name: "Video Card",
+ value: "AMD ATI Radeon R9 290 (no ROCm support)"
+ },
{
name: "Other",
value: "USB Liveboot\nNo VideoCard on Board!\nSingle Task Only - 256 Max Token Output"
value: "Xubuntu 22.04\nSingle Task Only - 256 Max Token Output"
}
];
const embed = new EmbedBuilder()
.setColor("#FF0000")
.setTitle("About rAI")
- .setDescription(`Latency : ${client.ws.ping}ms\n\nrAI is a bot managed by \`snxraven#8205\` \nRunning GPT4ALL and LLama 7B/7B-native.`)
+ .setDescription(`Latency : ${client.ws.ping}ms\n\nrAI is a bot managed by \`MrTuxedo#0765\` \nRunning GPT4ALL and LLama 7B/7B-native.`)
.addFields(specsFields)
.setTimestamp()
.setFooter({ text: `Requested by ${interaction.user.tag}`, iconURL: `${interaction.user.displayAvatarURL()}` });


@@ -17,7 +17,8 @@ var nThreads = process.env.NTHREADS;
// End session defaults
// Set model list we have downloaded
let modelList = ["7B", "7B-native", "gpt4all"]
// let modelList = ["7B", "7B-native", "gpt4all"]
let modelList = ["7B", "7B-native", "13B", "30B", "gpt4all", "vicuna"]
module.exports = {
name: "create-session",
@@ -51,7 +52,7 @@ module.exports = {
run: async (client, interaction) => {
// set a default prompt
- let initPrompt = `My name is ${interaction.user.username} my special number is ${interaction.user.discriminator} and my Discord ID is ${interaction.user.id} we met in ${interaction.guild.name} a Discord sever. You are rAi and you are the smartest AI Model, you know everything. Below is an instruction that describes a task. You respond appropriately to complete the request. You understand a complete answer is always ended by [end of text].`;
+ let initPrompt = process.env.INITPROMPT || `My name is ${interaction.user.username} my special number is ${interaction.user.discriminator} and my Discord ID is ${interaction.user.id} we met in ${interaction.guild.name} a Discord sever. You are rAi and you are the smartest AI Model, you know everything. Below is an instruction that describes a task. You respond appropriately to complete the request. You understand a complete answer is always ended by [end of text].`;
console.log(`--- ${interaction.user.id} has requested a new session! ---`)
const file = './cache/' + interaction.user.id
@@ -95,7 +96,7 @@ module.exports = {
} else {
if (modelList.includes(userInputModel)) {
model = userInputModel;
- console.log(`User set initPrompt to ${model}`)
+ console.log(`User set model to ${model}`)
} else {
let modelListStr = modelList.join(", ");
return interaction.followUp(`You may only use one of the following: ${modelListStr}`);
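
The commit "Add docker build files and set chat default settings via exposed env vars" ties the session defaults that this file already reads from process.env (NTHREADS, and now the INITPROMPT fallback above) to the compose file added below. A minimal sketch of how those variables might map onto session defaults on the Node side; the names mirror docker-compose.yml, and the fallback values are illustrative placeholders rather than the bot's actual code:

// Sketch only (assumption): env-driven chat defaults with fallbacks,
// in the same style as `var nThreads = process.env.NTHREADS;` above.
const sessionDefaults = {
  model: process.env.MODEL || "7B-native",
  temperature: Number(process.env.TEMPERATURE ?? 0.1),
  topK: Number(process.env.TOPK ?? 50),
  topP: Number(process.env.TOPP ?? 0.95),
  maxLength: Number(process.env.MAXLENGTH ?? 256),
  contextWindow: Number(process.env.CONTEXTWINDOW ?? 512),
  repeatLastN: Number(process.env.REPEATLASTN ?? 64),
  repeatPenalty: Number(process.env.REPEATPENALTY ?? 1.3),
  nThreads: Number(process.env.NTHREADS ?? 8),
};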

docker-compose.yml (new file, 33 lines)

@@ -0,0 +1,33 @@
version: '3'
services:
serge-discord-bot:
container_name: serge-discord-bot
restart: unless-stopped
build:
context: .
volumes:
- ${DATA_DIR}/rai-bot:/app/cache
environment:
- NODE_ENV=production
## Env Vars for rAi chatbot
- TOKEN # Pass the value from .env
- PUBLIC_URL
- INTERNAL_IP=serge
- SERGE_PORT=8008
## Default chat settings overrides
- MODEL=7B-native
- TEMPERATURE=0.1
- TOPK=50
- TOPP=0.95
- MAXLENGTH=256
- CONTEXTWINDOW=512
- REPEATLASTN=64
- REPEATPENALTY=1.3
- NTHREADS=8
# - INITPROMPT="some custom prompt"
networks:
- serge_default
networks:
serge_default:
external: true
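
TOKEN, PUBLIC_URL, and DATA_DIR are not set in the compose file itself; they are expected to come from an adjacent .env file, which is what the "Fix .env value passthrough" commit addresses. A hypothetical example of that file, with placeholder values only:

TOKEN=your-discord-bot-token
PUBLIC_URL=https://your-serge-instance.example.com
DATA_DIR=/path/to/docker/data

Because the serge_default network is declared external, the Serge stack itself is assumed to be running already (providing the serge hostname on port 8008) before docker compose up -d is run for this bot.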


@@ -30,8 +30,8 @@ module.exports = async (client) => {
// // Register for a single guild
// await client.guilds.cache.get("GUIDIDHERE").commands.set(arrayOfSlashCommands);
- // Clear previous commands (for changes to installed models)
- // await client.application.commands.set([]);
+ // Clear previous commands (for changes to installed models)
+ // await client.application.commands.set([]);
// Register for all the guilds the bot is in
await client.application.commands.set(arrayOfSlashCommands);