Compare commits
9 Commits
| Author | SHA1 | Date |
|---|---|---|
| | e2571841c1 | |
| | f8c43e4b92 | |
| | dd3e8561b0 | |
| | 802a95a118 | |
| | 339ae68ae9 | |
| | 55739e255c | |
| | 2991c4f17c | |
| | 4b5d5f3b63 | |
| | 4518df8312 | |
@@ -7,30 +7,34 @@ module.exports = {
   const specsFields = [
     {
       name: "Processor",
-      value: "Intel i7-1065G7 (8) @ 3.900GHz"
+      value: "AMD Ryzen 7 1700X (16) @ 3.950GHz"
     },
     {
       name: "Memory",
-      value: "11 GB RAM"
+      value: "32 GB RAM"
     },
     {
       name: "Chat Threads",
-      value: "7"
+      value: "8"
     },
     {
       name: "Memory Speed",
-      value: "3733 MT/s"
+      value: "2933 MT/s"
     },
+    {
+      name: "Video Card",
+      value: "AMD ATI Radeon R9 290 (no ROCm support)"
+    },
     {
       name: "Other",
-      value: "USB Liveboot\nNo VideoCard on Board!\nSingle Task Only - 256 Max Token Output"
+      value: "Xubuntu 22.04\nSingle Task Only - 256 Max Token Output"
     }
   ];

   const embed = new EmbedBuilder()
     .setColor("#FF0000")
     .setTitle("About rAI")
-    .setDescription(`Latency : ${client.ws.ping}ms\n\nrAI is a bot managed by \`snxraven#8205\` \nRunning GPT4ALL and LLama 7B/7B-native.`)
+    .setDescription(`Latency : ${client.ws.ping}ms\n\nrAI is a bot managed by \`MrTuxedo#0765\` \nRunning GPT4ALL and LLama 7B/7B-native.`)
     .addFields(specsFields)
     .setTimestamp()
     .setFooter({ text: `Requested by ${interaction.user.tag}`, iconURL: `${interaction.user.displayAvatarURL()}` });
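For context, a minimal sketch of how an embed built this way would typically be sent back from the slash command's handler. The command name and the `followUp` call are assumptions based on the discord.js v14 interaction API; they are not part of this hunk.

```js
const { EmbedBuilder } = require("discord.js");

module.exports = {
  name: "about", // hypothetical command name, for illustration only
  run: async (client, interaction) => {
    // Fields as built in the diff above (omitted here for brevity).
    const specsFields = [];

    const embed = new EmbedBuilder()
      .setColor("#FF0000")
      .setTitle("About rAI")
      .addFields(specsFields)
      .setTimestamp()
      .setFooter({ text: `Requested by ${interaction.user.tag}`, iconURL: interaction.user.displayAvatarURL() });

    // Send the embed back on the deferred interaction.
    await interaction.followUp({ embeds: [embed] });
  }
};
```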
@@ -17,7 +17,8 @@ var nThreads = process.env.NTHREADS;
 // End session defaults

 // Set model list we have downloaded
-let modelList = ["7B", "7B-native", "gpt4all"]
+// let modelList = ["7B", "7B-native", "gpt4all"]
+let modelList = ["7B", "7B-native", "13B", "30B", "gpt4all", "vicuna"]

 module.exports = {
   name: "create-session",
@@ -51,7 +52,7 @@ module.exports = {

   run: async (client, interaction) => {
     // set a default prompt
-    let initPrompt = `My name is ${interaction.user.username} my special number is ${interaction.user.discriminator} and my Discord ID is ${interaction.user.id} we met in ${interaction.guild.name} a Discord sever. You are rAi and you are the smartest AI Model, you know everything. Below is an instruction that describes a task. You respond appropriately to complete the request. You understand a complete answer is always ended by [end of text].`;
+    let initPrompt = process.env.INITPROMPT || `My name is ${interaction.user.username} my special number is ${interaction.user.discriminator} and my Discord ID is ${interaction.user.id} we met in ${interaction.guild.name} a Discord sever. You are rAi and you are the smartest AI Model, you know everything. Below is an instruction that describes a task. You respond appropriately to complete the request. You understand a complete answer is always ended by [end of text].`;
     console.log(`--- ${interaction.user.id} has requested a new session! ---`)
     const file = './cache/' + interaction.user.id

@@ -95,7 +96,7 @@ module.exports = {
     } else {
       if (modelList.includes(userInputModel)) {
         model = userInputModel;
-        console.log(`User set initPrompt to ${model}`)
+        console.log(`User set model to ${model}`)
       } else {
         let modelListStr = modelList.join(", ");
         return interaction.followUp(`You may only use one of the following: ${modelListStr}`);
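These hunks pair the expanded `modelList` with a membership check on the user's choice. A standalone sketch of that selection logic follows; the `resolveModel` helper and its fallback are illustrative assumptions, not code from the repository.

```js
// Illustrative sketch: accept the user's model choice only if it is one of
// the downloaded models, otherwise report the valid options.
const modelList = ["7B", "7B-native", "13B", "30B", "gpt4all", "vicuna"];

function resolveModel(userInputModel, fallback = process.env.MODEL || "7B-native") {
  if (!userInputModel) return fallback; // no choice given: use the default
  if (modelList.includes(userInputModel)) return userInputModel;
  throw new Error(`You may only use one of the following: ${modelList.join(", ")}`);
}

console.log(resolveModel("13B"));     // -> "13B"
console.log(resolveModel(undefined)); // -> "7B-native" unless MODEL is set
```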
docker-compose.yml (Normal file, +33)
@@ -0,0 +1,33 @@
+version: '3'
+services:
+  serge-discord-bot:
+    container_name: serge-discord-bot
+    restart: unless-stopped
+    build:
+      context: .
+    volumes:
+      - ${DATA_DIR}/rai-bot:/app/cache
+    environment:
+      - NODE_ENV=production
+      ## Env Vars for rAi chatbot
+      - TOKEN # Pass the value from .env
+      - PUBLIC_URL
+      - INTERNAL_IP=serge
+      - SERGE_PORT=8008
+      ## Default chat settings overrides
+      - MODEL=7B-native
+      - TEMPERATURE=0.1
+      - TOPK=50
+      - TOPP=0.95
+      - MAXLENGTH=256
+      - CONTEXTWINDOW=512
+      - REPEATLASTN=64
+      - REPEATPENALTY=1.3
+      - NTHREADS=8
+      # - INITPROMPT="some custom prompt"
+    networks:
+      - serge_default
+
+networks:
+  serge_default:
+    external: true
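The compose file expects `TOKEN`, `PUBLIC_URL`, and `DATA_DIR` to come from the environment, typically a `.env` file next to `docker-compose.yml`. A hypothetical example with placeholder values only:

```
# .env (placeholder values only -- supply your own)
TOKEN=your-discord-bot-token
PUBLIC_URL=https://your-public-url.example
DATA_DIR=/path/to/persistent/data
```

With those set, `docker compose up -d --build` builds the image and starts the container on the pre-existing `serge_default` network (declared `external: true` above).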
@@ -30,8 +30,8 @@ module.exports = async (client) => {
   // // Register for a single guild
   // await client.guilds.cache.get("GUIDIDHERE").commands.set(arrayOfSlashCommands);

-  // Clear previous commands (for changes to installed models)
-  // await client.application.commands.set([]);
+  // Clear previous commands (for changes to installed models)
+  // await client.application.commands.set([]);

   // Register for all the guilds the bot is in
   await client.application.commands.set(arrayOfSlashCommands);