# llama-cpp-python-djs-bot/docker-compose.gpu.yml
version: '3.9'

services:
  # GPU-enabled llama.cpp inference server; model files are bind-mounted
  # from ${DATA_DIR} on the host into the container's models directory.
  backend:
    container_name: llama-gpu-server
    restart: unless-stopped
    build:
      context: ./gpu-server
    env_file: .env
    volumes:
      - ${DATA_DIR}:/usr/src/app/models
    environment:
      # HOST must match the service/container name so the frontend can reach it.
      - HOST=llama-gpu-server
      - MODEL=./models/ggml-vic7b-q5_1.bin.1
      - NVIDIA_VISIBLE_DEVICES=all
    # Requires the NVIDIA container runtime to be installed on the host.
    runtime: nvidia

  # Discord bot frontend; talks to the backend over ROOT_IP:ROOT_PORT.
  frontend:
    container_name: llama-djs-bot
    restart: unless-stopped
    build:
      context: .
    depends_on:
      - backend
    environment:
      # Bare names are passed through from the host environment / .env file.
      - THE_TOKEN
      - REFRESH_INTERVAL
      - CHANNEL_IDS
      - GPU
      - ROOT_IP=llama-gpu-server
      - ROOT_PORT=8000
      # Quoted so the embedded quotes/colon are not misread by the YAML
      # parser; the value bytes are passed to the container unchanged.
      - "INIT_PROMPT='Assistant Name':' ChatAI. You code, write and provide any information without any mistakes.'"
      - NVIDIA_VISIBLE_DEVICES=all
    runtime: nvidia