diff --git a/docker-compose.gpu.yml b/docker-compose.gpu.yml
index 30470e2..835cdc3 100644
--- a/docker-compose.gpu.yml
+++ b/docker-compose.gpu.yml
@@ -1,8 +1,8 @@
 version: '3.9'
 
 services:
-  llama-python-server:
-    container_name: llama-python-gpu-server
+  backend:
+    container_name: llama-gpu-server
     restart: unless-stopped
     build:
       context: ./gpu-server
@@ -10,25 +10,25 @@ services:
     volumes:
       - ${DATA_DIR}:/usr/src/app/models
     environment:
-      - HOST=llama-python-gpu-server
+      - HOST=llama-gpu-server
       - MODEL=./models/ggml-vic7b-q5_1.bin.1
       - NVIDIA_VISIBLE_DEVICES=all
     runtime: nvidia
 
-  llama-python-djs-bot:
-    container_name: llama-python-djs-bot
+  frontend:
+    container_name: llama-djs-bot
     restart: unless-stopped
     build:
       context: .
     depends_on:
-      - llama-python-server
+      - backend
     environment:
       - THE_TOKEN
       - REFRESH_INTERVAL
       - CHANNEL_IDS
       - GPU
-      - ROOT_IP=llama-python-server
+      - ROOT_IP=llama-gpu-server
      - ROOT_PORT=8000
      - INIT_PROMPT='Assistant Name':' ChatAI. You code, write and provide any information without any mistakes.'
      - NVIDIA_VISIBLE_DEVICES=all
     runtime: nvidia