version: '3.9'

services:
  llama-python-server:
    container_name: llama-python-server
    restart: unless-stopped
    build:
      context: ./server
    env_file: .env
    volumes:
      - ${DATA_DIR}/weights:/usr/src/app/models
    environment:
      - HOST=llama-python-server
      - MODEL=./models/gpt4-x-alpaca-13b-native-4bit-128g.bin

  llama-python-djs-bot:
    container_name: llama-python-djs-bot
    restart: unless-stopped
    build:
      context: .
    depends_on:
      - llama-python-server
    environment:
      - THE_TOKEN
      - CHANNEL_IDS
      - ROOT_IP=llama-python-server
      - ROOT_PORT=8000
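
# A minimal .env sketch for the variables referenced above. The values below are
# placeholders (assumptions, not from the project): DATA_DIR is the host
# directory whose weights/ subfolder contains the model file, THE_TOKEN is the
# Discord bot token, and CHANNEL_IDS lists the channel IDs the bot responds in.
#
#   DATA_DIR=/opt/llama
#   THE_TOKEN=your-discord-bot-token
#   CHANNEL_IDS=123456789012345678,234567890123456789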