Fix up env

This commit is contained in:
Raven Scott 2023-05-08 21:12:57 +02:00
parent 14fa3b06ff
commit 83a7bb90ed
3 changed files with 8 additions and 1 deletions

View File

@ -37,3 +37,9 @@ CACHE = 1
# Set number of threads to use, currently, a standard thread will utilize 1 whole core
# I usually will set this between all cores I physcally have OR 2 cores less to allow for other processes.
N_THREADS = 4
# Always use MMAP unless you know what you are doing
USE_MMAP=1
# Only use MLOCK if you know what it does!
USE_MLOCK=0

View File

@ -11,6 +11,7 @@ services:
- ${DATA_DIR}:/usr/src/app/models
environment:
- HOST=llama-python-server
- MODEL=./models/ggml-vic7b-q4_0.bin
llama-python-djs-bot:
container_name: llama-python-djs-bot
restart: unless-stopped

View File

@ -8,4 +8,4 @@ WORKDIR /usr/src/app
RUN pip install --no-cache-dir llama-cpp-python[server]
CMD python3 -m llama_cpp.server --model /usr/src/app/models/gpt4-x-alpaca-13b-native-4bit-128g.bin
CMD python3 -m llama_cpp.server