diff --git a/huggingface-config/Dockerfile b/huggingface-config/Dockerfile
index b5d431c..52d56cf 100644
--- a/huggingface-config/Dockerfile
+++ b/huggingface-config/Dockerfile
@@ -32,7 +32,10 @@
 ENV HOST localhost
 ENV PORT 7860
 ENV MODEL=/code/ggml-vic7b-q4_0.bin
 ENV CACHE=1
+ENV USE_MLOCK=0
+ENV REPEAT_PENALTY=1
 ENV PM2_HOME=/code/.pm2
+
 RUN wget -q https://huggingface.co/eachadea/ggml-vicuna-7b-1.1/resolve/main/ggml-vic7b-q4_0.bin
 CMD /bin/bash /code/start.sh
diff --git a/huggingface-config/startServices.json b/huggingface-config/startServices.json
index a156716..4570ce3 100644
--- a/huggingface-config/startServices.json
+++ b/huggingface-config/startServices.json
@@ -7,7 +7,7 @@
   },
   {
     "name": "AI-API",
-    "script": "python3 -m llama_cpp.server --model /code/ggml-vic7b-q4_0.bin",
+    "script": "python3 -m llama_cpp.server",
    "args" : ""
   }
 ]