Fix hugging face
parent 64be911772
commit 4b090592ad
@@ -32,7 +32,11 @@ ENV HOST localhost
 ENV PORT 7860
 ENV MODEL=/code/ggml-vic7b-q4_0.bin
 ENV CACHE=1
+ENV USE_MLOCK=0
+ENV REPEAT_PENALTY=1
+ENV MODEL=/code/ggml-vic7b-q4_0.bin
 ENV PM2_HOME=/code/.pm2
+
 RUN wget -q https://huggingface.co/eachadea/ggml-vicuna-7b-1.1/resolve/main/ggml-vic7b-q4_0.bin
 
 CMD /bin/bash /code/start.sh
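
The new ENV lines work because llama-cpp-python's bundled server is configured through environment variables (pydantic settings), so values such as MODEL, HOST, PORT, USE_MLOCK and CACHE set in the Dockerfile are picked up by the process automatically. The actual /code/start.sh is not part of this diff, so the following launch script is only an illustrative sketch of how the server could be started under these variables:

    #!/bin/bash
    # Illustrative only: the real /code/start.sh is not shown in this commit.
    # llama_cpp.server reads MODEL, HOST and PORT from the environment,
    # so no --model flag is needed on the command line.
    export MODEL=${MODEL:-/code/ggml-vic7b-q4_0.bin}
    export HOST=${HOST:-localhost}
    export PORT=${PORT:-7860}
    python3 -m llama_cpp.server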
@@ -7,7 +7,7 @@
   },
   {
     "name": "AI-API",
-    "script": "python3 -m llama_cpp.server --model /code/ggml-vic7b-q4_0.bin",
+    "script": "python3 -m llama_cpp.server",
     "args" : ""
   }
 ]
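
With MODEL now supplied through the Dockerfile ENV, the PM2 entry can start the server without a hard-coded --model path. Once the container is running, one way to confirm the API is serving the downloaded model is to hit the OpenAI-compatible endpoints exposed by llama_cpp.server; a hypothetical smoke test, assuming port 7860 is reachable on localhost:

    # Hypothetical check, assuming the container exposes port 7860 on localhost.
    # List the loaded model(s):
    curl -s http://localhost:7860/v1/models
    # Request a short completion to verify inference works end to end:
    curl -s http://localhost:7860/v1/completions \
      -H 'Content-Type: application/json' \
      -d '{"prompt": "Hello", "max_tokens": 8}'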