forked from snxraven/llama-cpp-python-djs-bot
HuggingFace Deploy Instructions
This commit is contained in:
38
huggingface-config/Dockerfile
Normal file
38
huggingface-config/Dockerfile
Normal file
@@ -0,0 +1,38 @@
# syntax=docker/dockerfile:1
# Image for HuggingFace Spaces: runs the llama-cpp-python HTTP server and
# the Node.js Discord bot under pm2. Spaces executes the container as an
# arbitrary non-root uid, which is why the pm2 state dirs below are made
# world-writable.

# Pin the base release instead of `latest` for reproducible builds.
FROM ubuntu:22.04

# OS packages + Node.js 18 in a single layer: `update` and `install` must
# share a layer (stale-cache bug otherwise), apt lists are removed in the
# same layer so they never persist in the image, and `sudo` is unnecessary
# during the build (steps already run as root) — it is kept installed only
# because the original image shipped it and start.sh may rely on it.
RUN apt-get update \
 && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
      ca-certificates \
      curl \
      python3 \
      python3-pip \
      sudo \
      wget \
 && curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \
 && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends nodejs \
 && rm -rf /var/lib/apt/lists/*

# pm2 process manager (global) supervises the bot + inference server.
RUN npm i pm2 -g

WORKDIR /code

# pm2 state dirs must be writable by whatever uid Spaces assigns at runtime.
# NOTE(review): 0777 is intentional for HF Spaces — do not tighten without
# verifying the runtime uid.
RUN mkdir -p /code/.pm2 /.pm2 \
 && chmod 0777 /code/.pm2 /.pm2

# llama-cpp-python with the [server] extra provides the OpenAI-compatible
# HTTP server; --no-cache-dir keeps the pip cache out of the layer.
# NOTE(review): version is unpinned — pin it for reproducible deploys.
RUN pip install --no-cache-dir llama-cpp-python[server]

# Install JS deps from the manifests first so this layer stays cached until
# package*.json changes, then copy the rest of the source.
COPY package*.json ./
RUN npm i
COPY . .

# Runtime configuration for the server and pm2 (key=value form; the legacy
# space-separated ENV syntax is deprecated).
ENV HOST=localhost \
    PORT=7860 \
    MODEL=/code/ggml-vic7b-q4_0.bin \
    CACHE=1 \
    PM2_HOME=/code/.pm2

# Bake the quantized Vicuna-7B weights into the image at the path MODEL
# points to. NOTE(review): the download is unverified — add a checksum
# (or ADD --checksum) before relying on this in production.
RUN wget -q https://huggingface.co/eachadea/ggml-vicuna-7b-1.1/resolve/main/ggml-vic7b-q4_0.bin

# Exec form so the script is launched directly (correct signal delivery on
# `docker stop`, overridable argument-by-argument at `docker run`).
CMD ["/bin/bash", "/code/start.sh"]
Reference in New Issue
Block a user