docker-compose.yml
services:
  web:
    build:
      context: .
      dockerfile: Dockerfile_cuda
      args:
        COMPUTE_LEVEL: "86" # Your CUDA GPU compute level (here: 8.6). Only relevant when building the image.
        CUDA_VERSION: "12.6.3"
    # Change the image to ghcr.io/katherlab/llmaix-metal:latest to use it on Apple Silicon
    image: ghcr.io/katherlab/llmaix-cuda:latest
    ports:
      - "19999:${PORT:-5000}"
    volumes:
      # Adjust the left-hand side to /path/to/your/local/model/dir:/models - don't modify the path after the colon!
      - /PATH/TO/YOUR/LOCAL/MODEL/DIR:/models
    environment:
      - SERVER_PATH=${SERVER_PATH:-/build/llama.cpp/llama-server}
      - PORT=${PORT:-5000}
      - HOST=${HOST:-0.0.0.0}
      # Adjust if the model config in your models directory has a name other than config.yml
      - CONFIG_FILE=${CONFIG_FILE:-/models/config.yml}
      - GPU=${GPU:-ALL}
      - LLAMACPP_PORT=${LLAMACPP_PORT:-2929}
      - DEBUG=${DEBUG:-false}
      - MODE=${MODE:-choice}
      - VERBOSE_LLAMA=${VERBOSE_LLAMA:-false}
      # Set a password here to protect the API (e.g. PASSWORD:-llmaixpassword). If a password is set, the username is llmaix.
      - PASSWORD=${PASSWORD:-}
      # Adjust the API URL and key (e.g. API_URL:-http://localhost:5000, API_KEY:-someapikey)
      - API_URL=${API_URL:-}
      - API_KEY=${API_KEY:-}
      - ONLY_API=${ONLY_API:-false} # If true, a local model config will not be checked
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['0']
              capabilities: [gpu]
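
A minimal sketch of how this file is typically used, assuming Docker Compose v2 is installed. The ${VAR:-default} references above are resolved from the shell environment or from an optional .env file placed next to docker-compose.yml; the variable names below come from this compose file, while the values are placeholder assumptions you should adapt.

    # .env (optional) - overrides the ${VAR:-default} substitutions in docker-compose.yml
    PORT=5000
    CONFIG_FILE=/models/config.yml
    PASSWORD=llmaixpassword
    DEBUG=false

    # Build the CUDA image and start the service in the background
    docker compose up -d --build

    # Follow the application logs
    docker compose logs -f web

With the port mapping "19999:${PORT:-5000}" above, the service is then reachable on the host at port 19999.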