Changes from all commits (30 commits)
8b48183 Update audio to text (thepetk, Jul 28, 2025)
879b650 Update chatbot (thepetk, Jul 28, 2025)
ccb1b79 Update codegen (thepetk, Jul 28, 2025)
58fb586 Update model server (thepetk, Jul 28, 2025)
e7d26d7 Update object-detection (thepetk, Jul 28, 2025)
035c368 Update rag (thepetk, Jul 28, 2025)
3af7523 Fix argoNS (thepetk, Jul 28, 2025)
49c10ae Fix identation (thepetk, Jul 28, 2025)
6d6304a Fix weird identation issue (thepetk, Jul 28, 2025)
44c9833 RHDHPAI-581: adding UI components and connecting cluster info (Sep 29, 2025)
19e3a81 Add in the Relase process for Templates and Installer (#91) (maysunfaisal, Sep 10, 2025)
ac4501d Merge branch 'main' into JslYoon-RHDHPAI-581 (JslYoon, Oct 2, 2025)
eda9bb8 Update & clean up documentation (Oct 3, 2025)
053db25 generating templates (Oct 3, 2025)
43b1526 temp (Oct 7, 2025)
950db5e Update rolling demo branch for RHDH 1.8 (#93) (thepetk, Oct 8, 2025)
55164d8 Update the remote cluster ui help field (#94) (thepetk, Oct 7, 2025)
1082fca Remove allowedHosts and rename gitCommitMessage (#96) (thepetk, Oct 8, 2025)
375aee6 RHDHPAI-581: adding UI components and connecting cluster info (Sep 29, 2025)
548ac09 Move remote cluster info to deployments tab (JslYoon, Oct 9, 2025)
03c213a change UI order (JslYoon, Oct 9, 2025)
169d4b7 RHDHPAI-581: adding UI components and connecting cluster info (#90) (JslYoon, Oct 3, 2025)
ee86511 RHDHPAI-581: adding UI components and connecting cluster info (Sep 29, 2025)
f0d9bd7 RHDHPAI-581: adding UI components and connecting cluster info (Sep 29, 2025)
9274dcf Move remote cluster info to deployments tab (JslYoon, Oct 9, 2025)
c6b1b05 rebase main (JslYoon, Oct 9, 2025)
2180f1a Merge branch 'main' into JslYoon-RHDHPAI-581 (JslYoon, Oct 9, 2025)
e12a78d resolve rebase conflicts (JslYoon, Oct 9, 2025)
cc7fd58 update model image (JslYoon, Oct 9, 2025)
84d99a4 Merge branch 'ai-rolling-demo' into JslYoon-RHDHPAI-581 (JslYoon, Oct 9, 2025)
3 changes: 1 addition & 2 deletions scripts/envs/audio-to-text
@@ -12,8 +12,7 @@ export APP_RUN_COMMAND="streamlit run whisper_client.py"
 export INIT_CONTAINER="quay.io/redhat-ai-dev/whisper-small:latest"

 # https://github.com/redhat-ai-dev/developer-images/blob/main/model-servers/whispercpp/1.7.6/Containerfile
-# https://github.com/ggerganov/whisper.cpp/blob/v1.7.6/examples/server/README.md
-export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/whispercpp:1.7.6"
+export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/whispercpp:1.8.0"
 export MODEL_SERVICE_DESC="Simple HTTP server where WAV Files are passed to the inference model via HTTP requests"
 export MODEL_SERVICE_SRC="https://github.com/containers/ai-lab-recipes/tree/main/model_servers/whispercpp"

10 changes: 5 additions & 5 deletions scripts/envs/chatbot
@@ -11,15 +11,15 @@ export APP_RUN_COMMAND="streamlit run chatbot_ui.py"
 # https://github.com/redhat-ai-dev/developer-images/tree/main/models/granite-3.1-8b-instruct-gguf
 export INIT_CONTAINER="quay.io/redhat-ai-dev/granite-3.1-8b-instruct-gguf:latest"

-# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8
-export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/llamacpp_python:0.3.8"
+# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16
+export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/llamacpp_python:0.3.16"
 export MODEL_SERVICE_DESC="A Python binding of LLM inference in C/C++ with minimal setup"
-export MODEL_SERVICE_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8"
+export MODEL_SERVICE_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16"

 # https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4
-export VLLM_CONTAINER="quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.8.4"
+export VLLM_CONTAINER="quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.11.0"
 export VLLM_DESC="A high throughput, memory efficient inference and serving engine with GPU support for LLMs in OpenShift"
-export VLLM_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4"
+export VLLM_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0"

 # https://huggingface.co/ibm-granite/granite-3.1-8b-instruct
 export LLM_MODEL_NAME="ibm-granite/granite-3.1-8b-instruct"

12 changes: 6 additions & 6 deletions scripts/envs/codegen
@@ -11,15 +11,15 @@ export APP_RUN_COMMAND="streamlit run codegen-app.py"
 # https://github.com/redhat-ai-dev/developer-images/tree/main/models/mistral-7b-instruct-v0.2
 export INIT_CONTAINER="quay.io/redhat-ai-dev/mistral-7b-instruct-v0.2:latest"

-# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8
-export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/llamacpp_python:0.3.8"
+# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16
+export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/llamacpp_python:0.3.16"
 export MODEL_SERVICE_DESC="A Python binding of LLM inference in C/C++ with minimal setup"
-export MODEL_SERVICE_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8"
+export MODEL_SERVICE_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16"

-# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4
-export VLLM_CONTAINER="quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.8.4"
+# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0
+export VLLM_CONTAINER="quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.11.0"
 export VLLM_DESC="A high throughput, memory efficient inference and serving engine with GPU support for LLMs in OpenShift"
-export VLLM_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4"
+export VLLM_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0"

 # https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-AWQ
 export LLM_MODEL_NAME="TheBloke/Mistral-7B-Instruct-v0.2-AWQ"

6 changes: 3 additions & 3 deletions scripts/envs/model-server
@@ -7,10 +7,10 @@ export APP_DESC="Deploy a granite-3.1 8b model with a vLLM server. While no appl
 export APP_SUMMARY="A granite-3.1 8b model server deployment."
 export APP_TAGS='["ai", "vllm", "modelserver"]'

-# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4
-export VLLM_CONTAINER="quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.8.4"
+# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0
+export VLLM_CONTAINER="quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.11.0"
 export VLLM_DESC="A high throughput, memory efficient inference and serving engine with GPU support for LLMs in OpenShift"
-export VLLM_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4"
+export VLLM_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0"

 # https://huggingface.co/ibm-granite/granite-3.1-8b-instruct
 export LLM_MODEL_NAME="ibm-granite/granite-3.1-8b-instruct"

12 changes: 6 additions & 6 deletions scripts/envs/rag
@@ -11,15 +11,15 @@ export APP_RUN_COMMAND="streamlit run rag_app.py"
 # https://github.com/redhat-ai-dev/developer-images/tree/main/models/granite-3.1-8b-instruct-gguf
 export INIT_CONTAINER="quay.io/redhat-ai-dev/granite-3.1-8b-instruct-gguf:latest"

-# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8
-export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/llamacpp_python:0.3.8"
+# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16
+export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/llamacpp_python:0.3.16"
 export MODEL_SERVICE_DESC="A Python binding of LLM inference in C/C++ with minimal setup"
-export MODEL_SERVICE_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8"
+export MODEL_SERVICE_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16"

-# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4
-export VLLM_CONTAINER="quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.8.4"
+# https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0
+export VLLM_CONTAINER="quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.11.0"
 export VLLM_DESC="A high throughput, memory efficient inference and serving engine with GPU support for LLMs in OpenShift"
-export VLLM_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4"
+export VLLM_SRC="https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0"

 # https://huggingface.co/ibm-granite/granite-3.1-8b-instruct
 export LLM_MODEL_NAME="ibm-granite/granite-3.1-8b-instruct"

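Reviewer note: since the env files above only pin container tags, a quick sanity check for a bump like this is to confirm the new tags resolve in the registry before merging. A minimal sketch, assuming skopeo is installed locally (podman pull would work too, at the cost of downloading the images):

# Verify that each bumped tag exists on quay.io without pulling the image.
skopeo inspect docker://quay.io/redhat-ai-dev/llamacpp_python:0.3.16 >/dev/null && echo "llamacpp_python OK"
skopeo inspect docker://quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.11.0 >/dev/null && echo "vllm OK"
skopeo inspect docker://quay.io/redhat-ai-dev/whispercpp:1.8.0 >/dev/null && echo "whispercpp OK"
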
2 changes: 1 addition & 1 deletion scripts/util
@@ -126,4 +126,4 @@ function apply-configurations() {
   source $ROOT_DIR/properties
   cat $DEST/template.yaml | envsubst >$DEST/new-template.yaml
   mv $DEST/new-template.yaml $DEST/template.yaml
-}
+}

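Reviewer note: for context, `apply-configurations` renders a template by sourcing the repo's `properties` file and substituting `${VAR}` placeholders with envsubst. A minimal standalone sketch of the same flow, assuming a POSIX shell with envsubst (gettext) available; the paths and the single variable are illustrative stand-ins, not the repo's real layout:

#!/usr/bin/env sh
# Sketch of the envsubst-based rendering done by apply-configurations.
ROOT_DIR=.
DEST=./out
mkdir -p "$DEST"

# A properties file exporting the variables the template references.
cat >"$ROOT_DIR/properties" <<'EOF'
export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/llamacpp_python:0.3.16"
EOF

# A template containing a ${VAR} placeholder.
cat >"$DEST/template.yaml" <<'EOF'
image: ${MODEL_SERVICE_CONTAINER}
EOF

# Same pattern as scripts/util: source, substitute, replace in place.
. "$ROOT_DIR/properties"
envsubst <"$DEST/template.yaml" >"$DEST/new-template.yaml"
mv "$DEST/new-template.yaml" "$DEST/template.yaml"

cat "$DEST/template.yaml"  # -> image: quay.io/redhat-ai-dev/llamacpp_python:0.3.16
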
34 changes: 34 additions & 0 deletions skeleton/template.yaml
@@ -451,6 +451,15 @@ spec:
         - imageName
         # SED_APP_SUPPORT_END
         - namespace
+      ui:order:
+        - imageRegistry
+        - imageOrg
+        - imageName
+        - namespace
+        - deployArgoCDApplicationOnRemoteCluster
+        - remoteClusterAPIUrl
+        - remoteClusterDeploymentNamespace
+        - rhoaiSelected
       properties:
         # SED_APP_SUPPORT_START
         imageRegistry:
@@ -477,6 +486,11 @@
           ui:autofocus: true
           ui:options:
             rows: 5
+        deployArgoCDApplicationOnRemoteCluster:
+          title: Deploy ArgoCD Application on Remote Cluster?
+          type: boolean
+          default: false
+          ui:help: "Check this to deploy the ArgoCD application to a remote cluster"
         # SED_APP_SUPPORT_START
         rhoaiSelected:
           title: Create Workbench for OpenShift AI
@@ -485,6 +499,26 @@
           default: false
           ui:help: If you select this field, you must ensure that Red Hat OpenShift AI has been installed on your cluster.
         # SED_APP_SUPPORT_END
+      dependencies:
+        deployArgoCDApplicationOnRemoteCluster:
+          oneOf:
+            - required:
+                - remoteClusterAPIUrl
+                - remoteClusterDeploymentNamespace
+              properties:
+                deployArgoCDApplicationOnRemoteCluster:
+                  const: true
+                remoteClusterAPIUrl:
+                  title: Remote Cluster API URL
+                  type: string
+                  ui:help: "Kube API URL of remote cluster"
+                remoteClusterDeploymentNamespace:
+                  title: Remote Cluster Deployment Namespace
+                  type: string
+                  ui:help: "The namespace of the remote cluster that the argoCD application will be deployed"
+            - properties:
+                deployArgoCDApplicationOnRemoteCluster:
+                  const: false
   # These steps are executed in the scaffolder backend, using data that we gathered
   # via the parameters above.
   steps:

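Reviewer note: in the hunk above, `ui:order` fixes the display order of the form fields, and the `dependencies`/`oneOf` block is the standard JSON Schema idiom (as rendered by react-jsonschema-form, which Backstage software templates use) for revealing and requiring `remoteClusterAPIUrl` and `remoteClusterDeploymentNamespace` only when the boolean is checked. A minimal, self-contained sketch of the same idiom, with illustrative field names rather than the template's real ones:

# Boolean-gated fields via JSON Schema `dependencies` + `oneOf` (YAML form).
# Only `enableRemote` is shown up front; checking it reveals and requires
# the two extra fields. All names here are illustrative.
type: object
properties:
  enableRemote:
    type: boolean
    default: false
dependencies:
  enableRemote:
    oneOf:
      # Checked: the extra fields appear and become required.
      - properties:
          enableRemote:
            const: true
          apiUrl:
            type: string
          namespace:
            type: string
        required:
          - apiUrl
          - namespace
      # Unchecked: no extra fields are shown or required.
      - properties:
          enableRemote:
            const: false
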
38 changes: 36 additions & 2 deletions templates/audio-to-text/template.yaml
@@ -254,6 +254,15 @@ spec:
         - imageName
         # SED_APP_SUPPORT_END
         - namespace
+      ui:order:
+        - imageRegistry
+        - imageOrg
+        - imageName
+        - namespace
+        - deployArgoCDApplicationOnRemoteCluster
+        - remoteClusterAPIUrl
+        - remoteClusterDeploymentNamespace
+        - rhoaiSelected
       properties:
         # SED_APP_SUPPORT_START
         imageRegistry:
@@ -280,6 +289,11 @@
           ui:autofocus: true
           ui:options:
             rows: 5
+        deployArgoCDApplicationOnRemoteCluster:
+          title: Deploy ArgoCD Application on Remote Cluster?
+          type: boolean
+          default: false
+          ui:help: "Check this to deploy the ArgoCD application to a remote cluster"
         # SED_APP_SUPPORT_START
         rhoaiSelected:
           title: Create Workbench for OpenShift AI
@@ -288,6 +302,26 @@
           default: false
           ui:help: If you select this field, you must ensure that Red Hat OpenShift AI has been installed on your cluster.
         # SED_APP_SUPPORT_END
+      dependencies:
+        deployArgoCDApplicationOnRemoteCluster:
+          oneOf:
+            - required:
+                - remoteClusterAPIUrl
+                - remoteClusterDeploymentNamespace
+              properties:
+                deployArgoCDApplicationOnRemoteCluster:
+                  const: true
+                remoteClusterAPIUrl:
+                  title: Remote Cluster API URL
+                  type: string
+                  ui:help: "Kube API URL of remote cluster"
+                remoteClusterDeploymentNamespace:
+                  title: Remote Cluster Deployment Namespace
+                  type: string
+                  ui:help: "The namespace of the remote cluster that the argoCD application will be deployed"
+            - properties:
+                deployArgoCDApplicationOnRemoteCluster:
+                  const: false
   # These steps are executed in the scaffolder backend, using data that we gathered
   # via the parameters above.
   steps:
@@ -318,7 +352,7 @@
           appContainer: ${{ 'quay.io/redhat-ai-dev/ai-template-bootstrap-app:latest' if parameters.hostType === 'GitHub' else 'quay.io/redhat-ai-dev/audio-to-text:latest' }}
           appPort: 8501
           appRunCommand: "streamlit run whisper_client.py"
-          modelServiceContainer: quay.io/redhat-ai-dev/whispercpp:1.7.6
+          modelServiceContainer: quay.io/redhat-ai-dev/whispercpp:1.8.0
           modelServicePort: 8001
           customModelName: ${{ steps['fetch-model-from-catalog'].output.entity.metadata.name if parameters.modelServer === 'choose-from-the-catalog' else parameters.modelName }}
           modelName: ggerganov/whisper.cpp
@@ -413,7 +447,7 @@
           modelPath: "/model/model.file"
           appContainer: ${{ 'quay.io/redhat-ai-dev/ai-template-bootstrap-app:latest' if parameters.hostType === 'GitHub' else 'quay.io/redhat-ai-dev/audio-to-text:latest' }}
           appPort: 8501
-          modelServiceContainer: quay.io/redhat-ai-dev/whispercpp:1.7.6
+          modelServiceContainer: quay.io/redhat-ai-dev/whispercpp:1.8.0
           modelServicePort: 8001
           existingModelServer: ${{ true if parameters.modelServer === 'Bring you own model server' else (true if parameters.modelServer === 'choose-from-the-catalog' else false) }}
           # SED_EXISTING_START

52 changes: 43 additions & 9 deletions templates/chatbot/template.yaml
@@ -206,7 +206,7 @@ spec:
             const: vLLM
           modelServerDescription:
             type: 'null'
-            description: A high throughput, memory efficient inference and serving engine with GPU support for LLMs in OpenShift. If you choose vLLM, ensure that your cluster has Nvidia GPU supported (with compute capability 7.0 or higher). Also, it should have enough CPU & memory resources for the model you would like to work with. | [Learn more](https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4)
+            description: A high throughput, memory efficient inference and serving engine with GPU support for LLMs in OpenShift. If you choose vLLM, ensure that your cluster has Nvidia GPU supported (with compute capability 7.0 or higher). Also, it should have enough CPU & memory resources for the model you would like to work with. | [Learn more](https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0)
           modelNameDeployed:
             title: Model Name
             description: Text Generation | Apache-2.0 | [Learn more](https://huggingface.co/ibm-granite/granite-3.1-8b-instruct)
@@ -220,7 +220,7 @@
             const: llama.cpp
           modelServerDescription:
             type: 'null'
-            description: A Python binding of LLM inference in C/C++ with minimal setup. | [Learn more](https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8)
+            description: A Python binding of LLM inference in C/C++ with minimal setup. | [Learn more](https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16)
           modelNameDeployed:
             title: Model Name
             description: Text Generation | Apache-2.0 | [Learn more](https://huggingface.co/ibm-granite/granite-3.1-8b-instruct)
@@ -286,6 +286,15 @@
         - imageName
         # SED_APP_SUPPORT_END
         - namespace
+      ui:order:
+        - imageRegistry
+        - imageOrg
+        - imageName
+        - namespace
+        - deployArgoCDApplicationOnRemoteCluster
+        - remoteClusterAPIUrl
+        - remoteClusterDeploymentNamespace
+        - rhoaiSelected
       properties:
         # SED_APP_SUPPORT_START
         imageRegistry:
@@ -312,6 +321,11 @@
           ui:autofocus: true
           ui:options:
             rows: 5
+        deployArgoCDApplicationOnRemoteCluster:
+          title: Deploy ArgoCD Application on Remote Cluster?
+          type: boolean
+          default: false
+          ui:help: "Check this to deploy the ArgoCD application to a remote cluster"
         # SED_APP_SUPPORT_START
         rhoaiSelected:
           title: Create Workbench for OpenShift AI
@@ -320,6 +334,26 @@
           default: false
           ui:help: If you select this field, you must ensure that Red Hat OpenShift AI has been installed on your cluster.
         # SED_APP_SUPPORT_END
+      dependencies:
+        deployArgoCDApplicationOnRemoteCluster:
+          oneOf:
+            - required:
+                - remoteClusterAPIUrl
+                - remoteClusterDeploymentNamespace
+              properties:
+                deployArgoCDApplicationOnRemoteCluster:
+                  const: true
+                remoteClusterAPIUrl:
+                  title: Remote Cluster API URL
+                  type: string
+                  ui:help: "Kube API URL of remote cluster"
+                remoteClusterDeploymentNamespace:
+                  title: Remote Cluster Deployment Namespace
+                  type: string
+                  ui:help: "The namespace of the remote cluster that the argoCD application will be deployed"
+            - properties:
+                deployArgoCDApplicationOnRemoteCluster:
+                  const: false
   # These steps are executed in the scaffolder backend, using data that we gathered
  # via the parameters above.
  steps:
@@ -350,15 +384,15 @@
           appContainer: ${{ 'quay.io/redhat-ai-dev/ai-template-bootstrap-app:latest' if parameters.hostType === 'GitHub' else 'quay.io/redhat-ai-dev/chatbot:latest' }}
           appPort: 8501
           appRunCommand: "streamlit run chatbot_ui.py"
-          modelServiceContainer: quay.io/redhat-ai-dev/llamacpp_python:0.3.8
+          modelServiceContainer: quay.io/redhat-ai-dev/llamacpp_python:0.3.16
           modelServicePort: 8001
           customModelName: ${{ steps['fetch-model-from-catalog'].output.entity.metadata.name if parameters.modelServer === 'choose-from-the-catalog' else parameters.modelName }}
           modelName: ibm-granite/granite-3.1-8b-instruct
           modelSrc: https://huggingface.co/ibm-granite/granite-3.1-8b-instruct
           modelServerName: ${{ parameters.modelServer }}
           customModelAndModelServerSelected: ${{ true if parameters.modelServer === 'Bring you own model server' else (true if parameters.modelServer === 'choose-from-the-catalog' else false) }}
-          modelServiceSrcVLLM: https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4
-          modelServiceSrcOther: https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8
+          modelServiceSrcVLLM: https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0
+          modelServiceSrcOther: https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16
     # Renders all the template variables into the files and directory names and content, and places the result in the workspace.
     - id: fetch-skeleton
       name: Fetch Skeleton
@@ -445,12 +479,12 @@
           modelPath: "/model/model.file"
           appContainer: ${{ 'quay.io/redhat-ai-dev/ai-template-bootstrap-app:latest' if parameters.hostType === 'GitHub' else 'quay.io/redhat-ai-dev/chatbot:latest' }}
           appPort: 8501
-          modelServiceContainer: quay.io/redhat-ai-dev/llamacpp_python:0.3.8
+          modelServiceContainer: quay.io/redhat-ai-dev/llamacpp_python:0.3.16
           modelServicePort: 8001
           # SED_LLM_SERVER_START
           # for vllm
           vllmSelected: ${{ parameters.modelServer === 'vLLM' }}
-          vllmModelServiceContainer: quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.8.4
+          vllmModelServiceContainer: quay.io/redhat-ai-dev/vllm-openai-ubi9:v0.11.0
           modelName: ${{ parameters.modelName if parameters.modelServer === 'Bring you own model server' else (steps['fetch-model-from-catalog'].output.entity.metadata.name if parameters.modelServer === 'choose-from-the-catalog' else 'ibm-granite/granite-3.1-8b-instruct') }}
           modelSrc: https://huggingface.co/ibm-granite/granite-3.1-8b-instruct
           maxModelLength: 4096
@@ -470,8 +504,8 @@
           dbRequired: false
           supportApp: true
           modelServerName: ${{ parameters.modelServer }}
-          modelServiceSrcVLLM: https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.8.4
-          modelServiceSrcOther: https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.8
+          modelServiceSrcVLLM: https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/vllm/0.11.0
+          modelServiceSrcOther: https://github.com/redhat-ai-dev/developer-images/tree/main/model-servers/llamacpp_python/0.3.16
           imageRegistry: ${{ parameters.imageRegistry }}
           imageOrg: ${{ parameters.imageOrg }}
           imageName: ${{ parameters.imageName }}