From 8d1b0c2b2224614c983fb02ec0348db9e685bfe7 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Tue, 25 Mar 2025 03:47:27 +0000
Subject: [PATCH 01/24] merge
---
docs/en/supported_models.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/en/supported_models.md b/docs/en/supported_models.md
index e8749d1d..a1d94a8a 100644
--- a/docs/en/supported_models.md
+++ b/docs/en/supported_models.md
@@ -52,4 +52,4 @@
| jina-embeddings-v3 | jina | embedding | huggingface | g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
| bge-reranker-v2-m3 | bge | rerank | vllm | g4dn.2xlarge,g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
| bge-reranker-large | bge | rerank | vllm | g4dn.2xlarge,g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
-| jina-reranker-v2-base-multilingual | jina | rerank | huggingface | g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
+| jina-reranker-v2-base-multilingual | jina | rerank | huggingface | g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
\ No newline at end of file
From ef01e39d77fd3e9fdba4bd89943244f01729d989 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Tue, 25 Mar 2025 03:52:23 +0000
Subject: [PATCH 02/24] merge
---
docs/en/supported_models.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/en/supported_models.md b/docs/en/supported_models.md
index a1d94a8a..e8749d1d 100644
--- a/docs/en/supported_models.md
+++ b/docs/en/supported_models.md
@@ -52,4 +52,4 @@
| jina-embeddings-v3 | jina | embedding | huggingface | g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
| bge-reranker-v2-m3 | bge | rerank | vllm | g4dn.2xlarge,g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
| bge-reranker-large | bge | rerank | vllm | g4dn.2xlarge,g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
-| jina-reranker-v2-base-multilingual | jina | rerank | huggingface | g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
\ No newline at end of file
+| jina-reranker-v2-base-multilingual | jina | rerank | huggingface | g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
From 2845bc357fdca586143bf1738c432501b754a4e2 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Thu, 27 Mar 2025 08:14:39 +0000
Subject: [PATCH 03/24] add Mistral-Small-3.1-24B-Instruct-2503
---
docs/en/best_deployment_practices.md | 5 +++
docs/en/supported_models.md | 1 +
src/emd/models/engines.py | 11 ++++++
src/emd/models/model_series.py | 6 ++++
src/emd/models/utils/constants.py | 1 +
src/emd/models/vlms/__init__.py | 1 +
src/emd/models/vlms/mistral.py | 54 ++++++++++++++++++++++++++++
7 files changed, 79 insertions(+)
create mode 100644 src/emd/models/vlms/mistral.py
diff --git a/docs/en/best_deployment_practices.md b/docs/en/best_deployment_practices.md
index 3a66bfb8..26fae4d3 100644
--- a/docs/en/best_deployment_practices.md
+++ b/docs/en/best_deployment_practices.md
@@ -4,6 +4,11 @@ This document provides examples of best practices for deploying models using EMD
## Famous Models
+### Mistral Small Series
+```
+emd deploy --model-id Mistral-Small-3.1-24B-Instruct-2503 --instance-type g5.12xlarge --engine-type vllm --service-type sagemaker_realtime
+```
+
### Gemma 3 Series
```
diff --git a/docs/en/supported_models.md b/docs/en/supported_models.md
index e8749d1d..0ecfeca7 100644
--- a/docs/en/supported_models.md
+++ b/docs/en/supported_models.md
@@ -44,6 +44,7 @@
| gemma-3-4b-it | gemma3 | vlm | vllm | g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,sagemaker_async,ecs | ❎ |
| gemma-3-12b-it | gemma3 | vlm | vllm | g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,sagemaker_async,ecs | ❎ |
| gemma-3-27b-it | gemma3 | vlm | vllm | g5.12xlarge,g5.24xlarge,g5.48xlarge | sagemaker_realtime,sagemaker_async,ecs | ❎ |
+| Mistral-Small-3.1-24B-Instruct-2503 | mistral | vlm | vllm | g5.12xlarge,g5.24xlarge,g5.48xlarge | sagemaker_realtime,sagemaker_async,ecs | ❎ |
| txt2video-LTX | comfyui | video | comfyui | g5.4xlarge,g5.8xlarge,g6e.2xlarge | sagemaker_async | ❎ |
| whisper | whisper | whisper | huggingface | g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_async | ❎ |
| bce-embedding-base_v1 | bce | embedding | vllm | g4dn.2xlarge,g5.xlarge,g5.2xlarge,g5.4xlarge,g5.8xlarge,g5.16xlarge | sagemaker_realtime,ecs | ✅ |
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index f251071a..e748ab83 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -77,6 +77,17 @@ class KtransformersEngine(OpenAICompitableEngine):
}
)
+
+vllm_mistral_small_engine082 = VllmEngine(
+ **{
+ **vllm_engine064.model_dump(),
+ "engine_dockerfile_config": {"VERSION":"v0.8.2"},
+ "dockerfile_name":"Dockerfile",
+ "default_cli_args": " --tokenizer-mode mistral --config-format mistral --load-format mistral --limit-mm-per-prompt 'image=4' --max-model-len 16384",
+ "environment_variables": ""
+ }
+)
+
vllm_deepseek_r1_distill_qwen_engine071 = VllmEngine(**{
**vllm_engine064.model_dump(),
"engine_dockerfile_config": {"VERSION":"v0.7.1"},
diff --git a/src/emd/models/model_series.py b/src/emd/models/model_series.py
index 09062998..ceae13ad 100644
--- a/src/emd/models/model_series.py
+++ b/src/emd/models/model_series.py
@@ -97,6 +97,12 @@
reference_link="https://blog.google/technology/developers/gemma-3/"
)
+MISTRAL_SERIES = ModelSeries(
+ model_series_name=ModelSeriesType.MISTRAL,
+ description="LLMs and VLMs provided by MISTRAL AI.",
+ reference_link="https://huggingface.co/mistralai"
+)
+
DEEPSEEK_REASONING_MODEL = ModelSeries(
model_series_name=ModelSeriesType.DEEPSEEK_REASONING_MODEL,
description="DeepSeek-R1-Zero and DeepSeek-R1 are innovative reasoning models, with the former showcasing strong performance through reinforcement learning alone, while the latter enhances reasoning capabilities by incorporating cold-start data, achieving results comparable to OpenAI-o1 and setting new benchmarks with its distilled versions.",
diff --git a/src/emd/models/utils/constants.py b/src/emd/models/utils/constants.py
index b9c21b49..d78414e6 100644
--- a/src/emd/models/utils/constants.py
+++ b/src/emd/models/utils/constants.py
@@ -214,6 +214,7 @@ def get_service_quota_code(cls, instance_type: str):
class ModelSeriesType(ConstantBase):
GEMMA3 = "gemma3"
+ MISTRAL = "mistral"
QWEN2D5 = "qwen2.5"
GLM4 = "glm4"
INTERLM2d5 = "internlm2.5"
diff --git a/src/emd/models/vlms/__init__.py b/src/emd/models/vlms/__init__.py
index bf74f45c..4440a29e 100644
--- a/src/emd/models/vlms/__init__.py
+++ b/src/emd/models/vlms/__init__.py
@@ -1,3 +1,4 @@
from . import qwen
from . import internvl
from . import gemma3
+from . import mistral
diff --git a/src/emd/models/vlms/mistral.py b/src/emd/models/vlms/mistral.py
new file mode 100644
index 00000000..fc597105
--- /dev/null
+++ b/src/emd/models/vlms/mistral.py
@@ -0,0 +1,54 @@
+from ..engines import vllm_mistral_small_engine082
+from .. import Model
+from ..frameworks import fastapi_framework
+from ..services import (
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+)
+from emd.models.utils.constants import ModelType
+from ..model_series import MISTRAL_SERIES
+from ..instances import (
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d12xlarge_instance,
+ g5d16xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ g6e2xlarge_instance,
+ local_instance
+)
+from ..utils.constants import ModelFilesDownloadSource
+
+
+Model.register(
+ dict(
+ model_id = "Mistral-Small-3.1-24B-Instruct-2503",
+ supported_engines=[vllm_mistral_small_engine082],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ huggingface_model_id="unsloth/Mistral-Small-3.1-24B-Instruct-2503",
+ # require_huggingface_token=False,
+ modelscope_model_id="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+ # model_files_download_source=ModelFilesDownloadSource.MODELSCOPE,
+ application_scenario="vision llms for image understanding",
+ description="The latest series of mistral small",
+ model_type=ModelType.VLM,
+ model_series=MISTRAL_SERIES,
+ )
+)
From d72b12b22282e6e67dc069f99fdd3ee59e25bcbd Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Tue, 1 Apr 2025 08:37:49 +0000
Subject: [PATCH 04/24] modify qwq-32b deploy
---
src/emd/models/engines.py | 7 +++++++
src/emd/models/llms/qwen.py | 5 +++--
src/emd/models/services.py | 1 +
src/pipeline/backend/backend.py | 7 ++++---
4 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index e748ab83..0dc243f7 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -135,6 +135,13 @@ class KtransformersEngine(OpenAICompitableEngine):
"default_cli_args": " --chat-template emd/models/chat_templates/qwq_32b_add_prefill_chat_template.jinja --max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-auto-tool-choice --tool-call-parser hermes"
})
+vllm_qwq_engine082 = VllmEngine(**{
+ **vllm_qwen25vl72b_engine073.model_dump(),
+ "engine_dockerfile_config": {"VERSION":"v0.8.2"},
+ "environment_variables": "export VLLM_ATTENTION_BACKEND=FLASHINFER && export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
+ "default_cli_args": " --chat-template emd/models/chat_templates/qwq_32b_add_prefill_chat_template.jinja --max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-auto-tool-choice --tool-call-parser hermes --enable-reasoning --reasoning-parser deepseek_r1"
+})
+
vllm_internvl2d5_76b_engine064 = VllmEngine(**{
**vllm_engine064.model_dump(),
diff --git a/src/emd/models/llms/qwen.py b/src/emd/models/llms/qwen.py
index fa48ce79..35a2cc1f 100644
--- a/src/emd/models/llms/qwen.py
+++ b/src/emd/models/llms/qwen.py
@@ -7,7 +7,8 @@
tgi_qwen2d5_on_inf2,
tgi_qwen2d5_72b_on_inf2,
vllm_qwen2d5_72b_engine064,
- vllm_qwq_engine073
+ vllm_qwq_engine073,
+ vllm_qwq_engine082
)
from ..services import (
sagemaker_service,
@@ -471,7 +472,7 @@
Model.register(
dict(
model_id = "QwQ-32B",
- supported_engines=[vllm_qwq_engine073],
+ supported_engines=[vllm_qwq_engine082],
supported_instances=[
g5d12xlarge_instance,
g5d24xlarge_instance,
diff --git a/src/emd/models/services.py b/src/emd/models/services.py
index 05737773..859a12c9 100644
--- a/src/emd/models/services.py
+++ b/src/emd/models/services.py
@@ -91,6 +91,7 @@
"ServiceType":"service_type",
"EngineType":"engine_type",
"Region": "region",
+ "DesiredCapacity": "desired_capacity",
"ContainerCpu": "container_cpu",
"ContainerMemory": "container_memory",
"ContainerGpu":"instance_gpu_num"
diff --git a/src/pipeline/backend/backend.py b/src/pipeline/backend/backend.py
index fc46d39d..4996c97a 100644
--- a/src/pipeline/backend/backend.py
+++ b/src/pipeline/backend/backend.py
@@ -134,9 +134,10 @@ def start_server(self, server_start_command):
logger.info(f"Starting {self.engine_type} server with command: {server_start_command}")
t = threading.Thread(target=os.system,args=(server_start_command,),daemon=True)
t.start()
- t2 = threading.Thread(target=self.check_model_serve_ready,args=(t, "127.0.0.1", self.server_port),daemon=True)
- t2.start()
- t2.join()
+ self.check_model_serve_ready(t, "127.0.0.1", self.server_port)
+ logger.info(f"Server started successfully.")
+ # t2.start()
+ # t2.join()
return
From 48b97c44c487ca29336f94ef69eb0d8741d7a0fd Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Mon, 7 Apr 2025 08:43:51 +0000
Subject: [PATCH 05/24] add txgemma model;
---
docs/en/best_deployment_practices.md | 42 +++++++++++++
src/emd/constants.py | 6 ++
src/emd/models/engines.py | 12 ++++
src/emd/models/llms/__init__.py | 3 +-
src/emd/models/llms/deepseek.py | 54 ++++++++---------
src/emd/models/llms/txgemma.py | 91 ++++++++++++++++++++++++++++
src/emd/models/model_series.py | 7 +++
src/emd/models/services.py | 2 +-
src/emd/models/utils/constants.py | 1 +
src/emd/models/vlms/gemma3.py | 4 ++
src/emd/sdk/deploy.py | 13 +++-
src/emd/utils/file_utils.py | 6 ++
12 files changed, 209 insertions(+), 32 deletions(-)
create mode 100644 src/emd/models/llms/txgemma.py
create mode 100644 src/emd/utils/file_utils.py
diff --git a/docs/en/best_deployment_practices.md b/docs/en/best_deployment_practices.md
index 26fae4d3..d4747e57 100644
--- a/docs/en/best_deployment_practices.md
+++ b/docs/en/best_deployment_practices.md
@@ -64,6 +64,48 @@ emd deploy --model-id Qwen2.5-14B-Instruct-AWQ --instance-type g4dn.2xlarge --en
}'
```
+### Example: Customize model download methods
+- You can load models from different locations by adding appropriate values in the extra-params parameter
+1. Load model from S3
+```json
+{
+ "model_params":{
+ "model_files_s3_path":""
+ }
+}
+```
+2. Load model from local path (only applicable for local deployment)
+```json
+{
+ "model_params": { "model_files_local_path":""
+ }
+}
+```
+3. Skip downloading and uploading model files in codebuild, which will significantly reduce deployment time
+```json
+{
+ "model_params": {
+ "need_prepare_model":false
+ }
+}
+```
+4. Specify the download source for model files
+```json
+{
+ "model_params":{
+ "model_files_download_source":"huggingface|modelscope|auto(default)"
+ }
+}
+```
+5. Specify the model ID on huggingface or modelscope
+```json
+{
+ "model_params": {
+ "huggingface_model_id":"model id on huggingface","modelscope_model_id":"model id on modelscope"
+ }
+}
+```
+
## Environmental variables
- `LOCAL_DEPLOY_PORT: ` Local deployment port, default: `8080`
diff --git a/src/emd/constants.py b/src/emd/constants.py
index a3f949b1..53dd60c8 100644
--- a/src/emd/constants.py
+++ b/src/emd/constants.py
@@ -1,4 +1,5 @@
from .revision import VERSION, convert_version_name_to_stack_name
+import os
ENV_STACK_NAME = f'EMD-Env'
MODEL_STACK_NAME_PREFIX = f"EMD-Model"
ENV_BUCKET_NAME_PREFIX = "emd-env-artifactbucket"
@@ -25,3 +26,8 @@
LOCAL_REGION = "local"
# EMD_USE_NO_PROFILE_CHOICE = "Don't set"
+
+LOCAL_DEPLOY_PIPELINE_ZIP_DIR = os.path.join(
+ os.path.expanduser("~"),
+ f"emd_{VERSION}"
+)
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index 0dc243f7..0bc13595 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -78,6 +78,18 @@ class KtransformersEngine(OpenAICompitableEngine):
)
+vllm_texgemma082 = VllmEngine(**{
+ "engine_type":EngineType.VLLM,
+ "engine_dockerfile_config": {"VERSION":"v0.8.2"},
+ "engine_cls":"vllm.vllm_backend.VLLMBackend",
+ "base_image_host":"public.ecr.aws",
+ "use_public_ecr":True,
+ "docker_login_region":"us-east-1",
+ "default_cli_args": " --max_num_seq 10 --disable-log-stats"
+}
+)
+
+
vllm_mistral_small_engine082 = VllmEngine(
**{
**vllm_engine064.model_dump(),
diff --git a/src/emd/models/llms/__init__.py b/src/emd/models/llms/__init__.py
index e823ab18..2750df0c 100644
--- a/src/emd/models/llms/__init__.py
+++ b/src/emd/models/llms/__init__.py
@@ -5,5 +5,6 @@
llama,
deepseek,
baichuan,
- jina
+ jina,
+ txgemma
)
diff --git a/src/emd/models/llms/deepseek.py b/src/emd/models/llms/deepseek.py
index df61526b..1359ac33 100644
--- a/src/emd/models/llms/deepseek.py
+++ b/src/emd/models/llms/deepseek.py
@@ -334,33 +334,33 @@
)
)
-Model.register(
- dict(
- model_id = "deepseek-r1-671b-1.58bit_ollama",
- supported_engines=[ollama_deepseek_r1_qwen2d5_1d5b_engine057],
- supported_instances=[
- g5d48xlarge_instance,
- local_instance
- ],
- supported_services=[
- sagemaker_service,
- sagemaker_async_service,
- ecs_service,
- local_service
- ],
- supported_frameworks=[
- fastapi_framework
- ],
- allow_china_region=False,
- ollama_model_id="SIGJNF/deepseek-r1-671b-1.58bit",
- # modelscope_model_id="Qwen/Qwen2.5-14B-Instruct",
- require_huggingface_token=False,
- application_scenario="Agent, tool use, translation, summary",
- description="The latest series of DeepSeek LLMs for reasoning",
- model_type=ModelType.LLM,
- model_series=DEEPSEEK_REASONING_MODEL
- )
-)
+# Model.register(
+# dict(
+# model_id = "deepseek-r1-671b-1.58bit_ollama",
+# supported_engines=[ollama_deepseek_r1_qwen2d5_1d5b_engine057],
+# supported_instances=[
+# g5d48xlarge_instance,
+# local_instance
+# ],
+# supported_services=[
+# sagemaker_service,
+# sagemaker_async_service,
+# ecs_service,
+# local_service
+# ],
+# supported_frameworks=[
+# fastapi_framework
+# ],
+# allow_china_region=False,
+# ollama_model_id="SIGJNF/deepseek-r1-671b-1.58bit",
+# # modelscope_model_id="Qwen/Qwen2.5-14B-Instruct",
+# require_huggingface_token=False,
+# application_scenario="Agent, tool use, translation, summary",
+# description="The latest series of DeepSeek LLMs for reasoning",
+# model_type=ModelType.LLM,
+# model_series=DEEPSEEK_REASONING_MODEL
+# )
+# )
Model.register(
diff --git a/src/emd/models/llms/txgemma.py b/src/emd/models/llms/txgemma.py
new file mode 100644
index 00000000..0c4e29ff
--- /dev/null
+++ b/src/emd/models/llms/txgemma.py
@@ -0,0 +1,91 @@
+from ..engines import vllm_texgemma082
+from .. import Model
+from ..frameworks import fastapi_framework
+from ..services import (
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+)
+from emd.models.utils.constants import ModelType
+from ..model_series import TXGEMMA_SERIES
+from ..instances import (
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d12xlarge_instance,
+ g5d16xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ g6e2xlarge_instance,
+ local_instance
+)
+from ..utils.constants import ModelFilesDownloadSource
+
+
+Model.register(
+ dict(
+ model_id = "txgemma-9b-chat",
+ supported_engines=[vllm_texgemma082],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d16xlarge_instance,
+ local_instance
+ ],
+ disable_hf_transfer=True,
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ huggingface_model_id="google/txgemma-9b-chat",
+ modelscope_model_id="AI-ModelScope/txgemma-9b-chat",
+ model_files_download_source=ModelFilesDownloadSource.MODELSCOPE,
+ # require_huggingface_token=True,
+ application_scenario="llms for the development of therapeutics.",
+ description="The latest series of txgemma",
+ model_type=ModelType.LLM,
+ model_series=TXGEMMA_SERIES,
+ )
+)
+
+
+Model.register(
+ dict(
+ model_id = "txgemma-27b-chat",
+ supported_engines=[vllm_texgemma082],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ local_instance
+ ],
+ disable_hf_transfer=True,
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ huggingface_model_id="google/txgemma-27b-chat",
+ modelscope_model_id="AI-ModelScope/txgemma-27b-chat",
+ model_files_download_source=ModelFilesDownloadSource.MODELSCOPE,
+ # require_huggingface_token=True,
+ application_scenario="llms for the development of therapeutics.",
+ description="The latest series of txgemma",
+ model_type=ModelType.LLM,
+ model_series=TXGEMMA_SERIES,
+ )
+)
diff --git a/src/emd/models/model_series.py b/src/emd/models/model_series.py
index ceae13ad..896fa512 100644
--- a/src/emd/models/model_series.py
+++ b/src/emd/models/model_series.py
@@ -97,6 +97,13 @@
reference_link="https://blog.google/technology/developers/gemma-3/"
)
+TXGEMMA_SERIES = ModelSeries(
+ model_series_name=ModelSeriesType.TXGEMMA,
+ description="TXGemma is a series of open models to accelerate the development of therapeutics.",
+ reference_link="https://huggingface.co/collections/google/txgemma-release-67dd92e931c857d15e4d1e87"
+)
+
+
MISTRAL_SERIES = ModelSeries(
model_series_name=ModelSeriesType.MISTRAL,
description="LLMs and VLMs provided by MISTRAL AI.",
diff --git a/src/emd/models/services.py b/src/emd/models/services.py
index 859a12c9..824792f0 100644
--- a/src/emd/models/services.py
+++ b/src/emd/models/services.py
@@ -91,7 +91,7 @@
"ServiceType":"service_type",
"EngineType":"engine_type",
"Region": "region",
- "DesiredCapacity": "desired_capacity",
+ "DesiredCapacity": ValueWithDefault(name="desired_capacity",default=1),
"ContainerCpu": "container_cpu",
"ContainerMemory": "container_memory",
"ContainerGpu":"instance_gpu_num"
diff --git a/src/emd/models/utils/constants.py b/src/emd/models/utils/constants.py
index d78414e6..27311173 100644
--- a/src/emd/models/utils/constants.py
+++ b/src/emd/models/utils/constants.py
@@ -214,6 +214,7 @@ def get_service_quota_code(cls, instance_type: str):
class ModelSeriesType(ConstantBase):
GEMMA3 = "gemma3"
+ TXGEMMA = "txgemma"
MISTRAL = "mistral"
QWEN2D5 = "qwen2.5"
GLM4 = "glm4"
diff --git a/src/emd/models/vlms/gemma3.py b/src/emd/models/vlms/gemma3.py
index 4f049787..822cddd6 100644
--- a/src/emd/models/vlms/gemma3.py
+++ b/src/emd/models/vlms/gemma3.py
@@ -10,6 +10,7 @@
from emd.models.utils.constants import ModelType
from ..model_series import Gemma3_SERIES
from ..instances import (
+ g4dn12xlarge_instance,
g5d2xlarge_instance,
g5d4xlarge_instance,
g5d8xlarge_instance,
@@ -43,6 +44,7 @@
supported_frameworks=[
fastapi_framework
],
+ allow_china_region = True,
modelscope_model_id="LLM-Research/gemma-3-4b-it",
model_files_download_source=ModelFilesDownloadSource.MODELSCOPE,
# require_huggingface_token=False,
@@ -74,6 +76,7 @@
supported_frameworks=[
fastapi_framework
],
+ allow_china_region = True,
# huggingface_model_id="google/gemma-3-12b-it",
# require_huggingface_token=False,
modelscope_model_id="LLM-Research/gemma-3-12b-it",
@@ -106,6 +109,7 @@
supported_frameworks=[
fastapi_framework
],
+ allow_china_region = True,
# huggingface_model_id="unsloth/gemma-3-27b-it",
modelscope_model_id="LLM-Research/gemma-3-27b-it",
model_files_download_source=ModelFilesDownloadSource.MODELSCOPE,
diff --git a/src/emd/sdk/deploy.py b/src/emd/sdk/deploy.py
index 6426215d..03443ebf 100644
--- a/src/emd/sdk/deploy.py
+++ b/src/emd/sdk/deploy.py
@@ -14,8 +14,10 @@
MODEL_DEFAULT_TAG,
MODEL_STACK_NAME_PREFIX,
VERSION,
- LOCAL_REGION
+ LOCAL_REGION,
+ LOCAL_DEPLOY_PIPELINE_ZIP_DIR
)
+from emd.utils.file_utils import mkdir_with_mode
from emd.models import Model
from emd.models.utils.constants import FrameworkType, ServiceType,InstanceType
from emd.models.utils.serialize_utils import dump_extra_params
@@ -318,7 +320,10 @@ def deploy_local(
# region: Optional[str] = None,
# model_stack_name=None,
extra_params=None,
- pipeline_zip_local_path=f"/tmp/emd_{VERSION}/pipeline.zip",
+ pipeline_zip_local_path=os.path.join(
+ LOCAL_DEPLOY_PIPELINE_ZIP_DIR,
+ "pipeline.zip"
+ ),
# env_stack_on_failure = "ROLLBACK",
# force_env_stack_update = False,
# waiting_until_deploy_complete = True
@@ -328,7 +333,9 @@ def deploy_local(
logger.info(f"parsed extra_params: {extra_params}")
extra_params = dump_extra_params(extra_params or {})
dir = os.path.dirname(pipeline_zip_local_path)
- os.makedirs(dir, exist_ok=True)
+
+ mkdir_with_mode(dir, exist_ok=True,mode=0o777)
+ # os.makedirs(dir, exist_ok=True,mode=0o777)
with open(pipeline_zip_local_path, "wb") as f:
buffer = ziped_pipeline()
f.write(buffer.read())
diff --git a/src/emd/utils/file_utils.py b/src/emd/utils/file_utils.py
new file mode 100644
index 00000000..11599dc1
--- /dev/null
+++ b/src/emd/utils/file_utils.py
@@ -0,0 +1,6 @@
+import os
+
+def mkdir_with_mode(directory,exist_ok=True,mode=0o777):
+ oldmask = os.umask(0)
+ os.makedirs(directory, mode=mode,exist_ok=exist_ok)
+ os.umask(oldmask)
From d7fe697bd5f05e6421746111fb335d0627aa718d Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Mon, 7 Apr 2025 09:36:47 +0000
Subject: [PATCH 06/24] modify model list command
---
src/emd/cli.py | 12 +++++++++---
src/emd/models/llms/txgemma.py | 2 ++
src/emd/models/model.py | 6 ++++--
3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/src/emd/cli.py b/src/emd/cli.py
index 7b6fcbf8..9212833d 100644
--- a/src/emd/cli.py
+++ b/src/emd/cli.py
@@ -83,11 +83,17 @@
@app.command(help="List supported models")
@catch_aws_credential_errors
-def list_supported_models(model_id: Annotated[
+def list_supported_models(
+ model_id: Annotated[
str, typer.Argument(help="Model ID")
- ] = None):
+ ] = None,
+ detail: Annotated[
+ Optional[bool],
+ typer.Option("-a", "--detail", help="output model infomation in details.")
+ ] = False
+):
# console.print("[bold blue]Retrieving models...[/bold blue]")
- support_models = Model.get_supported_models()
+ support_models = Model.get_supported_models(detail=detail)
if model_id:
support_models = [model for _model_id,model in support_models.items() if _model_id == model_id]
r = json.dumps(support_models,indent=2,ensure_ascii=False)
diff --git a/src/emd/models/llms/txgemma.py b/src/emd/models/llms/txgemma.py
index 0c4e29ff..06aa7e6f 100644
--- a/src/emd/models/llms/txgemma.py
+++ b/src/emd/models/llms/txgemma.py
@@ -47,6 +47,7 @@
supported_frameworks=[
fastapi_framework
],
+ allow_china_region=True,
huggingface_model_id="google/txgemma-9b-chat",
modelscope_model_id="AI-ModelScope/txgemma-9b-chat",
model_files_download_source=ModelFilesDownloadSource.MODELSCOPE,
@@ -79,6 +80,7 @@
supported_frameworks=[
fastapi_framework
],
+ allow_china_region=True,
huggingface_model_id="google/txgemma-27b-chat",
modelscope_model_id="AI-ModelScope/txgemma-27b-chat",
model_files_download_source=ModelFilesDownloadSource.MODELSCOPE,
diff --git a/src/emd/models/model.py b/src/emd/models/model.py
index 1e052ef1..6289e890 100644
--- a/src/emd/models/model.py
+++ b/src/emd/models/model.py
@@ -210,8 +210,10 @@ def get_model(cls ,model_id:str,update:dict = None) -> T:
return model
@classmethod
- def get_supported_models(cls) -> dict:
- return {model_id: model.model_type for model_id,model in cls.model_map.items()}
+ def get_supported_models(cls,detail=False) -> dict:
+ if not detail:
+ return {model_id: model.model_type for model_id,model in cls.model_map.items()}
+ return {model_id: model.model_dump() for model_id,model in cls.model_map.items()}
def find_current_engine(self,engine_type:str) -> dict:
supported_engines:List[Engine] = self.supported_engines
From d575c580476c32ed3eeb4f5c627f95c72af7e918 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Mon, 7 Apr 2025 09:42:18 +0000
Subject: [PATCH 07/24] fix typo
---
src/emd/cli.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/emd/cli.py b/src/emd/cli.py
index 9212833d..ed7421e2 100644
--- a/src/emd/cli.py
+++ b/src/emd/cli.py
@@ -89,7 +89,7 @@ def list_supported_models(
] = None,
detail: Annotated[
Optional[bool],
- typer.Option("-a", "--detail", help="output model infomation in details.")
+ typer.Option("-a", "--detail", help="output model information in details.")
] = False
):
# console.print("[bold blue]Retrieving models...[/bold blue]")
From 4370be096f50186c1b95ab4d5552fe4ba8d14ec6 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Fri, 18 Apr 2025 06:43:29 +0000
Subject: [PATCH 08/24] add some ecs parameters
---
src/emd/cfn/ecs/post_build.py | 4 ++--
src/emd/models/services.py | 3 +++
src/pipeline/deploy/deploy.py | 2 --
3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/src/emd/cfn/ecs/post_build.py b/src/emd/cfn/ecs/post_build.py
index 4a5acc4f..6d1b8c32 100644
--- a/src/emd/cfn/ecs/post_build.py
+++ b/src/emd/cfn/ecs/post_build.py
@@ -104,9 +104,9 @@ def deploy_vpc_template(region):
vpc_id = None
subnets = None
for output in outputs:
- if output["OutputKey"] == "VPCID":
+ if output["OutputKey"] == "VPCID" and output["OutputValue"]:
vpc_id = output["OutputValue"]
- elif output["OutputKey"] == "Subnets":
+ elif output["OutputKey"] == "Subnets" and output["OutputValue"]:
subnets = output["OutputValue"]
update_parameters_file("parameters.json", {"VPCID": vpc_id, "Subnets": subnets})
return vpc_id, subnets
diff --git a/src/emd/models/services.py b/src/emd/models/services.py
index 824792f0..838ddf2c 100644
--- a/src/emd/models/services.py
+++ b/src/emd/models/services.py
@@ -92,6 +92,9 @@
"EngineType":"engine_type",
"Region": "region",
"DesiredCapacity": ValueWithDefault(name="desired_capacity",default=1),
+ "MaxSize": ValueWithDefault(name="max_size",default=1),
+ "VPCID": ValueWithDefault(name="vpc_id",default=""),
+ "Subnets": ValueWithDefault(name="subnet_ids",default=""),
"ContainerCpu": "container_cpu",
"ContainerMemory": "container_memory",
"ContainerGpu":"instance_gpu_num"
diff --git a/src/pipeline/deploy/deploy.py b/src/pipeline/deploy/deploy.py
index 42040755..ceeee6a5 100644
--- a/src/pipeline/deploy/deploy.py
+++ b/src/pipeline/deploy/deploy.py
@@ -108,9 +108,7 @@ def run(
+ "-"
+ time.strftime("%Y-%m-%d-%H-%M-%S")
)
-
role_arn = get_or_create_role(role_name, region)
-
create_sagemaker_endpoint(
region=region,
instance_type=instance_type,
From 5cb72e3185116af3996eeadd7527b5cc2197df9f Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Tue, 22 Apr 2025 06:15:01 +0000
Subject: [PATCH 09/24] add glm4-z1 models
---
src/emd/commands/deploy.py | 10 +-
src/emd/models/engines.py | 17 ++
src/emd/models/llms/glm.py | 152 +++++++++++++++++-
.../zhipu_z1_vllm_image_dockerfile | 8 +
4 files changed, 184 insertions(+), 3 deletions(-)
create mode 100644 src/pipeline/backend/convert_engine_image_to_dmaa_dockerfiles/zhipu_z1_vllm_image_dockerfile
diff --git a/src/emd/commands/deploy.py b/src/emd/commands/deploy.py
index fc92453a..3d0b001a 100644
--- a/src/emd/commands/deploy.py
+++ b/src/emd/commands/deploy.py
@@ -239,6 +239,9 @@ def deploy(
dockerfile_local_path: Annotated[
str, typer.Option("--dockerfile-local-path", help="Your custom Dockerfile path for building the model image, all files must be in the same directory")
] = None,
+ local_gpus:Annotated[
+ str, typer.Option("--local-gpus", help="Local gpu ids to deploy the model (e.g. `0,1,2`), only working with local deployment mode.")
+ ] = None,
):
if only_allow_local_deploy:
allow_local_deploy = True
@@ -389,8 +392,10 @@ def deploy(
)
if service_type == ServiceType.LOCAL:
if check_cuda_exists():
- if os.environ.get('CUDA_VISIBLE_DEVICES'):
- console.print(f"[bold blue]local gpus: {os.environ.get('CUDA_VISIBLE_DEVICES')}[/bold blue]")
+ if local_gpus is not None:
+ os.environ['CUDA_VISIBLE_DEVICES']=local_gpus
+ elif os.environ.get('CUDA_VISIBLE_DEVICES'):
+ pass
else:
gpu_num = get_gpu_num()
support_gpu_num = model.supported_instances[0].gpu_num
@@ -400,6 +405,7 @@ def deploy(
default=f"{default_gpus_str}"
).ask()
os.environ['CUDA_VISIBLE_DEVICES']=gpus_to_deploy
+ console.print(f"[bold blue]local gpus: {os.environ.get('CUDA_VISIBLE_DEVICES')}[/bold blue]")
instance_type = InstanceType.LOCAL
else:
if instance_type is None:
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index 0bc13595..09bdc089 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -165,6 +165,23 @@ class KtransformersEngine(OpenAICompitableEngine):
vllm_glm4_engine064 = vllm_engine064
+
+vllm_glm4_0414_engine082 = VllmEngine(**{
+ **vllm_qwen25vl72b_engine073.model_dump(),
+ "engine_dockerfile_config": {"VERSION":"glm_z1_and_0414"},
+ "environment_variables": "export VLLM_USE_V1=0 && export VLLM_ATTENTION_BACKEND=FLASHINFER && export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
+ "default_cli_args": "--max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-auto-tool-choice --tool-call-parser pythonic"
+})
+
+
+vllm_glm4_z1_engine082 = VllmEngine(**{
+ **vllm_qwen25vl72b_engine073.model_dump(),
+ "engine_dockerfile_config": {"VERSION":"glm_z1_and_0414"},
+ "environment_variables": "export VLLM_USE_V1=0 && export VLLM_ATTENTION_BACKEND=FLASHINFER && export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
+ "default_cli_args": "--max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-auto-tool-choice --tool-call-parser pythonic --enable-reasoning --reasoning-parser granite"
+})
+
+
vllm_glm4_wo_flashinfer_engine064 = VllmEngine(**{
**vllm_engine064.model_dump(),
# "engine_dockerfile_config": {"VERSION":"v0.6.0"},
diff --git a/src/emd/models/llms/glm.py b/src/emd/models/llms/glm.py
index faaf69ef..ea3d02bd 100644
--- a/src/emd/models/llms/glm.py
+++ b/src/emd/models/llms/glm.py
@@ -1,5 +1,10 @@
from .. import Model
-from ..engines import vllm_glm4_engine064,vllm_glm4_wo_flashinfer_engine064
+from ..engines import (
+ vllm_glm4_engine064,
+ vllm_glm4_wo_flashinfer_engine064,
+ vllm_glm4_0414_engine082,
+ vllm_glm4_z1_engine082
+)
from ..services import (
sagemaker_service,
sagemaker_async_service,
@@ -79,3 +84,148 @@
model_series=GLM4_SERIES
)
)
+
+
+Model.register(
+ dict(
+ model_id = "GLM-4-9B-0414",
+ supported_engines=[vllm_glm4_0414_engine082],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ allow_china_region=True,
+ supported_frameworks=[fastapi_framework],
+ huggingface_model_id="THUDM/GLM-4-9B-0414",
+ modelscope_model_id="ZhipuAI/GLM-4-9B-0414",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+        description="GLM-4-0414 series",
+ model_type=ModelType.LLM,
+ model_series=GLM4_SERIES
+ )
+)
+
+Model.register(
+ dict(
+ model_id = "GLM-4-32B-0414",
+ supported_engines=[vllm_glm4_0414_engine082],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ allow_china_region=True,
+ supported_frameworks=[fastapi_framework],
+ huggingface_model_id="THUDM/GLM-4-32B-0414",
+ modelscope_model_id="ZhipuAI/GLM-4-32B-0414",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="GLM-4-32B-0414 series",
+ model_type=ModelType.LLM,
+ model_series=GLM4_SERIES
+ )
+)
+
+
+
+Model.register(
+ dict(
+ model_id = "GLM-Z1-9B-0414",
+ supported_engines=[vllm_glm4_z1_engine082],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ allow_china_region=True,
+ supported_frameworks=[fastapi_framework],
+ huggingface_model_id="THUDM/GLM-Z1-9B-0414",
+ modelscope_model_id="ZhipuAI/GLM-Z1-9B-0414",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+        description="GLM-Z1-0414 series",
+ model_type=ModelType.LLM,
+ model_series=GLM4_SERIES
+ )
+)
+
+
+Model.register(
+ dict(
+ model_id = "GLM-Z1-32B-0414",
+ supported_engines=[vllm_glm4_z1_engine082],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ allow_china_region=True,
+ supported_frameworks=[fastapi_framework],
+ huggingface_model_id="THUDM/GLM-Z1-32B-0414",
+ modelscope_model_id="ZhipuAI/GLM-Z1-32B-0414",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+        description="GLM-Z1-0414 series",
+ model_type=ModelType.LLM,
+ model_series=GLM4_SERIES
+ )
+)
+
+
+Model.register(
+ dict(
+ model_id = "GLM-Z1-Rumination-32B-0414",
+ supported_engines=[vllm_glm4_z1_engine082],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ allow_china_region=True,
+ supported_frameworks=[fastapi_framework],
+ huggingface_model_id="THUDM/GLM-Z1-Rumination-32B-0414",
+ modelscope_model_id="ZhipuAI/GLM-Z1-Rumination-32B-0414",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+        description="GLM-Z1-0414 series",
+ model_type=ModelType.LLM,
+ model_series=GLM4_SERIES
+ )
+)
diff --git a/src/pipeline/backend/convert_engine_image_to_dmaa_dockerfiles/zhipu_z1_vllm_image_dockerfile b/src/pipeline/backend/convert_engine_image_to_dmaa_dockerfiles/zhipu_z1_vllm_image_dockerfile
new file mode 100644
index 00000000..229501f9
--- /dev/null
+++ b/src/pipeline/backend/convert_engine_image_to_dmaa_dockerfiles/zhipu_z1_vllm_image_dockerfile
@@ -0,0 +1,8 @@
+FROM vllm/vllm-openai:v0.8.4
+
+RUN git clone https://github.com/vllm-project/vllm.git && cd vllm && git fetch origin pull/16618/head:pr-16618 && VLLM_USE_PRECOMPILED=1 pip install --editable .
+
+EXPOSE 8080
+
+# Set the serve script as the entrypoint
+ENTRYPOINT ["/usr/bin/serve"]
From a17b54d613c52e657f70507401a5fc62c37a5d0c Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Tue, 22 Apr 2025 08:49:32 +0000
Subject: [PATCH 10/24] modify vllm backend
---
src/emd/models/engines.py | 6 ++++--
src/pipeline/backend/vllm/vllm_backend.py | 21 ++++++++++++++++-----
2 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index 09bdc089..255c1a75 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -170,7 +170,8 @@ class KtransformersEngine(OpenAICompitableEngine):
**vllm_qwen25vl72b_engine073.model_dump(),
"engine_dockerfile_config": {"VERSION":"glm_z1_and_0414"},
"environment_variables": "export VLLM_USE_V1=0 && export VLLM_ATTENTION_BACKEND=FLASHINFER && export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
- "default_cli_args": "--max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-auto-tool-choice --tool-call-parser pythonic"
+ # "default_cli_args": "--max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-auto-tool-choice --tool-call-parser pythonic"
+ "default_cli_args": "--max_model_len 16000 --max_num_seq 10 --disable-log-stats"
})
@@ -178,7 +179,8 @@ class KtransformersEngine(OpenAICompitableEngine):
**vllm_qwen25vl72b_engine073.model_dump(),
"engine_dockerfile_config": {"VERSION":"glm_z1_and_0414"},
"environment_variables": "export VLLM_USE_V1=0 && export VLLM_ATTENTION_BACKEND=FLASHINFER && export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
- "default_cli_args": "--max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-auto-tool-choice --tool-call-parser pythonic --enable-reasoning --reasoning-parser granite"
+ # "default_cli_args": "--max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-auto-tool-choice --tool-call-parser pythonic --enable-reasoning --reasoning-parser granite"
+ "default_cli_args": "--max_model_len 16000 --max_num_seq 10 --disable-log-stats --enable-reasoning --reasoning-parser granite"
})
diff --git a/src/pipeline/backend/vllm/vllm_backend.py b/src/pipeline/backend/vllm/vllm_backend.py
index 67701ae7..d68cc669 100644
--- a/src/pipeline/backend/vllm/vllm_backend.py
+++ b/src/pipeline/backend/vllm/vllm_backend.py
@@ -2,7 +2,7 @@
import sys
import os
from emd.models.utils.constants import ModelType
-
+import inspect
from backend.backend import OpenAICompitableProxyBackendBase
from emd.utils.logger_utils import get_logger
@@ -22,6 +22,13 @@ def create_proxy_server_start_command(self,model_path):
serve_command += f" --api-key {self.api_key}"
return serve_command
+ def openai_create_helper(self,fn:callable,request:dict):
+ sig = inspect.signature(fn)
+ extra_body = request.get("extra_body",{})
+ extra_params = {k:request.pop(k) for k in list(request.keys()) if k not in sig.parameters}
+ extra_body.update(extra_params)
+ request['extra_body'] = extra_body
+ return fn(**request)
def invoke(self, request):
# Transform input to vllm format
@@ -30,7 +37,7 @@ def invoke(self, request):
logger.info(f"Chat request:{request}")
if self.model_type == ModelType.EMBEDDING:
# print('cal embedding....')
- response = self.client.embeddings.create(**request)
+ response =self.openai_create_helper(self.client.embeddings.create,request)
# print('end cal embedding....')
elif self.model_type == ModelType.RERANK:
headers = {
@@ -43,7 +50,8 @@ def invoke(self, request):
headers=headers
).json()
else:
- response = self.client.chat.completions.create(**request)
+ # response = self.client.chat.completions.create(**request)
+ response = self.openai_create_helper(self.client.chat.completions.create,request)
logger.info(f"response:{response},{request}")
if request.get("stream", False):
@@ -58,7 +66,7 @@ async def ainvoke(self, request):
logger.info(f"Chat request:{request}")
if self.model_type == ModelType.EMBEDDING:
# print('cal embedding....')
- response = await self.async_client.embeddings.create(**request)
+ response = await self.openai_create_helper(self.async_client.embeddings.create,request)
# print('end cal embedding....')
elif self.model_type == ModelType.RERANK:
headers = {
@@ -71,7 +79,10 @@ async def ainvoke(self, request):
headers=headers
).json()
else:
- response = await self.async_client.chat.completions.create(**request)
+ response = await self.openai_create_helper(
+ self.async_client.chat.completions.create,
+ request
+ )
logger.info(f"response:{response},{request}")
if request.get("stream", False):
From a17a1f460f0c8ec0bb93ab8aab8bee7f98c0d2c2 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Tue, 29 Apr 2025 09:10:12 +0000
Subject: [PATCH 11/24] add qwen3
---
README.md | 3 +-
docs/en/best_deployment_practices.md | 17 ++
src/emd/cfn/sagemaker_realtime/template.yaml | 6 +-
src/emd/models/engines.py | 27 ++
src/emd/models/llms/qwen.py | 241 +++++++++++++++++-
src/emd/models/model_series.py | 14 +
src/emd/models/services.py | 9 +-
src/emd/models/utils/constants.py | 2 +
src/emd/models/vlms/qwen.py | 42 ++-
.../zhipu_z1_vllm_image_build.md | 12 +
10 files changed, 364 insertions(+), 9 deletions(-)
create mode 100644 src/pipeline/backend/convert_engine_image_to_dmaa_dockerfiles/zhipu_z1_vllm_image_build.md
diff --git a/README.md b/README.md
index 56b1a3df..c6fcdf0c 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,8 @@
## 🔥 Latest News
-
+- 2025-04-29: Deploy Qwen 3 series models with [one command line](https://github.com/aws-samples/easy-model-deployer/blob/main/docs/en/best_deployment_practices.md##famous-models###Qwen-3-Series).
+- 2025-04-21: Deploy GLM Z1/0414 series models with [one command line](https://github.com/aws-samples/easy-model-deployer/blob/main/docs/en/best_deployment_practices.md##famous-models###GLM-Z1/0414-Series).
- 2025-03-17: Deploy Gemma 3 series models with [one command line](https://github.com/aws-samples/easy-model-deployer/blob/main/docs/en/best_deployment_practices.md##famous-models###gemma-3-series).
- 2025-03-06: Deploy QwQ-32B with [one command line](docs/en/best_deployment_practices.md##famous-models###qwen-series###qwq-32b).
diff --git a/docs/en/best_deployment_practices.md b/docs/en/best_deployment_practices.md
index d4747e57..e9779634 100644
--- a/docs/en/best_deployment_practices.md
+++ b/docs/en/best_deployment_practices.md
@@ -3,6 +3,23 @@
This document provides examples of best practices for deploying models using EMD for various use cases.
## Famous Models
+### Qwen 3 Series
+```
+emd deploy --model-id Qwen3-30B-A3B --instance-type g5.12xlarge --engine-type vllm --service-type sagemaker_realtime
+
+emd deploy --model-id Qwen3-32B --instance-type g5.12xlarge --engine-type vllm --service-type sagemaker_realtime
+
+emd deploy --model-id Qwen3-8B --instance-type g5.12xlarge --engine-type vllm --service-type sagemaker_realtime
+```
+
+
+### GLM Z1/0414 Series
+```
+emd deploy --model-id GLM-Z1-32B-0414 --instance-type g5.12xlarge --engine-type vllm --service-type sagemaker_realtime
+
+emd deploy --model-id GLM-4-32B-0414 --instance-type g5.12xlarge --engine-type vllm --service-type sagemaker_realtime
+```
+
### Mistral Small Series
```
diff --git a/src/emd/cfn/sagemaker_realtime/template.yaml b/src/emd/cfn/sagemaker_realtime/template.yaml
index d5ada13b..90cf3406 100644
--- a/src/emd/cfn/sagemaker_realtime/template.yaml
+++ b/src/emd/cfn/sagemaker_realtime/template.yaml
@@ -26,6 +26,10 @@ Parameters:
Region:
Type: String
Description: The region to be used for the SageMaker Endpoint
+ MinCapacity:
+ Type: Number
+ Description: The minimum capacity of the endpoint
+ Default: 1
MaxCapacity:
Type: Number
Description: The maximum capacity of the endpoint
@@ -117,7 +121,7 @@ Resources:
Type: AWS::ApplicationAutoScaling::ScalableTarget
Properties:
MaxCapacity: !Ref MaxCapacity
- MinCapacity: 1
+ MinCapacity: !Ref MinCapacity
RoleARN: !GetAtt ExecutionRole.Arn
ResourceId: !Sub "endpoint/${SageMakerEndpoint.EndpointName}/variant/AllTraffic"
ScalableDimension: "sagemaker:variant:DesiredInstanceCount"
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index 255c1a75..fa1ecccb 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -127,6 +127,25 @@ class KtransformersEngine(OpenAICompitableEngine):
"environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
"default_cli_args": " --chat-template emd/models/chat_templates/qwen2vl_add_prefill_chat_template.jinja --max_model_len 16000 --disable-log-stats --limit-mm-per-prompt image=2,video=1 --max_num_seq 1 --gpu_memory_utilization 0.9"
})
+
+
+vllm_ui_tars_1_5_engin084 = VllmEngine(**{
+ **vllm_engine064.model_dump(),
+ "engine_dockerfile_config": {"VERSION":"v0.8.4"},
+ "environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
+ "default_cli_args": " --max_model_len 16000 --disable-log-stats --limit-mm-per-prompt image=1,video=0 --max_num_seq 2 --gpu_memory_utilization 0.9 --enable-prefix-caching"
+})
+
+
+
+vllm_qwen3_engin084 = VllmEngine(**{
+ **vllm_engine064.model_dump(),
+ "engine_dockerfile_config": {"VERSION":"v0.8.4"},
+ "environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
+ "default_cli_args": " --max_model_len 16000 --disable-log-stats --enable-reasoning --reasoning-parser deepseek_r1 --enable-auto-tool-choice --tool-call-parser hermes --enable-prefix-caching"
+})
+
+
vllm_qwen2vl72b_engine064 = VllmEngine(**{
**vllm_engine064.model_dump(),
"environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
@@ -141,6 +160,14 @@ class KtransformersEngine(OpenAICompitableEngine):
"default_cli_args": " --max_model_len 25000 --disable-log-stats --limit-mm-per-prompt image=20,video=1 --max_num_seq 1 --gpu_memory_utilization 0.9"
})
+vllm_qwen25vl72b_engine084 = VllmEngine(**{
+ **vllm_engine064.model_dump(),
+ "engine_dockerfile_config": {"VERSION":"v0.8.4"},
+ "dockerfile_name":"Dockerfile_qwen25_vl",
+ "environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
+ "default_cli_args": " --max_model_len 32000 --disable-log-stats --limit-mm-per-prompt image=1,video=1 --max_num_seq 1 --gpu_memory_utilization 0.9"
+})
+
vllm_qwq_engine073 = VllmEngine(**{
**vllm_qwen25vl72b_engine073.model_dump(),
"environment_variables": "export VLLM_ATTENTION_BACKEND=FLASHINFER && export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
diff --git a/src/emd/models/llms/qwen.py b/src/emd/models/llms/qwen.py
index 35a2cc1f..7ea4d3d6 100644
--- a/src/emd/models/llms/qwen.py
+++ b/src/emd/models/llms/qwen.py
@@ -8,7 +8,8 @@
tgi_qwen2d5_72b_on_inf2,
vllm_qwen2d5_72b_engine064,
vllm_qwq_engine073,
- vllm_qwq_engine082
+ vllm_qwq_engine082,
+ vllm_qwen3_engin084
)
from ..services import (
sagemaker_service,
@@ -34,7 +35,7 @@
from emd.models.utils.constants import ModelType
from emd.models.utils.constants import ModelType
from emd.models import ModelSeries
-from ..model_series import QWEN2D5_SERIES,QWEN_REASONING_MODEL
+from ..model_series import QWEN2D5_SERIES,QWEN_REASONING_MODEL,QWEN3_SERIES
Model.register(
dict(
@@ -498,3 +499,239 @@
model_series=QWEN_REASONING_MODEL
)
)
+
+
+Model.register(
+ dict(
+ model_id = "Qwen3-8B",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d16xlarge_instance,
+ g4dn2xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-8B",
+ modelscope_model_id="Qwen/Qwen3-8B",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+Model.register(
+ dict(
+ model_id = "Qwen3-0.6B",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d16xlarge_instance,
+ g4dn2xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-0.6B",
+ modelscope_model_id="Qwen/Qwen3-0.6B",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+Model.register(
+ dict(
+ model_id = "Qwen3-1.7B",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d16xlarge_instance,
+ g4dn2xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-1.7B",
+ modelscope_model_id="Qwen/Qwen3-1.7B",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+
+Model.register(
+ dict(
+ model_id = "Qwen3-4B",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d16xlarge_instance,
+ g4dn2xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-4B",
+ modelscope_model_id="Qwen/Qwen3-4B",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+
+Model.register(
+ dict(
+ model_id = "Qwen3-14B",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-14B",
+ modelscope_model_id="Qwen/Qwen3-14B",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+Model.register(
+ dict(
+ model_id = "Qwen3-32B",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-32B",
+ modelscope_model_id="Qwen/Qwen3-32B",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+
+Model.register(
+ dict(
+ model_id = "Qwen3-30B-A3B",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-30B-A3B",
+ modelscope_model_id="Qwen/Qwen3-30B-A3B",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
diff --git a/src/emd/models/model_series.py b/src/emd/models/model_series.py
index 896fa512..e5b16ccb 100644
--- a/src/emd/models/model_series.py
+++ b/src/emd/models/model_series.py
@@ -7,6 +7,13 @@
reference_link="https://github.com/QwenLM/Qwen2.5"
)
+QWEN3_SERIES = ModelSeries(
+ model_series_name = ModelSeriesType.QWEN3,
+ description="the latest addition to the Qwen family of large language models. These models represent our most advanced and intelligent systems to date, improving from our experience in building QwQ and Qwen2.5. We are making the weights of Qwen3 available to the public, including both dense and Mixture-of-Expert (MoE) models.",
+ reference_link="https://github.com/QwenLM/Qwen3"
+)
+
+
GLM4_SERIES = ModelSeries(
model_series_name = ModelSeriesType.GLM4,
description="The GLM-4 series includes the latest generation of pre-trained models launched by Zhipu AI.",
@@ -62,6 +69,13 @@
reference_link="https://github.com/QwenLM/Qwen2-VL"
)
+
+AGENT_SERIES = ModelSeries(
+ model_series_name=ModelSeriesType.AGENT,
+    description="""LLM or VLM models for agentic tasks, e.g. computer-use, browser-use""",
+ reference_link=""
+)
+
INTERNVL25_SERIES = ModelSeries(
model_series_name=ModelSeriesType.INTERNVL25,
description="""InternVL2.5 is an advanced multimodal large language model (MLLM) series with parameter coverage ranging from 1B to 78B. InternVL2_5-78B is the first open-source MLLMs to achieve over 70% on the MMMU benchmark, matching the performance of leading closed-source commercial models like GPT-4o.""",
diff --git a/src/emd/models/services.py b/src/emd/models/services.py
index 7c12612a..d66b4f2d 100644
--- a/src/emd/models/services.py
+++ b/src/emd/models/services.py
@@ -16,6 +16,7 @@
"EngineType":"engine_type",
"Region":"region",
"MaxCapacity": ValueWithDefault(name="max_capacity",default=1),
+ "MinCapacity": ValueWithDefault(name="min_capacity",default=1),
"AutoScalingTargetValue": ValueWithDefault(name="auto_scaling_target_value",default=10),
"SageMakerEndpointName": ValueWithDefault(name="sagemaker_endpoint_name",default="Auto-generate")
},
@@ -36,7 +37,8 @@
"EngineType":"engine_type",
"Region":"region",
"MaxCapacity": ValueWithDefault(name="max_capacity",default=1),
- "AutoScalingTargetValue": ValueWithDefault(name="auto_scaling_target_value",default=10)
+ "MinCapacity": ValueWithDefault(name="min_capacity",default=1),
+ "AutoScalingTargetValue": ValueWithDefault(name="auto_scaling_target_value",default=10),
},
name = "Amazon SageMaker AI Real-time inference",
service_type=ServiceType.SAGEMAKER_OLDER,
@@ -55,7 +57,10 @@
"FrameWorkType":"framework_type",
"ServiceType":"service_type",
"EngineType":"engine_type",
- "Region":"region"
+ "Region":"region",
+ "MaxCapacity": ValueWithDefault(name="max_capacity",default=1),
+ "MinCapacity": ValueWithDefault(name="min_capacity",default=1),
+ "AutoScalingTargetValue": ValueWithDefault(name="auto_scaling_target_value",default=10),
},
name = "Amazon SageMaker AI Asynchronous inference with OpenAI Compatible API",
service_type=ServiceType.SAGEMAKER_ASYNC,
diff --git a/src/emd/models/utils/constants.py b/src/emd/models/utils/constants.py
index 27311173..4fde3cad 100644
--- a/src/emd/models/utils/constants.py
+++ b/src/emd/models/utils/constants.py
@@ -217,6 +217,7 @@ class ModelSeriesType(ConstantBase):
TXGEMMA = "txgemma"
MISTRAL = "mistral"
QWEN2D5 = "qwen2.5"
+ QWEN3 = "qwen3"
GLM4 = "glm4"
INTERLM2d5 = "internlm2.5"
WHISPER = "whisper"
@@ -225,6 +226,7 @@ class ModelSeriesType(ConstantBase):
BCE = "bce"
COMFYUI = "comfyui"
QWEN2VL = "qwen2vl"
+ AGENT = "agent"
INTERNVL25 = "internvl2.5"
LLAMA = "llama"
QWEN_REASONING_MODEL = "qwen reasoning model"
diff --git a/src/emd/models/vlms/qwen.py b/src/emd/models/vlms/qwen.py
index c968515a..feb6e9cb 100644
--- a/src/emd/models/vlms/qwen.py
+++ b/src/emd/models/vlms/qwen.py
@@ -2,7 +2,9 @@
from ..engines import (
vllm_qwen2vl7b_engine064,
vllm_qwen2vl72b_engine064,
- vllm_qwen25vl72b_engine073
+ vllm_qwen25vl72b_engine073,
+ vllm_ui_tars_1_5_engin084,
+ vllm_qwen25vl72b_engine084
)
from ..services import (
sagemaker_service,
@@ -23,7 +25,7 @@
local_instance
)
from emd.models.utils.constants import ModelType
-from ..model_series import QWEN2VL_SERIES,QWEN_REASONING_MODEL
+from ..model_series import QWEN2VL_SERIES,QWEN_REASONING_MODEL,AGENT_SERIES
Model.register(
@@ -57,7 +59,7 @@
Model.register(
dict(
model_id = "Qwen2.5-VL-72B-Instruct-AWQ",
- supported_engines=[vllm_qwen25vl72b_engine073],
+ supported_engines=[vllm_qwen25vl72b_engine084],
supported_instances=[
g5d12xlarge_instance,
g5d24xlarge_instance,
@@ -169,3 +171,37 @@
model_series=QWEN2VL_SERIES
)
)
+
+
+
+Model.register(
+ dict(
+ model_id = "UI-TARS-1.5-7B",
+ supported_engines=[vllm_ui_tars_1_5_engin084],
+ supported_instances=[
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d12xlarge_instance,
+ g5d16xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ g6e2xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service, sagemaker_async_service,local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="ByteDance-Seed/UI-TARS-1.5-7B",
+ modelscope_model_id="ByteDance-Seed/UI-TARS-1.5-7B",
+ require_huggingface_token=False,
+ application_scenario="computer-use or browser-use",
+ description="The latest series of UI-TARS-1.5 from ByteDance-Seed team",
+ model_type=ModelType.VLM,
+ model_series=AGENT_SERIES
+ )
+)
diff --git a/src/pipeline/backend/convert_engine_image_to_dmaa_dockerfiles/zhipu_z1_vllm_image_build.md b/src/pipeline/backend/convert_engine_image_to_dmaa_dockerfiles/zhipu_z1_vllm_image_build.md
new file mode 100644
index 00000000..39bacd10
--- /dev/null
+++ b/src/pipeline/backend/convert_engine_image_to_dmaa_dockerfiles/zhipu_z1_vllm_image_build.md
@@ -0,0 +1,12 @@
+
+To build the current image, please first download the following repo:
+```shell
+git clone https://github.com/vllm-project/vllm.git vllm_glm_z1
+cd vllm_glm_z1 && git reset --hard fe742aef5aaf406c62cafa248068818bfe517d6e
+```
+Then run the following command to build the image:
+```shell
+#optionally specifies: --build-arg max_jobs=8 --build-arg nvcc_threads=2
+DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=8 --build-arg nvcc_threads=2 -f docker/Dockerfile . --target vllm-openai --tag vllm/vllm-openai:glm_z1_and_0414
+docker tag vllm/vllm-openai:glm_z1_and_0414 public.ecr.aws/aws-gcr-solutions/dmaa-vllm/vllm-openai:glm_z1_and_0414
+```
From 8d37586df3053939fd1de28c4c7b7f94ac53f21e Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Tue, 29 Apr 2025 13:53:36 +0000
Subject: [PATCH 12/24] fix cli bugs
---
src/emd/models/engines.py | 4 ++--
src/emd/models/llms/qwen.py | 40 ++++++++++++++++++++++++++++++++++++-
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index fa1ecccb..d6bd3e13 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -140,9 +140,9 @@ class KtransformersEngine(OpenAICompitableEngine):
vllm_qwen3_engin084 = VllmEngine(**{
**vllm_engine064.model_dump(),
- "engine_dockerfile_config": {"VERSION":"v0.8.4"},
+ "engine_dockerfile_config": {"VERSION":"v0.8.5"},
"environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
- "default_cli_args": " --max_model_len 16000 --disable-log-stats --enable-reasoning --reasoning-parser deepseek_r1 --enable-auto-tool-choice --tool-call-parser hermes --enable-prefix-caching"
+ "default_cli_args": " --max_model_len 16000 --max_num_seq 30 --disable-log-stats --enable-reasoning --reasoning-parser deepseek_r1 --enable-auto-tool-choice --tool-call-parser hermes --enable-prefix-caching"
})
diff --git a/src/emd/models/llms/qwen.py b/src/emd/models/llms/qwen.py
index 7ea4d3d6..57a7e4f5 100644
--- a/src/emd/models/llms/qwen.py
+++ b/src/emd/models/llms/qwen.py
@@ -578,7 +578,7 @@
g5d4xlarge_instance,
g5d8xlarge_instance,
g5d16xlarge_instance,
- g4dn2xlarge_instance,
+ # g4dn2xlarge_instance,
# g5d24xlarge_instance,
# g5d48xlarge_instance,
local_instance
@@ -671,6 +671,44 @@
)
)
+
+# ValueError("type fp8e4nv not supported in this architecture. The supported fp8 dtypes are ('fp8e4b15', 'fp8e5')")
+# The g5 instance may not support fp8e4nv
+# Model.register(
+# dict(
+# model_id = "Qwen3-14B-FP8",
+# supported_engines=[vllm_qwen3_engin084],
+# supported_instances=[
+# g5d2xlarge_instance,
+# g5d4xlarge_instance,
+# g5d8xlarge_instance,
+# g5d16xlarge_instance,
+# # g4dn2xlarge_instance,
+# # g5d24xlarge_instance,
+# # g5d48xlarge_instance,
+# local_instance
+# ],
+# supported_services=[
+# sagemaker_service,
+# sagemaker_async_service,
+# ecs_service,
+# local_service
+# ],
+# supported_frameworks=[
+# fastapi_framework
+# ],
+# allow_china_region=True,
+# huggingface_model_id="Qwen/Qwen3-14B-FP8",
+# modelscope_model_id="Qwen/Qwen3-14B-FP8",
+# require_huggingface_token=False,
+# application_scenario="Agent, tool use, translation, summary",
+# description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+# model_type=ModelType.LLM,
+# model_series=QWEN3_SERIES
+# )
+# )
+
+
Model.register(
dict(
model_id = "Qwen3-32B",
From 1f1ab3381420cf73cfd59ca786423aefbfed11c5 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Tue, 29 Apr 2025 14:38:40 +0000
Subject: [PATCH 13/24] fix
---
src/emd/models/engines.py | 6 ------
1 file changed, 6 deletions(-)
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index dacf3303..d6bd3e13 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -140,15 +140,9 @@ class KtransformersEngine(OpenAICompitableEngine):
vllm_qwen3_engin084 = VllmEngine(**{
**vllm_engine064.model_dump(),
-<<<<<<< HEAD
"engine_dockerfile_config": {"VERSION":"v0.8.5"},
"environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
"default_cli_args": " --max_model_len 16000 --max_num_seq 30 --disable-log-stats --enable-reasoning --reasoning-parser deepseek_r1 --enable-auto-tool-choice --tool-call-parser hermes --enable-prefix-caching"
-=======
- "engine_dockerfile_config": {"VERSION":"v0.8.4"},
- "environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
- "default_cli_args": " --max_model_len 16000 --disable-log-stats --enable-reasoning --reasoning-parser deepseek_r1 --enable-auto-tool-choice --tool-call-parser hermes --enable-prefix-caching"
->>>>>>> 36a49970280a935d9e4f7cf97180faa8a9477bf7
})
From 29fa1425852d81ca7d73162fb01a0a1aee39140d Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Wed, 7 May 2025 11:49:04 +0000
Subject: [PATCH 14/24] add deepseek r1/Qwen3-235B-A22B
---
src/emd/commands/deploy.py | 1 +
src/emd/models/engines.py | 7 ++++-
src/emd/models/llms/deepseek.py | 26 +++++++++++++++++
src/emd/models/llms/qwen.py | 49 +++++++++++++++++++++++++++++++++
4 files changed, 82 insertions(+), 1 deletion(-)
diff --git a/src/emd/commands/deploy.py b/src/emd/commands/deploy.py
index 3d0b001a..9314377a 100644
--- a/src/emd/commands/deploy.py
+++ b/src/emd/commands/deploy.py
@@ -399,6 +399,7 @@ def deploy(
else:
gpu_num = get_gpu_num()
support_gpu_num = model.supported_instances[0].gpu_num
+ support_gpu_num = support_gpu_num or gpu_num
default_gpus_str = ",".join([str(i) for i in range(min(gpu_num,support_gpu_num))])
gpus_to_deploy = questionary.text(
"input the local gpu ids to deploy the model (e.g. 0,1,2):",
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index d6bd3e13..d16ffc38 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -108,6 +108,11 @@ class KtransformersEngine(OpenAICompitableEngine):
vllm_deepseek_r1_distill_llama_engine071 = vllm_deepseek_r1_distill_qwen_engine071
+vllm_deepseek_r1_engine084 = VllmEngine(**{
+ **vllm_engine064.model_dump(),
+ "engine_dockerfile_config": {"VERSION":"v0.8.4"},
+ "default_cli_args": "--max_num_seq 10 --max_model_len 16000 --chat-template emd/models/chat_templates/deepseek_r1.jinja"
+})
vllm_qwen2d5_72b_engine064 = VllmEngine(**{
**vllm_engine064.model_dump(),
@@ -165,7 +170,7 @@ class KtransformersEngine(OpenAICompitableEngine):
"engine_dockerfile_config": {"VERSION":"v0.8.4"},
"dockerfile_name":"Dockerfile_qwen25_vl",
"environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
- "default_cli_args": " --max_model_len 32000 --disable-log-stats --limit-mm-per-prompt image=1,video=1 --max_num_seq 1 --gpu_memory_utilization 0.9"
+ "default_cli_args": " --max_model_len 32000 --disable-log-stats --limit-mm-per-prompt image=1,video=1 --max_num_seq 1 --gpu_memory_utilization 0.7"
})
vllm_qwq_engine073 = VllmEngine(**{
diff --git a/src/emd/models/llms/deepseek.py b/src/emd/models/llms/deepseek.py
index 1359ac33..a022329d 100644
--- a/src/emd/models/llms/deepseek.py
+++ b/src/emd/models/llms/deepseek.py
@@ -7,6 +7,7 @@
llama_cpp_deepseek_r1_distill_engineb9ab0a4,
tgi_deepseek_r1_llama_70b_engine301,
ktransformers_engine,
+ vllm_deepseek_r1_engine084
)
from ..services import (
sagemaker_service,
@@ -450,6 +451,31 @@
)
)
+Model.register(
+ dict(
+ model_id = "DeepSeek-R1",
+ supported_engines=[vllm_deepseek_r1_engine084],
+ supported_instances=[
+ local_instance
+ ],
+ supported_services=[
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ need_prepare_model=False,
+ huggingface_model_id="unsloth/DeepSeek-R1",
+ modelscope_model_id="unsloth/DeepSeek-R1",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of DeepSeek LLMs for reasoning",
+ model_type=ModelType.LLM,
+ model_series=DEEPSEEK_REASONING_MODEL
+ )
+)
+
Model.register(
dict(
model_id = "deepseek-r1-671b-4bit_gguf",
diff --git a/src/emd/models/llms/qwen.py b/src/emd/models/llms/qwen.py
index 57a7e4f5..d0d423e3 100644
--- a/src/emd/models/llms/qwen.py
+++ b/src/emd/models/llms/qwen.py
@@ -773,3 +773,52 @@
model_series=QWEN3_SERIES
)
)
+
+
+Model.register(
+ dict(
+ model_id = "Qwen3-235B-A22B",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ local_instance
+ ],
+ supported_services=[
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-235B-A22B",
+ modelscope_model_id="Qwen/Qwen3-235B-A22B",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+Model.register(
+ dict(
+ model_id = "Qwen3-235B-A22B-FP8",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ local_instance
+ ],
+ supported_services=[
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-235B-A22B-FP8",
+ modelscope_model_id="Qwen/Qwen3-235B-A22B-FP8",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
From a546df1e5e0fec325d178b55c7a2cc908fe24268 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Thu, 8 May 2025 08:45:14 +0000
Subject: [PATCH 15/24] fix local deploy account bug
---
src/emd/models/model.py | 14 +++++++-------
src/pipeline/deploy/build_and_push_image.py | 8 ++++++--
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/src/emd/models/model.py b/src/emd/models/model.py
index 6289e890..197fedbe 100644
--- a/src/emd/models/model.py
+++ b/src/emd/models/model.py
@@ -375,13 +375,13 @@ def get_deploy_version_from_stack_name(cls,stack_name):
except Exception as e:
raise ValueError(f"stack_name:{stack_name} is not a valid model stack name")
- def get_image_build_account_id(self):
- current_account_id = boto3.client("sts").get_caller_identity()["Account"]
- build_image_account_id = (
- self.executable_config.current_engine.base_image_account_id or \
- current_account_id
- )
- return build_image_account_id
+ # def get_image_build_account_id(self):
+ # current_account_id = boto3.client("sts").get_caller_identity()["Account"]
+ # build_image_account_id = (
+ # self.executable_config.current_engine.base_image_account_id or \
+ # current_account_id
+ # )
+ # return build_image_account_id
def get_image_push_account_id(self):
current_account_id = boto3.client("sts").get_caller_identity()["Account"]
diff --git a/src/pipeline/deploy/build_and_push_image.py b/src/pipeline/deploy/build_and_push_image.py
index 60aafe52..ffbd4927 100644
--- a/src/pipeline/deploy/build_and_push_image.py
+++ b/src/pipeline/deploy/build_and_push_image.py
@@ -194,7 +194,11 @@ def run(
# docker build image
# get current aws account_id
- push_image_account_id = execute_model.get_image_push_account_id()
+ if instance_type == InstanceType.LOCAL:
+ push_image_account_id = "local"
+ else:
+ push_image_account_id = execute_model.get_image_push_account_id()
+
build_image_account_id = (
execute_model.executable_config.current_engine.base_image_account_id
)
@@ -325,7 +329,7 @@ def run(
logger.info(f"pushing image: {push_image_script}")
assert os.system(push_image_script) == 0
- image_uri = ecr_repo_uri
+ # image_uri = ecr_repo_uri
logger.info(f"Image URI: {ecr_repo_uri}")
parameters = {"ecr_repo_uri": ecr_repo_uri}
From ffef6b01d10a8254464c78dfd430d7c70f58a3a7 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Thu, 8 May 2025 08:57:35 +0000
Subject: [PATCH 16/24] add qwen 3 awq models
---
src/emd/models/llms/qwen.py | 71 +++++++++++++++++++++++++++++++++++++
1 file changed, 71 insertions(+)
diff --git a/src/emd/models/llms/qwen.py b/src/emd/models/llms/qwen.py
index d0d423e3..27b6308a 100644
--- a/src/emd/models/llms/qwen.py
+++ b/src/emd/models/llms/qwen.py
@@ -639,6 +639,42 @@
)
+
+Model.register(
+ dict(
+ model_id = "Qwen3-14B-AWQ",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d16xlarge_instance,
+ g4dn2xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-14B-AWQ",
+ modelscope_model_id="Qwen/Qwen3-14B-AWQ",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+
Model.register(
dict(
model_id = "Qwen3-14B",
@@ -709,6 +745,41 @@
# )
+
+Model.register(
+ dict(
+ model_id = "Qwen3-32B-AWQ",
+ supported_engines=[vllm_qwen3_engin084],
+ supported_instances=[
+ g5d12xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ # g5d24xlarge_instance,
+ # g5d48xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen3-32B-AWQ",
+ modelscope_model_id="Qwen/Qwen3-32B-AWQ",
+ require_huggingface_token=False,
+ application_scenario="Agent, tool use, translation, summary",
+ description="The latest series of Qwen LLMs, offers base and tuned models from 0.5B to 72B\n parameters, featuring enhanced knowledge, improved coding and math skills, better instruction\n following, long-text generation, structured data handling, 128K token context support, and\n multilingual capabilities for 29+ languages.",
+ model_type=ModelType.LLM,
+ model_series=QWEN3_SERIES
+ )
+)
+
+
+
Model.register(
dict(
model_id = "Qwen3-32B",
From 7047cae1fc47468bd748a684abc96607c9cf4375 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Thu, 8 May 2025 12:47:50 +0000
Subject: [PATCH 17/24] fix serialize_utils bugs
---
src/emd/models/utils/serialize_utils.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/emd/models/utils/serialize_utils.py b/src/emd/models/utils/serialize_utils.py
index e522bc05..44352125 100644
--- a/src/emd/models/utils/serialize_utils.py
+++ b/src/emd/models/utils/serialize_utils.py
@@ -2,14 +2,15 @@
import argparse
JSON_DOUBLE_QUOTE_REPLACE = ''
+JSON_SINGLE_QUOTE_REPLACE = '<*>'
def load_extra_params(string):
- string = string.replace(JSON_DOUBLE_QUOTE_REPLACE,'"')
+ string = string.replace(JSON_DOUBLE_QUOTE_REPLACE,'"').replace(JSON_SINGLE_QUOTE_REPLACE,"'")
try:
return json.loads(string)
except json.JSONDecodeError:
raise argparse.ArgumentTypeError(f"Invalid dictionary format: {string}")
def dump_extra_params(d:dict):
- return json.dumps(d).replace('"', JSON_DOUBLE_QUOTE_REPLACE)
+ return json.dumps(d).replace("'", JSON_SINGLE_QUOTE_REPLACE).replace('"', JSON_DOUBLE_QUOTE_REPLACE)
From fb9aab63c6ddb43b49c7cd611a7d8554154f44f3 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Wed, 14 May 2025 05:55:59 +0000
Subject: [PATCH 18/24] modify qwen3 deployment
---
src/emd/models/engines.py | 4 ++--
src/emd/models/vlms/qwen.py | 43 ++++++++++++++++++++++++++++++++++++-
2 files changed, 44 insertions(+), 3 deletions(-)
diff --git a/src/emd/models/engines.py b/src/emd/models/engines.py
index d16ffc38..f451b8a6 100644
--- a/src/emd/models/engines.py
+++ b/src/emd/models/engines.py
@@ -145,9 +145,9 @@ class KtransformersEngine(OpenAICompitableEngine):
vllm_qwen3_engin084 = VllmEngine(**{
**vllm_engine064.model_dump(),
- "engine_dockerfile_config": {"VERSION":"v0.8.5"},
+ "engine_dockerfile_config": {"VERSION":"v0.8.5.dev649_g0189a65a2"},
"environment_variables": "export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True",
- "default_cli_args": " --max_model_len 16000 --max_num_seq 30 --disable-log-stats --enable-reasoning --reasoning-parser deepseek_r1 --enable-auto-tool-choice --tool-call-parser hermes --enable-prefix-caching"
+ "default_cli_args": " --max_model_len 16000 --max_num_seq 30 --disable-log-stats --enable-reasoning --reasoning-parser qwen3 --enable-auto-tool-choice --tool-call-parser hermes --enable-prefix-caching"
})
diff --git a/src/emd/models/vlms/qwen.py b/src/emd/models/vlms/qwen.py
index feb6e9cb..e0dae207 100644
--- a/src/emd/models/vlms/qwen.py
+++ b/src/emd/models/vlms/qwen.py
@@ -114,6 +114,44 @@
)
)
+
+
+Model.register(
+ dict(
+ model_id = "Qwen2.5-VL-7B-Instruct",
+ supported_engines=[vllm_qwen25vl72b_engine073],
+ supported_instances=[
+ g5d2xlarge_instance,
+ g5d4xlarge_instance,
+ g5d8xlarge_instance,
+ g5d12xlarge_instance,
+ g5d16xlarge_instance,
+ g5d24xlarge_instance,
+ g5d48xlarge_instance,
+ g6e2xlarge_instance,
+ local_instance
+ ],
+ supported_services=[
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
+ ],
+ supported_frameworks=[
+ fastapi_framework
+ ],
+ allow_china_region=True,
+ huggingface_model_id="Qwen/Qwen2.5-VL-7B-Instruct",
+ modelscope_model_id="Qwen/Qwen2.5-VL-7B-Instruct",
+ require_huggingface_token=False,
+ application_scenario="vision llms for image understanding",
+ description="The latest series of Qwen2.5 VL",
+ model_type=ModelType.VLM,
+ model_series=QWEN2VL_SERIES
+ )
+)
+
+
Model.register(
dict(
model_id = "QVQ-72B-Preview-AWQ",
@@ -156,7 +194,10 @@
local_instance
],
supported_services=[
- sagemaker_service, sagemaker_async_service,local_service
+ sagemaker_service,
+ sagemaker_async_service,
+ ecs_service,
+ local_service
],
supported_frameworks=[
fastapi_framework
From dd8edb541c30b6b8a1546980ca462a52e3ab6923 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Thu, 15 May 2025 02:57:19 +0000
Subject: [PATCH 19/24] modify docs
---
docs/en/best_deployment_practices.md | 6 +++---
docs/en/commands.md | 2 +-
src/pipeline/backend/tests/vllm_tester.py | 4 ++--
src/pipeline/pipeline.sh | 4 ++--
tests/sdk_tests/deploy_tests/glm_test.py | 2 +-
tests/sdk_tests/deploy_tests/internlm_test.py | 2 +-
tests/sdk_tests/deploy_tests/qwen_test.py | 6 +++---
7 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/docs/en/best_deployment_practices.md b/docs/en/best_deployment_practices.md
index e1784672..667cdb51 100644
--- a/docs/en/best_deployment_practices.md
+++ b/docs/en/best_deployment_practices.md
@@ -65,7 +65,7 @@ To enable longer context windows, use the `--extra-params` option with engine-sp
```bash
emd deploy --model-id Qwen2.5-7B-Instruct --instance-type g5.4xlarge --engine-type vllm --service-type sagemaker_realtime --extra-params '{
"engine_params": {
- "vllm_cli_args": "--max_model_len 16000 --max_num_seqs 4"
+ "cli_args": "--max_model_len 16000 --max_num_seqs 4"
}
}'
```
@@ -202,13 +202,13 @@ Engine parameters control the behavior of the inference engine.
```json
{
"engine_params": {
- "vllm_cli_args": "--max_model_len 16000 --max_num_seqs 4 --gpu_memory_utilization 0.9",
+ "cli_args": "--max_model_len 16000 --max_num_seqs 4 --gpu_memory_utilization 0.9",
"environment_variables": "export VLLM_ATTENTION_BACKEND=FLASHINFER && export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True"
}
}
```
-- `vllm_cli_args`: Command line arguments specific to vLLM
+- `cli_args`: Command line arguments specific to vLLM
- Common vLLM parameters:
- `--max_model_len`: Maximum context length
- `--max_num_seqs`: Maximum number of sequences
diff --git a/docs/en/commands.md b/docs/en/commands.md
index bdd1f0fb..7e2881f2 100644
--- a/docs/en/commands.md
+++ b/docs/en/commands.md
@@ -87,7 +87,7 @@ emd deploy --allow-local-deploy
Deploy with custom parameters:
```bash
-emd deploy --model-id Qwen2.5-7B-Instruct --extra-params '{"engine_params": {"vllm_cli_args": "--max_model_len 16000 --max_num_seqs 4"}}'
+emd deploy --model-id Qwen2.5-7B-Instruct --extra-params '{"engine_params": {"cli_args": "--max_model_len 16000 --max_num_seqs 4"}}'
```
### status
diff --git a/src/pipeline/backend/tests/vllm_tester.py b/src/pipeline/backend/tests/vllm_tester.py
index 6784268f..ca3e7a9f 100644
--- a/src/pipeline/backend/tests/vllm_tester.py
+++ b/src/pipeline/backend/tests/vllm_tester.py
@@ -19,7 +19,7 @@ def setUpClass(self):
service_type = "sagemaker"
framework_type = "fastapi"
model_s3_bucket = "emd-us-east-1-bucket-75c6f785084f4fd998da560a0a6190fc"
- vllm_cli_args = "--max_model_len 4096"
+ cli_args = "--max_model_len 4096"
# model_id = "Qwen2.5-0.5B-Instruct"
model_id = "bge-m3"
model = Model.get_model(model_id)
@@ -30,7 +30,7 @@ def setUpClass(self):
current_service=model.find_current_service(service_type),
current_framework=model.find_current_framework(framework_type),
model_s3_bucket=model_s3_bucket,
- vllm_cli_args=vllm_cli_args,
+ cli_args=cli_args,
)
self.execute_model = model.convert_to_execute_model(executable_config)
diff --git a/src/pipeline/pipeline.sh b/src/pipeline/pipeline.sh
index fc171d4d..98e5e5b9 100644
--- a/src/pipeline/pipeline.sh
+++ b/src/pipeline/pipeline.sh
@@ -24,7 +24,7 @@ gpu_num=1
instance_type=g5.12xlarge
# python deploy/prepare_model.py --region $region --model_id $model_id --model_s3_bucket $model_s3_bucket || { echo "Failed to prepare model"; exit 1; }
-# python deploy/build_and_push_image.py --region $region --model_id $model_id --backend_type $backend_type --gpu_num $gpu_num --instance_type $instance_type --model_s3_bucket $model_s3_bucket --vllm_cli_args "--max_model_len 4096" || { echo "Failed to build and push image"; exit 1; }
+# python deploy/build_and_push_image.py --region $region --model_id $model_id --backend_type $backend_type --gpu_num $gpu_num --instance_type $instance_type --model_s3_bucket $model_s3_bucket --cli_args "--max_model_len 4096" || { echo "Failed to build and push image"; exit 1; }
# python deploy/deploy.py --region $region --instance_type $instance_type --model_id $model_id --backend_type $backend_type --service $service --gpu_num $gpu_num || { echo "Failed to deploy"; exit 1; }
python pipeline.py \
@@ -39,6 +39,6 @@ python pipeline.py \
--role_name SageMakerExecutionRoleTest6 \
--skip_image_build \
--skip_deploy \
- --vllm_cli_args "--max_num_seqs 20 --max_model_len 16000 --disable-log-stats"
+ --cli_args "--max_num_seqs 20 --max_model_len 16000 --disable-log-stats"
echo "Pipeline executed successfully"
diff --git a/tests/sdk_tests/deploy_tests/glm_test.py b/tests/sdk_tests/deploy_tests/glm_test.py
index 9a4005a8..3d8503fe 100644
--- a/tests/sdk_tests/deploy_tests/glm_test.py
+++ b/tests/sdk_tests/deploy_tests/glm_test.py
@@ -7,7 +7,7 @@
service_type="sagemaker",
region="us-west-2",
extra_params={
- "vllm_cli_args":"--max_num_seqs 4 --max_model_len 16000 --disable-log-stats"
+ "cli_args":"--max_num_seqs 4 --max_model_len 16000 --disable-log-stats"
},
force_env_stack_update=True
)
diff --git a/tests/sdk_tests/deploy_tests/internlm_test.py b/tests/sdk_tests/deploy_tests/internlm_test.py
index 5b9226e8..6b829be9 100644
--- a/tests/sdk_tests/deploy_tests/internlm_test.py
+++ b/tests/sdk_tests/deploy_tests/internlm_test.py
@@ -7,6 +7,6 @@
service_type="sagemaker",
region="us-west-2",
extra_params={
- "vllm_cli_args":"--max_num_seqs 4 --max_model_len 16000 --disable-log-stats"
+ "cli_args":"--max_num_seqs 4 --max_model_len 16000 --disable-log-stats"
}
)
diff --git a/tests/sdk_tests/deploy_tests/qwen_test.py b/tests/sdk_tests/deploy_tests/qwen_test.py
index 061ccae0..9c82564c 100644
--- a/tests/sdk_tests/deploy_tests/qwen_test.py
+++ b/tests/sdk_tests/deploy_tests/qwen_test.py
@@ -8,7 +8,7 @@
service_type="sagemaker",
region="us-west-2",
# extra_params={
- # "vllm_cli_args":"--max_num_seqs 20 --max_model_len 16000 --disable-log-stats"
+ # "cli_args":"--max_num_seqs 20 --max_model_len 16000 --disable-log-stats"
# }
)
@@ -20,7 +20,7 @@
# service_type="sagemaker",
# region="us-west-2",
# extra_params={
-# "vllm_cli_args":"--max_num_seqs 20 --max_model_len 16000 --disable-log-stats"
+# "cli_args":"--max_num_seqs 20 --max_model_len 16000 --disable-log-stats"
# }
# )
# deploy(
@@ -31,6 +31,6 @@
# service_type="sagemaker",
# region="us-west-2",
# extra_params={
-# "vllm_cli_args":"--max_num_seqs 20 --max_model_len 16000 --disable-log-stats"
+# "cli_args":"--max_num_seqs 20 --max_model_len 16000 --disable-log-stats"
# }
# )
From 72f2062effbf09bd888e7f89d104516bafee087a Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Wed, 16 Jul 2025 13:39:56 +0000
Subject: [PATCH 20/24] modify qwen3 engine; add strands client test
---
src/emd/models/llms/qwen.py | 5 +++--
.../client_tests/strands_agents_test.py | 21 +++++++++++++++++++
2 files changed, 24 insertions(+), 2 deletions(-)
create mode 100644 tests/sdk_tests/client_tests/strands_agents_test.py
diff --git a/src/emd/models/llms/qwen.py b/src/emd/models/llms/qwen.py
index 27b6308a..738fa5a8 100644
--- a/src/emd/models/llms/qwen.py
+++ b/src/emd/models/llms/qwen.py
@@ -9,7 +9,8 @@
vllm_qwen2d5_72b_engine064,
vllm_qwq_engine073,
vllm_qwq_engine082,
- vllm_qwen3_engin084
+ vllm_qwen3_engin084,
+ vllm_qwen3_engin091
)
from ..services import (
sagemaker_service,
@@ -504,7 +505,7 @@
Model.register(
dict(
model_id = "Qwen3-8B",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d2xlarge_instance,
g5d4xlarge_instance,
diff --git a/tests/sdk_tests/client_tests/strands_agents_test.py b/tests/sdk_tests/client_tests/strands_agents_test.py
new file mode 100644
index 00000000..0c132ba0
--- /dev/null
+++ b/tests/sdk_tests/client_tests/strands_agents_test.py
@@ -0,0 +1,21 @@
+from strands import Agent
+from strands.models.openai import OpenAIModel
+from strands_tools import calculator, current_time
+import logging
+
+model = OpenAIModel(
+ client_args={
+ "api_key": "xxx",
+ "base_url": "http://localhost:8080/v1/",
+ },
+ # **model_config
+ model_id="Qwen3-8B",
+ params={
+ "extra_body": {"chat_template_kwargs": {"enable_thinking": False}}
+ }
+)
+
+
+agent = Agent(model=model, tools=[calculator, current_time])
+response = agent("现在几点")
+print(response)
From 0bfa1465f244ac4d8d0f911ad108a4959dc33a15 Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Thu, 17 Jul 2025 01:37:40 +0000
Subject: [PATCH 21/24] modify engine
---
src/emd/models/llms/qwen.py | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/src/emd/models/llms/qwen.py b/src/emd/models/llms/qwen.py
index 738fa5a8..0bcc080b 100644
--- a/src/emd/models/llms/qwen.py
+++ b/src/emd/models/llms/qwen.py
@@ -9,7 +9,6 @@
vllm_qwen2d5_72b_engine064,
vllm_qwq_engine073,
vllm_qwq_engine082,
- vllm_qwen3_engin084,
vllm_qwen3_engin091
)
from ..services import (
@@ -539,7 +539,7 @@
Model.register(
dict(
model_id = "Qwen3-0.6B",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d2xlarge_instance,
g5d4xlarge_instance,
@@ -573,7 +573,7 @@
Model.register(
dict(
model_id = "Qwen3-1.7B",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d2xlarge_instance,
g5d4xlarge_instance,
@@ -608,7 +608,7 @@
Model.register(
dict(
model_id = "Qwen3-4B",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d2xlarge_instance,
g5d4xlarge_instance,
@@ -644,7 +644,7 @@
Model.register(
dict(
model_id = "Qwen3-14B-AWQ",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d2xlarge_instance,
g5d4xlarge_instance,
@@ -679,7 +679,7 @@
Model.register(
dict(
model_id = "Qwen3-14B",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d12xlarge_instance,
g5d24xlarge_instance,
@@ -714,7 +714,7 @@
# Model.register(
# dict(
# model_id = "Qwen3-14B-FP8",
-# supported_engines=[vllm_qwen3_engin084],
+# supported_engines=[vllm_qwen3_engin091],
# supported_instances=[
# g5d2xlarge_instance,
# g5d4xlarge_instance,
@@ -750,7 +750,7 @@
Model.register(
dict(
model_id = "Qwen3-32B-AWQ",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d12xlarge_instance,
g5d24xlarge_instance,
@@ -784,7 +784,7 @@
Model.register(
dict(
model_id = "Qwen3-32B",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d12xlarge_instance,
g5d24xlarge_instance,
@@ -817,7 +817,7 @@
Model.register(
dict(
model_id = "Qwen3-30B-A3B",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
g5d12xlarge_instance,
g5d24xlarge_instance,
@@ -850,7 +850,7 @@
Model.register(
dict(
model_id = "Qwen3-235B-A22B",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
local_instance
],
@@ -874,7 +874,7 @@
Model.register(
dict(
model_id = "Qwen3-235B-A22B-FP8",
- supported_engines=[vllm_qwen3_engin084],
+ supported_engines=[vllm_qwen3_engin091],
supported_instances=[
local_instance
],
From 223ce70a6e0be0765ff803560f46b5a17584c80d Mon Sep 17 00:00:00 2001
From: zhouxss
Date: Thu, 17 Jul 2025 01:43:41 +0000
Subject: [PATCH 22/24] merge
---
docs/en/api.md | 4 ++++
docs/images/header.jpg | Bin 348190 -> 571413 bytes
src/emd/constants.py | 4 ++--
src/emd/models/llms/qwen.py | 1 -
src/emd/models/model.py | 2 +-
src/emd/utils/aws_service_utils.py | 2 +-
src/emd/utils/profile_manager.py | 5 +++--
.../transformers_embedding_backend.py | 3 +--
.../llm/transformer_llm_backend.py | 3 +--
.../rerank/transformers_rerank_backend.py | 3 +--
src/pipeline/framework/fast_api/fast_api.py | 2 +-
src/pipeline/utils/aws_service_utils.py | 5 +----
12 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/docs/en/api.md b/docs/en/api.md
index a46b97bb..1782a010 100644
--- a/docs/en/api.md
+++ b/docs/en/api.md
@@ -134,6 +134,8 @@ print()
## Embeddings
+> Some embedding models may have additional parameters or usage guidelines specified in their official documentation. For model-specific details, please refer to the provider's documentation.
+
Get vector representations of text.
**Endpoint:** `POST /v1/embeddings`
@@ -188,6 +190,8 @@ print(f"Generated {len(response.data)} embeddings")
## Rerank
+> Some reranking models may have additional parameters or usage guidelines specified in their official documentation. For model-specific details, please refer to the provider's documentation.
+
Rerank a list of documents based on their relevance to a query.
**Endpoint:** `POST /v1/rerank`
diff --git a/docs/images/header.jpg b/docs/images/header.jpg
index e7e299ec71855a51e1a87049177368483c597e6a..39a1917df6e5b8cded407db1ab42bf6e6beb0a04 100644
GIT binary patch
literal 571413
zcmb@t1y~hb*Ec@r96BWqU4qm>Qo6%KHypZ4QW_DF5D`5h9ZE=dcPN6ADk2R6BHbV$
zo&Q1a`+lDH`QG<`z1Mer12glRHEXZE*4le!ueE2N%kj%u;1kl{$pHW~Gyntu05|{^
zgcN{*2m<~A5E=mUHwFL$2<<=E0>beZ4-^2PWdGuiZ~`#?;s<5^-eUf`{&U6#PuL&@
zgHJ$!KNj)_L!dAq^KUvoA73WqUlKE+e@n#W$b|ikF?n*a{^I#v$<^U~Y*8srnu8)_ll01`@gZ^(zT~
zpRe%0wu8|tPXNFW_m!-F(f?P7#LnK^7WAVw$ZcciZR-Z&TG0HrwjN$S0D$EI(pm2L
zcwFHG5R-d>3WB)s3g7vI-(F#xKe+!lkG`$~$g==87lF04*Bt;L+6L)Neztc&ImFjM
z%y-w$=`M(6K`ia&=57b#5D;^KJYb!~SMo(|{!PxEf5X<+Hvh=9w!ZTZ{$mSR60G>P
zledSf_5I%$|1S@2uD+nZesApHk-*VQSrwUeU(ul2Kk{Jw%!V$F98Y=OM1KNsr*IPwX?ph24Xsp?r_&t^Q!Hycpf=8DPHk_
z{)4`B@=?(TF%^gl?Y)tDfAO?=x$FL2@2G>9g4|#96>BdL{i*lD*9CP&2j#;^9qm>A
zk`LnvaMU*h>3AUhwx5%MCWwhZtm*Bdccl-g1I#SIQQ?aIo5#yn_sW(VAl=LUu97^6
zLAzi{4ql2^ez@WP!Q`nUd|EwF9-yK;Z|
z2bN^-ZFHsEl@CYl>~AAM4BC!?V}BQQWiLp_pz`pMzp4u?i-GWP{r#L=(dF%3m9FT_
zAlCQx)BC%upO2UR6&=(Q(d{ro?u
zS2U-8Xn*8!0}j7S`2CjR0xW^Gx&dl{D|qb-VtcU6-x?SI3n0bmpX-%?&OvHbe$3kM=#!|*o#Nx!D!H~v~z)%N|vfwU&A&MdP
zw;liD$6Uf3z?{Pz!JNVTX6f|E>p%Ra0nC6F{qd_k=<|Q`4+a}3QwT#ILkg4*O2(kT
zpaHls1i^abFl4}*g+QvpUlv|@Z$fmYy71F>?y1U)(v|P`v`jrYXF#Ft*}N|
zFRbMX|5d-@@7BHZPi?xt^>G2)SL4qW4*>kq`8}>cD0=@z3;O{8QmvPlmv8<>
zQ*8x+DiQFN(DW}FhbRD$-~&KUldZ3p-yeAxS0_yH%|Q%4ebfL0zyff9_6PvNfCL~5
zAOTfC3(yCQ0SmCdI)eS&3%Caa0-?YY;5iTjBmyZw29N_30%bro&;Yyz-UD60XP_S#
z0VcuTvk0sM+rR;E3V}edA%qYz2sMNe!V2Mn2tY(3(hwv>9ij^{hFC%FK<+|(A%T!E
z$a6>>Bn6TMDS}i%njr5XA0Y#fZ;)BYDr6UO0);^dpyW_`C>xX)Dhic@szLRk=FmG(
zPiO%2F*FML5}FMyh1NsgLq9`Dpfk{Q=spa9;ln6k*J0c+QJ4Zu3uXdyfO)}!U}#tp
zEE`r1YX;kS2sQ)TfE{CCVUS@kVQ_;zSQ!I_VT<935rh$mk%Cc(QIFAqF@!OTv5j$o
zNr*{@$%QG7se);Qc?a_z<`c{$%zVr`%ueuGTEslS!os4!V#g8zpG9LVXRHTUkyz
zlY~=@^A2YaXBp=d_ZluMt~jn1t{v`u+-Tff+-BUbxQn=_cqDl2cv5)!c+PmCcrWqF
z@jCIQ@P6Ur<1^!n;p^Z#;fLVA#IMBf!JoxHCLkq15GW9s5%>~B5fl)#5qu-~MMy}<
zMkq^YLg+&nMOZ}GPB=|?L_|i!L!?4vM-)tyN>ooYNVG+aOUz0vM{GeHK%7KeL;RI^
z;~MTYwrdL4tgi)KOTE@~ZS2}U2^k4Li57_)NhC=z$w!hEQfyK-QY5Jz>0{Cy(st5$
zG8h>PnF5(DSr}O^Str>dITkrPxeB>6`E&9z@;>ryI4N8ZZUFa(r@~v{GZZiiHVPFA
zSBhwgYKl>c6H0nYIZAuVXO!iXgOrCD
z1dR^OJ(|}voirP??@Mm+k>w4qY$ZK0O1y61@lgEBbc&4F(DZ
zSq5i@1crAEtBhof(u_`w35>0bYfNw^IVM-8mrNZ@+t=x?D_{4$o^`$N`YAIzGm7~U
za|QEv7CaVF76+CDmJcl3HyCed+z7l;a$|xOmsOP2ku{mMoAr>5oz0Lff~|pVg`J9B
zg*|}1gnf#GkVA&UlOvmBm=lXrl+%SXowFYSMF=4r5vho;Tu?4yE+?*Zu7R7FH^pw=
zy_tP;jGKU4j@ysBgnO2Uf=8X_5l;ip7ViyS6W&z&0%W=v%%azJ)$@9s3$=AsrDu^pQRA{>myRCE^
zeY+1yibNq(kkg9Higy%C6t|UxlpZLxDPt!j)|>GJCa>UQgq
z=~?O(>mBIJ>qqNPqBu~#s1F9j24)6D28V{X4dV=FjChS68htjVF?KR;G{H78Fv&OB
zH$|Eznl73NnLRNZHD@>XH~(lsW8rGiVo7XiZCPW5X=P|tY;|U>Wu0xkZ=+(9X0vUp
zV4G~aZYOJ(V7F>7Wgln1>>%Y3=df}|>Q4NfRYw`eB*zUW1*a6J9cN|d4Cg}^Etdk9
zOIJhJ3O5`#E4QY*q<3BJcDOUR`@0W#aCtoOnDG?%O!VCLQuE69hIpHMH~Nr+fqt(q
zhi|y=te>=Bs^8H)gL^gpB>s2(d+#IeKfAvipcs(%0ONt}gAak%149F6AId$<3W5aL
z1bql*34R>B7@`zX7>XC_68hy4-=nxkdtruQ&5!9HhdiDSR|+qALiEJz$ykI$M8;E$
zr%q4%p51!(>e&U_4*lsl-}9vBXOXs%pI-34c=_To$|0&RS~&W33|7qDn9*37*uprH
zxPZ9%c#Zh_1f~RZ!d{|f;>RSxq>N;|WZ&eOm+CJYU)^{W^Xe?cF=ZrGKD8o^F6~*`
zVY*%V;A`2}l(Oox*|L*zuyXuzmU0bpyYoczit=glpXZ+!+%1?X
z)Gh2N5-BPwrYnvvftL7|td^RU_Lto*Yb@t3IuPcwPxq`c|%0Syhczt5tuf5v?h&
zWvxxCBdbH#L+bC>Z#OtL%rqJ|4m7DYeQ1_wu6x7trr_=Mx2Y}gmgskQ?;gLqYz=7L
zZ}Vu|cz@^p{0GYqlkGu(;A9C$YuqU|p
zVPAWH`oQhr^bmbSeN=cXdfa_tc(QzY?+p9wX{Lb$GLSQ=e8~r)|8~&3+{=O#+0Oepzud@M0H)8-$
znF|1HpnPI80MI4`0DUn4dh6Cd`(NVz&k!%lUE=cnqfbR~r=khe4rG7zPa7FfcK$HcTwc-y1lF|K7mS8r*QMHr#)<
z-=(2Y4DcTp3lr-f`u`eSeg|hILN40?5*&;O8~_Gk2B0Jm7zyOE1CkB;6!W)&f4da`
z^#m2e#=*tIzXm`dFc=gAh6(Bmaz4D82*Du1B)!frhef7ijm_*yE)W`>chnIyrm$
z`1;-RzaJ3xIQ&V((`V?o_=Loy*^aCo0>ngcXW1j_k8>`
zI5a#mIyU}oVs>tRVR31BWp!<5_t)P3!Qs*I2{<$J#~kqZrw#wj9um+VCK_^yQN5_i2r|<{$
z+^gS3xUTB2ypnGvGaL6Tol0M01UxeO#A7pbdXio2>48&wNf4eEb9
zbPCsWge2%70kG)5@3%)oK#G@_^p#fZwExKYEsI#yL}4&~oX~*f6Eg2{$ka}!L2xVM
z(20KHf)wx$M+#xLs-OAOP%jUV&d0FQ7tBEuxNr-y7ZjM-iz#EL5IK};Aeor<@-mT)
zL5pfS3Wo@jttUOuf}TRoSz6{o+@YWPBQ!-=xy4WB9*`>O>U$0+R)zY>G;;3aDTm4m
z2>~2yh))mKO2yID837?|MS6Ki-2zo2=gNEi56z>u@bV08PalQRk`iSX6k?mau8pMs
z_<+cwjz`J)(d%@V%C_p4aQ4BH;iAD1-p-4%phQ<6xS|+|EpPz$H1^=ABraf|&L4^|
zbHU6CcWkHtsznxH+jQ21y?QaKE%WrkMS+4M&NmEyY!Qdr=&AZ
zpCR}@r{Tqlx~AN;GLDWMZdEM>{h`mqL+FHo75DJ{UV%Or!j0~r;MuipLPyx@>gX#i
z%{o=?4a~>u2HGKr;%mf0Y>Va1&OM7IlwKojNPc?1A!3snX~XgAR)19gCx5J(25xB)
z?RmismE3e^lQO!eroc#u+$suQBrtB0s645oWAe(VN^b$B{TUtO3m;OjCL2J)8_C`e
zk1VX}Tdg^-UBe=jfUuH(|E#!Jmegk4Q&3@2Dn9%^bOzP#bAK^1YQ%;V%ezxgvyS^7
zB=A~*?udUn%EQ@1jvAiFca!K*B-+uw6KMjm$N8>j=U1$9So|S|SV)yPE8(-wa3X(;Ri|S=FhDdir?!afV7@tRmQ)LN%6>Byr$Jqyz`Hm|3{Jx~1`No@A115XwrJDEHdCHH8
z>V6P*+iC>_pgrEN8n=ZFmMN|jLdqU>?B$d(9b|`8Nh7K0i}Ac-hy`&{a^-=-d)ypM
zd&SyT#zmo2#h=;;^w#VCdNd7@;N$35a_t)AD_lc-xVERTW$*~>*o9fAiQL%iEF0=L
zZ3R(vW**MwpXCOS#uQ$h&dmZ_MW7zOI?ki~6+&NZ^qKJDS5a$7CL)(=`pEn9RjlR&
zjcbUQ>|{x`CvPYl3JP;TRdeKeF367X3fHqGPRzhZjMF(+qb9B(LpHdlL?W5`i+(cC
zTr-=zz+rsyXNX2z}rxti8*QSTK{%1#?go)|`}?5LsG&ndd5
z`DEVpF8}I=XL#woy}7L&wGB(s<`A!9M}i}2^OKsmqG*5(w;1NG97hr&@@+zU>8_73hxWQN_g
zLGe@{gWvQkA5^}B4_MM09x`gwH)Kwff7{0o&K7SxC)Cd9X21^>7Md5kSnE)(NK-Mh
z()Zmp%3!Sd6rGe{Oa738vRyro+vXvo)4Ue;v}XPG_nVpl)OuEDef;KXRE_*0kS!`c
zch9JiU71gYPK*kx7U|oUKt}^^A6`w%^F!Uy2+#Z}HF3l2Sn
z;J}#qXOCeg|5C9ki;LtL+skn|(T3`%)tz%pTqFQlxa4mLr4%W#(q&0p1&bfk#G%U-
zVMAqV+C`I^KQ=W5CkG&`aE433S>)6p!!QP-kP?;qV^e3oGUr?476d8b;z#c4u4>kU
zHJ~a-uZZZl+3FEFT9rT141MK6#IIFGcK7j8ws7MG?_gF5L=zqVIp=dG&PtJFvt%<6
z_4~3olDCp%K=-~cy=9dZIM&_Hal0DBV?Yx^{)fFfK^EjNiv+F9i4D=CR*}?OU27t5
zl6?Wr(K>7KMDxVJ>uKa485Ugh5-c%1ufz1
z$;+H8YRW{I&t=hLatC0!+gP>j=oXliW(1USPRfM}siq?^U5AS&6*-^^gSCc{1|G*r
zrO$Rk(Tx%ie?Cclw1;!0s8|uIrql5yg&g=!24C6+29vs64z(|Y51
z6qxFCb|~#5s!mmL^lGNv$n&hwxpeKn)S5!ROk?vl&M1G7Pe*3KaN|Njn2HO
z7ynaV?$jn18(T>GJ6+HxWTL3x`CN(nK>?S;o5!oQ#@JGbPpZXP8oCA({JOB%l)+b+
ztzro0uO-Z`G5jVFYfVB`Dh5miFl-o7bz%{_U75=*;a^x-7SEm-9~P?{0oasawSh|M
z5h<$D`4q*3TWO%^^bSu^h1F`)UU~!Vdz3nog6j7B{LNw=suTwF(pKBXxfsl&5Gr
z;zQb~N+ddU=@HuJRi;!UVGqcXFlO`35Ua1WQS{`__d}H#q|Sd^NW!}
zn&$%Hu!;0#R&C<8xxCD0%yT`Z9thR!7nMt;-E6-iZF15r6F#msaf6)+;Z={tRV`%G
zGOBKW&rZglPJgxwQ}3rPc3T&h(Wg^F!9C|QcSHvPd$A&2
zz?cjdUcPbPif^BKey>2hAN@8xf=Z=~a>g=ZOWPhLT44I#|HvrI&+;dxTdVq9fg9Ty
z{Vs?0!|q+ta=dTTL@9@BtiQyqL&|uc5O1-NdyhNhIF*)l@lge)cj_Eh6JpA!M=!D@N
zaeLY)i(jfTo>TW%&qP^T5&TDYj`eq*yxa^`&)ACM>RmW`F~`!`_=xe)#_Xc0^g#uc
zfA;oP%7JV^V53Eb9Qv5x-k=O2A&0f;anASlBOUX1_eazg9z3mSVdT1PxTYWUs_`;_;Tnl3&d0g|v*a+w>9a*lWeXd4Z$1R%u_MEZ;ne?m66^
zrnI)*={darR%N!t5VImq-7ix3XFC(#*QTAZCv6iY-t04(LDnT*H@pQBli6jK2VZ8P
z{5a;`iSX?-NOw5(WxtI!C7kWjQ}O0_lQW!u^vtG_V#06Zo)g|h2u)x798XcBP+rU1
z8$J^E_)f?ogHX%EhTS$tr(z{reKifSTeJS|n1n^SeIJ&)FL;{+Kl;h6FT0KLyUg}A
zLUKd8si03nTA4aX_`jz`06Z(#OsKrVO~6CpA~8c=Bw0Y^`~#ui;>!XhxN7#wz{jWv
zA!>zY=>|q$YKK?8lTAufe)1H35=i0mnFwef+Grx`}0gBM01V<>WJ^?dOXNi
zd#m$SU;pupL+vfLX^v^|?YW~_j`mhD!o7zUL6@6ilh1hFGR-&SVw>jNw*W^#_3^lv
zG<^qkg1MYqcDCd+rxY4jBJjRt)=zWrqzk{2-cj5^HJs|SPK0G%4*bWcjmW}ZxylNM
z#`(a(C)%nF#M30zSNWp>L^X$s
zN_j$!bo!<#MYnNxr8s)_3PpryQ!+ZNo-;
z=Gj|;d6HCNc9T7Fs=|$w@jAyYX+D@9KX8mW##M?*6e6F;+9Uq^My5}BiqElX3MM-U1NQI`tsgb8-
zdZt^$4^m!q?8p+ZNb33FJtT+V6VTzgNu3P+w-EzP!q2OAGCsxnT@e-L^;I?H09_vw
znBj#H`(RFy%;KYm2=VPIOHz>s6iioHxNcaBF-BBk0s-zyt_70Y8514gTRa=({J}SK
zsa#D@iKcNWtAx)-kczbUF%|h5NBN3t2%^)l;E7l<)XG!0=!=4`5{j@bE>AN7of+jc
z0~xGmQ?9P!)OK#0N9W$~;6u#qX7)SGTRVL`EUE>&4ymzbowYtqF_E}_RgE@g41Zf*
zeo;?>wlzY;ey3k|kmylT3+z>SSo}{^IJIc;>S3{m=U|E=4+R?@WgfSXrjn3&A#+Jg
ztlrkpo9GWA{B7A$vb_8gO>jCrepgSOlA3E}g9RknX`zO2tD9V;p1NTTd9q+Gyp^{d
zDv(z0G~97od(yw0$o7rNdoT~%si$~^xr&{fCG8Dee1nW
z@bRuM?C6XT615S9>!g2-Q(>jeUIa^@x@fmkuLnJSWDrr)QO@kXAvRur$P~M
z&{Vjb-NQDAaw2hqs@%5MX++bBmfku|B}Hs|RkvP@JeT;Q9_2Se_(hLJcw&OjaruDY
zow#1;`>%P11momyQp!72>GD9a#YTBHjosBe2O``
zg?N_A9H&pQuY32oJ6A?{J#vu(o0}tc*vin(W<1T+5t{{_;AtG>`nv9cH|ms4<<9+_
z`^A$fX}R%V%&ht;5|#=y6V*w@^&Urw>x(~=z?1y)jYY-Yo)*cQHa((Z{xolDq?X0A
zQ+U&6t+ykz(*zqHSNe^rMB3$KbozrwoCcj_MOEmF|!i1(T}K41GYw9a??lC=>4YU1nm9{M>Euf%we-YKT6=lsF|_g1wM8;!bT%
z6}Jk)%~*2z(&?#bjI_w}&*{&LnP)!}cS}%`({&eeRLD}e1Q^A0gAY0be=cAeJXHFi
z%{I$A_3{A8_*%3gwd*J=uHR%-tAC%v!@yp|-QHS6jEM)){fbVNSWG5Sn}KD3@bq!N2p@u62R*;zO%{Q0Bk?Jrrvdxf2JbZA{~jqH?G9o6q({+F)|POx50
zj{Xn3&^<*@Ua?hKqU@rn>4^#Gm^bq7^WmOZ{-a93ke$7Q5r!Xc%TVJkxF$kX_T4@n
z78M&Iy5QHX3Tz>$b<(MLtq8pjEZ8&@DdIJTgt%H7yFTBM4W7}(#NIB_4W|E|B`nPl
z2MsbapK2$KJ;HMD(KB&YQ(e8amVvLf>?r(mLutX&{F)6GSKkG#aJ8g((#eLWf}~d>
ze>-~`x*#@U;_-r)|9oBmlvJC{a#aat+wx8=BKc5jsgF4~E2@N1zalYso}@VWulq%ZwKBEag-cb1z!&STYQ4ZqJ0B86F!7z
zmIjPPpDVTb#ffMn@?QjtrTZv;gUKV7!1xjzaCCIP0|iFv#PFj
z33Pln>9Ek8kL_f1*e|29eVE31R_tCA_Sw@^Ufh`KR|00-ey4iSHJ#{;-*ZGSmadfR$X4;O6$rlbF&g7??u01TINdN9g5VxR$x*Ut17mt
z(n+XXfuesY28_Pu*N*Y>FHLiG*9^>+Go;6qfToA7s_43G_Jcl7Se+UrZN}sDc%T!u
zs<%NR57PQZY9TlnR>K0>7&CYop`$TyDb99}TiaLBR~b}xy%Pu){0h9u}9mBDyMB@+yua=wLtkJ1A^yjUPtlTamAdn81oywK`R;GDUyJX^Vv>7KBxBbBtmKW-30A-7mO%SJvd*9VcXXux*-9w*cUTl8
z7s|h?0lZH9C0g-}w&mH(PXh#we&m8pL20IydWK)RhSnR+GT98+V6JBEC$EQV1k0KW
zBMwAQN_6DDh<+RIqZpuw3iBr(h??%CW}D*+#cl40VnZcit~*+ysN9L~*^f7I-BE
zVi%n2FNS576n?HHh)SUDjZ4c2NY&J8k$Yv2EyM$?qIOnwGdls5IC1@wL#IMnI4fWY
z&ZcP7BP}ar-w)7b1mqVQmZ`-~6w{`ak+G}Bvz6!<%bJWwJE+9#jkuyc);QR7s+?us
zAvW*W>BO3RNE~2`)=X#;O4nl=F4mlOuHV}ypM4}+4DDwZ64Iu3
zc14eXb#Oni?(a8`)DZAh?5WkZ4EQq$~U
zMZpxK>UV7y&y@7}_pPK?~I?YuB
z56fDdkn?*}E+jaeGR$>R6dc{-^w1yIa^jDNM)wwog2k5w5D!1DaRvA**N6=*id)t*
z>KzXpw66t=FM%_bWkES)w~Ej^P49(xCmE-kj221uih_pB$+vYX>21sx?-!gG8nUs;
z3utJvMKJp7o@CKQsPdXt6_H+OOvN9M-E6SrD+zWM8rb(y3mtse)S|Vl+v%_A<&t!&
zoOh}X&-JK(aLu>X`7meGf8|G+*Q!9}M<%_)g2M8r{jIX%`)?(bED~DX+yj;m?kygvL61{)uTXZooooh%-*6raZ
zciO&^A+lQ@rnPsJfbM)SGCw
z(WT&%je+iy4@5GEZU(wSS1m6x2^e5(jJ)k^)G3A+-_Rt9{(>vUK^i6R6iouzw
zV-m)JQ`o2W3k**%t{EAcbJMWKg_=$lt}r(|4QdT|XG=R@ZpwJ*?;XULwKjVu+}Y$c
z;&-MgqlrG%P3?IkX8s0K$$c@>5a~V}O`+Utsg(q$v;V|?A;A)MVw2=fvOeZg{h$di
z3UIK&?q|TBHNL5q-q-oHsNz-ULneL=Q(`%#<=_E^FEaRiJNJr2yuD-yrjb++PS-_S
zzr~sHorJRnBjPut=^HKq%i`mR#)j~P?*6?qm-M#0io|aJfppZ7U2@Mp-v!-b<#(C4
zYxj=iCvy!O!p<9%FM&O}poAQwfz0#8@}vmHc8{&~V=m752BA*_mb_1Qh{cM_5EBdw
z{+1Z^p~Et0cX;r?W*HNGy=>VfkabJQ@Dg~OWBE!I{~PoY@RRIljhmra-a-1qy-W3f
z2o_76yiOD76|{A1tMl~rz2)F!ehEz5IG5jgpN;-CV(jg$GPQa@z7c0YXZ+Htzx4B=
zCTFdI!I=FaP2V-AyP`D=`6;4Qba)&E_5(%
zMlzTf=Ca+les(Q4`WPN<4?IX#<(fY?Jm@$x4cMuSViI`!yuD*fAbR9~?+`sJ_Vcv;3O{{`Y-#|FX$^vZ7%rfx)5r4jfoaY=D_v(o4y`b0Oa*Sp7
z53)|p&nW7jwxoVHuU>u}+8_Mm_WWMONB(Xo8aQVh3BJonfD`zIvHe$VSh(>rUcq|h
z0yzC8{&ll%{@D+|@LZ=+r|xNiz5wr$p-Vun;#pxPao)bxMC&~F-nyW8oC&ON(vcaq
z8{gY@R`R)y@%be{UgyAje~w$hWl>q}70M%E5f##1&(-HRf7Gmv-FS2f%wl&&;~bG8
zk7DP!rTu+^Nh@Aid*2*Ttu6$61tK?UefL)cYSp6q62Q0^<*EHSaZhQ;%9G9{y*$f_
zM+t*g%wFyi2)2pdElVew&?3Ef-~7G)6|>}Px}6v4n;q(xfOvb+>e7G_k-t-Dx7_;*
z;5_9F$rvszb0lJx`gCq&(?GNBMih&CyNfpVzU`@mzstDnKk)onW>Z&VEsf2(gjD8qZJ4olzp355es~YV
zhavHoNLw_`_?g(Kq-EZ;f7k^{*YM3zj*29TwBw`(*-fK<`pH9v`OwYo>hVDPZJzMC
z=MNurFi4L1u5vGt(z-k_M30(1lh(bFFyrQML1!P5#~a@LE|l6tW&8*q8M_en^)3z4
zkYy@*)l^v%3;>e7zMYq&>Ih;Z+@7(0?+Jf~JoaxhgKVlZ6}_kG3=Te`eSY$lNblW_
zpA2L^P=?J~=!c#HTU?HVMnvpjk)7fCb_C~ctbw3oX#)#+Y^;Gho+0w%Gs13g@@;ao
zq=`De->1{qsIH~OaeBJWAi+1&K7Dgt@nAn63<)%t7=P1id(rCUE+|?;W{uzXipv`A
zIVT9r7nT(bH3juv4*gs7&82F24UC=4L&)_$&ll|utWj*uC$!91PNFL-a9
zhX3R`s#ux`%j+$LP#T2$^_HT@aPc8DR6urJM}!@1xUOEN?+Vd#*GAAJiK?fIqad^z
z416+)e0-94EeFVhT;YuGodAjHg8iChjgnqrTPHVwI+eHtfVr|_K5c~lX&qyFpK!{W
zer(^mP^POG_F$>f6Axn5SZ3A_DFbRK!ina2RYTFHPXk>!Tc~lSX(A=n8p39*Ra4;u
zTUyW2<{Y_wfz;}zWIBS9aPsUau`aRl#G6z+T#Az#+Tcf(wHyuZs9@JVN2e{|hC%EQ
zJROA!gHMygdcbtud3B~rpr_>8#~vW~y2Ze@$V_?GPRU$dbo)wRyW`|fAuK8^E=T_u
zyG1$T@b~lOT-Awi_WQ0cICT?^Z$!T~RC2WE?|q|smiu5NC+|HCWrCFt-MureQ^IYT
zGn)Ciil?uy4V{_htR+wQ1e7{?F{PYL3mYwJ3{+Xxyn_)3aeWEmy#(IZi~nrng|20g
zzq@gi?czqO`nadJIk7}cA_#R`ILDJcL_n_nPgX-C)-CwsaRLg6N
zY?5uurK-6o);_zwQQqKMZH)}Z^9>HSka@SQSVR}-BNAST+4_Dgzva&^>U^TJ|!{rfVSY^$azKF931l>qhxl
z2Nm$yxgON3Br7jIq@tUAWXceSY+@52ly6fb!}sbseJG!QxiGvbr~~Uly8BXcBQ<
zFR;nk&(|-F@D;Jn9WYzClP6CSQeuiz_*Gqh7yJq&`&AWZax@pkG#uu>gC|B(>nvD`
zWK@ky*t{S?s|G%;aP1*BG0)XQ)9q+cvpb(dHdB`T&F?aNOSID+h|9@SBG84!iI!0<
zCf>pwFKtmfD{TGAKSO*8pj2+&qGa{^*udx$B11U-b=FkpgWN|ii3+(-cNuAs6p@Ur
zoro^rgHH)DKH(cQ@5r$2SumdD^T*hSc9C|BIvZT7dntic!C}n4dxv&Ug@sU#tT&b6
zxhp#{RJI`jwq=3_K1_=|MjtJ!Ml?gpnQ3PEQOuEv)iI630Wuw#f?5+
zbW};zbRfr(#C&w51>5s>27(4&uJWb$kf*Jix2$*zgnEy@en`m(dyBj`n{L62*!`eK
zP_?Z*`6JroeBeCxg3R=Yc;;P(WL4;W~D{QK=RqhPCvD0q?&hd*<
zf8r!#|6ZU6&RUn~33Q723r=X?>IBrm#X3&tT$+biUb!##6|G|s!&b$BQ|0-mq&HdB
zlvPhr=nCBkdPJ0gbZ%5mK@mQRutN>WB(|z%3K3o5Y%de^A+IHodVaMIV=orpQ(yLD
zTM0==&o@D8%F)}{DOLK=g&l+T5Pn%wa6O+h0M2+D05&14^v*M}xNXJqX$3AYixA49
zPqLE3T+CH%MTo4X$E!D=1!hEv%-h{g8Rgb?2O@NMntY~GrxS@
znG?NHHtWuNx<|^1f<4WsycE7nhW5c6zScUvPIlFA>K~Jfh*hx%b9bN3HD7Or=BZH?
zoTr_j0~8t>%l9^5$$+<3{(algJfj|Xt{u!t2`jyFU@>L0of{{CjIqUWs`j;7=xa_4
zb$|F9B_UD?Bc`W9=El8_CyJp0S_%y*9?4
z!(Fw!_0s=i+|VHD(w>yKBz|OuUEuWLL6y?dn_|fKsDYEY+MCF}%=fM~am!Mf3hWml
z#Ef|lC2SwMx}m+LE116VaIYkGCu`M>`kt*~e6OUl`Jd!cHKToR;0LpLAOgI%;2Jen~a@
z2R~LFObtqESn%EshL#EH}99rg_$}A{s
z)nuiO0|T=07SCzjfnJgB8)5c_kGK0EDXY9=_Rm%;o!nbWTyd?vF=!D+V8@V{rr
z{yAsfK`R{tA7N{<7e0WF#`mrVcwT28mw3OgUVzJjb*p;{Z%oL)eted*-)Uoc6yF}8
z8(-usTxnar8e_}wb_WeiwXc?{kD?Ei^nu+4eAB7Hub6ZX3TXwkH%g*
zGuHnMaT5@9V?LQ}wtvNJ=C@|K+g8z=KiT!ZJ}yN1Cz$P-2O&ALSA@5h-`0Tf@OFMNnTbbs+N{e)AWUnxnSbV(uHM^R0BYi)5M7yn`_kZrzj>{xJ>L
z^%5`KgU<~lzp=h+M6mZ&lajGt8+-3Cc*pdE_~UMrx%jTFAF0PX=dNc)XS0#c0
z=Mf$BE@n+M>N^wduI&=mk@+$L!RbCu2y(iy=Z@Z`G^NB*V%YHYGX^Eqi~bYgdHw3<
z1VhCjh3;|FIk
zMnb5HPr*fEhRhr_O^#D_rv5qpIkAvCUIQ)8_29A$L(P3g(+zCL-K=_ec0Ul@4A|7mJnPQZw=8Cs37fDdWc!jZL?R6V`dtBroI41G@h7G^7d{|EPYi+>Z>0|aHY}KqUdq&3mi3<
zQ>`Uj4?jBtR279DlqnuKa!(k~8_yf*-nilR;!y)Tp+#JQqTqQsA~04dC-ifk
z35Co{;u>W!yS7U}dZfVJtr^Zj)qT8aIiVZ;zAT8N5vrHx@5M98JXUwxLSoa@b7Upy6*8VTgzSB&QA`+P0#b9i_+$8w-k`;
z=Z?Nhjjv;*1J5gh$k{uIaSC1tzI^Utg4dATn5G7sd26%*0g9HXy0~^;y95imVtdFn
ztVaC^50(>fmR}Mxm?x$-Pt_{-j?U-WwLOSCvXaD{Z}NRZ^;lfG;?+#I9MdIW5=M{x
zeL}+9azI*WoAKtT1VPIWUWHlyD#ImdiI)0t)%iTTRLOF2&sFojEEncCxRg$Ndwb_x
zl9TyAKD4H=Dp-b}cgOa9m1wMX45jXqDvnNQFS|!c(Ij6@kmgsaRw7Vk{}2r7gv@4+
z5x3V4s`>CX;jr%3iHa%N^)N;*lyLR-NW%8o1dU$dWL3ruQ&Z|fKM`va4brNQg}AYr
zBRZjGi5!hNnfW?ZgC*82T!vpnyD*hvSB(iJsK?z^y^5s#OO6>5Mj?faw>Sl22V=38
zCj3jXJd_x@k%(fIW!~>|%~_bXMjL!>ZSyCFN;5y9N6Fn2ovlyKSR4<>ZpBbgbz68Q
z%6w<+(B$K)Invqw+G$Do*3@<;1K0GGJMYWQ`9-UOE$y=xF7hk1`WgIKFdqoHjmEXjvg-jp$F)@g?QB@S*
zi9ApK?7&X_`Tt|?y`q}>;;6yUl_H`bpg>SSI!F}}2#8cgKuYMLO7BH#f+AIV6;wca
z5kl|1Bhove_bLG?ffVQdzxme8TJz1rta+F<56Mch@^a6;=bZi9zrFWyPfA*e+ltyL
zM+wG$L7pjw2HqJI-PeldQL1gkym9UOa;Vr;TnOmKIEKmzw{gtg{Inr8zEjb+1=Fr(
zM+w6Cn08wLahWnZi>FQ$uqR%e)-7VW6)@1*Y8G?-*C(M@sco~&Yd#8*A%##H`j&(Z
z3rrTPx(PZRgFR8aAUiP&r5UM3@1Hr$RKyIE_B(gQIoWbd7BXC2{a(X
z5W6KCGx;nifQ>MXm%gf}3X9dqMo@wtoXE^Z2fo3uqKDS2BvwA3^@@Ge{C}?afiYtv``}GU=@xU(C=Ry7f(1Y-9*RbVV7>j}!NJpKv
z;hiVeBtln*Jr}Z4#OkEx@ci?jY2@y%
z4Oo=6FPlEkpSv!O_4g3|n4^Yi`joy@LBRY}kqLI9KF_U;(jW(XxyMuze-T|*4
zwc;qWR}XqxPEMpVc>tR;yXl?^`@ZQ_tn+;{_t!cuZm^*`ykxaZMsE_7lCDD)OQ?58c`oP7TLYGuaCW5rwRXFdd;3`m#8T!
zKom`9Ak6u@e<-kj-hd>okOBS8{!IVZUzwLDPqM1E_vS7|!S_UTbBc$}_^r^Wwd2F~
z6XSK7)A{+mG8;xi?j|-9ccf(iA?5tI?|A-mCzu9e%WONIBJBNLyjO5~Gn^(&CfjV$
z>lu#!%wuBT_wK02Qy!{#k<-`^R){)g=VA#HN*;nPhf)4qU2%L}DNXHG>7j;`hVHZ&
zuPVRlny220OiW%n1fJu?A*rDU>F-4HHC3}dPdqxd#kH33RmjWezRteIkNe}U5AnUI
zVpgJuLaO7e)V&}0i0vqgCXy*u0?Pz(G_CrtpLdK{$=}bHLz-3odo%JAx1N<^es>d^
z#zfy|T3d9`Vm__*W3Q^oUcrDeStnin+kIy%ZNqtx?pJ(?G{2B8=~sLaV6_-mc|`Pk
zzF=JGb;XK9Q&ZTQ{i%UU^;@^>d3CMVgSqra>G$olq;-mj7a<=1*{hOgQ&Q@p0N3EnJr!*Y
zofPt9cU?EpUWWUC_>D{VDIK3z;pE+XxjJI^|6OnRk-Gvzz?9a?vlc%3slp*{J%??*$1aLIN)2?>Kfvq6YH;G^ilN?ow(Q>Y^~Z>i8TO-C8uEnF@x
ziA+hh*2~^`x4-|Ra%4S{ueV)Mk0Da3IN_I9S?Iv$8XXSHp(M@h!R%6Y>-AE-cz2B(
zh_p9)sMpK(`<|vZQ5F(o-ZGDs?Nda97IyO`=R?))1u1Tk^Qz=?kwh)-2nsw`eOBD}
zJgRu-X|3Ed=C9-#KEx9xY}4$Y*uLp-l=4nA*-zUOne>K4Pfh_bO9ebMk2`fss{C#r
zzg*<_VH!W3!h5KKIyz3y?iN>?s-O~iB(RV{;mB9RrNu^k|GKb
zFYDM`g|Xd0q2)UBOzQ({Eal`9=}G>M)_Q5Dtuz+F387gV$v3py
zfO6PI|3?2R>gyHsd*ntm8Pi|sc6U_xTFdLDGOHcr!;Vpy4Fyg_;nPFPQ}*j`@NkM#
zWMP@i1N8R5pE!-wzxc0T6M?Sh!$UlOcHQUh1^=vy-KIc?_)hOS+)M3_&@#)(J*7|se`e~({h+6wnw_;Nq$P|9zpUF(oIU13e^7n9~
zMh6Gg;Z#JkWIjLZbuz*+O{oD5_tZ)Y@>`NUW~&AE*XTlv{^?Urq2Og#9CnQf;e-`Sgf5DuGyN;
z&AX?DpR-Vc4Utz`Ka`I#INtlUaccGtq^>p2oGSTPVk2$gX1|G$@5U)I6PdZehw}z6
z`)dys3FwsQ;xKCFBsUpy!Vt!%&yGjxS=g}`-@lZkB-xsr=gqR?xb_DBIfyOS|^^0
zKzTDeP#NtL>1bt(#N694oczfV^O6{E9cq1=d;BRdla{%UU)bisBFBv~C%Bw1$%Xbf
zP6K-%gT44LYpgSg(^xkT!@-Uq88w?~%uSF~!v
z-_|^fT9-(GEOBgY>_v^$4cg^Ik^aZT|1U2c^+tB?)6sMAuiz?@@R1-uzYxnqG^O{v
zdFU0%*S(1)O-=C<#IBOcfyv|I6U3TJ=+lH!Ojs8;i}T;7Y*M?yV-&gLFlmRZTFp*V
z9`&q+yQXUQH>nJ$qk=*Nwz3DT9Ylc**$cx&T$^lvupl8<|Mq-k82^gzz|aQ{HJP
zs=2%oK}`h9wE&6=;V5Wny=KkiVJjtPG=g>FHiGYO5cDD$GhI5oJzTDKqvL_O>N*Re
zsdi0Qi^8=Sr+hMkt_)hQ_vhH`^VB~ekJUF>e+8@16L?Q!`#H4|-t
zlg$U(JJFVj|JN{u|7YL%478qaJa+?X?lxsa0pJ~bqCa@Xah6Rx51B`3#c5&aev~wN
zI$XNl2_Sk-5B^B^VbAOm#!BdEO8NN@MB}Z&%#>C$6*xnId27I-*I()7kEQXfaXHYI
z;139S)BBnEH?(MO&46~KwL+?&VQnx=gb3RR>^v4V4ba+{xpQsURJ*d1MkND!$OUka
z-2K~rd%ZQ8dj=5USTFgYw66%$Wa8f_30)r{d;(X5DL=yWD?sC325R29rql#>OP}In
zaaZyCeSE#6GS_c`zrXU!6LL3g8{ybeI)*I7K^Cf0^||S{6N1+>J{r>-rkZR-g(GTT
zveX+nick<2-Mqx=hUDw(B)kQ*^bH(F+M1sKrvD?6zO_1DlC1Sjj*-mdb)4mQu<`&I
z9{GawZECJxw21Yh?6k;vf#&|(mqczfMu5ULm}fd7!O%mAvxC~)vZPw{8*i^8SvDiC
z2+uEpvE&ULgtRy~x-rMDWmNF%0@rt-1Uuaz?Ba@Fuy2BZ3979mL4Nk1t+lPZ!?G{R
zBbWY~r;OLaFx1}IyvW9~+5JHOHucn3-E-n5207DPK7D=UV-8yC(Ue}=ej%~b9?vyX
zS3V
z>-|x2I1pHGXyGR-Yiw-vF-TlexgPu$8YY97RDKB0gXU7cfp3a8>h5{glEOoP`t)hL
zZS!bMJDWIOYr({H-cC$Wu*vC4$aU+X;6w9fUvtCAskP@P}{xAe9hawFB0}v<_(w*Kbi>kK$aOZx?jU{;jd^b&6B`9)Fvb
z{6~t&U8YXXjnISsH!XT0vr+M%I7q4ACVOj8h(X8@t3?R(8`lN?i)Bpn563o!6Y?yp
zHBA~+MO}4+K)GukQ{oF1COvH4cDq>NKhQ@~_!Kd_fH(e;0>1Mh2(eL%kEV}NmL~~@
z5WOBiPB%dNfOX<=7haQ$i|L!UTa?SrNg}Y1$|F!b>W`~i$28L>y-|fQi@Ub~om(@3
z28I8LbIhZm?HOFdmU=l4Kx(-(V)jK`vu4?*@l~O(H;LXBy|$ihj<{Ank$cSc%Tt5n
z`B)4dX
zq6pLfK&z|5C?DY{1icf&3Q4qdl|dUCwSWICK5J>vx9|z7PD&1@KN<-Ad-nvk_78;P
zUZ!iV38p<_ZMUm++4`_pn%N+=3BAZ7#z52GedDZt-N&m|CO)%7J6he;;5)6|?F83f
zl*f=fc!tePNs&=fT{2CX(jhiF@%fe4A9pDQDf67@vT?eGt{eP@R3sP4j`!bu{snt4
zN7hm8sABHt6{BOy8S-(NoVOL+M`4RQpG_B@!o?6iH9xKl(jaoCZ8-PKdylVbKOZ1lO&O-hVB;JGnw4Dwo*y%3Gs@ccQFZ0yr^teK2_a#?vdwnWQL5+6ll9Qq|fL6Hf7`u
z-CF+_jkjUpix7H(l&|nAAwm2aziwtm6d}GFA^QB*@1aZmb_N0uPX7elD@-@IuHmEq
z?u*mQhw{1HWu24M(J^O
zeeP@=nYj+EO+hH|IF7#y<5cK(w)SQD#_@#
zv2z78_xKn76Ps2&&G`d>$9s|-b_pW}dVcFMclku1yf-L5@~4(eQAhDjskeC&lk<&7
zQmztR>2i{rKHSe;{@L>2?)!Xbd2zlu`MR?9m@eRPbdbBsA7tf^QT!^;O~OLSN1Y*R
zkJr7qTO?{Xn78M#OM3p`;>XQlwGRfvL}pO|(QLda?DQWPrFsQ<4%80C4ZOAPG6;YJ
z&;@GayeX%ctcb<2oKkuJ5U3Wlz8lpoz+;dTH8o;a3HSl@I-eC6i3wpHcE4s;3wTJg|;U;lnS?z5E)Z
z4yZSzc6r3U)Cxf0ImH&(Gv?IYoT$TmbVje8`gh*5o;`hfnpXZ7T#XFXPu9x-W9JKU
zns|5pBF;*smwDUyC1I^9e#I{p|78y~+&Vj&+mNWZ7g7~X36sD(FzkgWRw+MKeGv!ex0`#W!F8n1
z*IIx5M((%F@%1*%ta<&^r
z3*bagi#3$82oCi<`cS3O)`XzAXG#f^EvCvhS}K+sK|v$E7wqKV75f33>skQOFs+b4%>GfIMQQtLz#&
zd9=>zzxH0kTNHcq0nYK^VEmu>Kisn!e?{aT)%^o`Q*qR#k#!=2G9_Hm0CouR_RdG~
zVAqbpzTiIsq+skWRM8O)Q+X}B6G58oZES2Wb+NePf*>*rt@Yg~RQJ5IK{M>ffZ1W-
zHIwEj&!SvgZ8UzyL%5H7T9I`*>F_=mTP6Mkxx*v?;ajRf*LS15ZR*Ez8|9Ax~A$$b7N=ltE!rp>@gKD
zRXKi;C4Y-uoK6o^LB5u3T@3c;U4U@)TC(|%o4A5ak0t%7$dsHJF7F8m{rNLF)R?lV
zJ;EHhr}v^Hn$p`ptFt`>OxGsodpf(;-c?F_L}R(j_;R^2r|--1;UCK27H@`8zC?%L
zOA3MsTiSeGs2^AGZ#tcPsD7G*mA}YFIUZmyl53N}u}+C|pNv%y24`!jf&{&vtF?_g
zQ(ubS05$T~zq_wAUrqEi>iSX>z&&cdPMBFejBI%zbkh8!qK?r=+WL0}Nx#1+T2SU0=Nygx4>fSaC#Xgx2urmHK`PAZY_eZgN22J^9#OBH7H)&0djUHs%-AU6{tXS1Bqp5R&QU
zr~+ic!o_hG7@c*<$1HjYWT=^txp2*^5c=U8D`0tf+K&D;D1ini(SV8E7``*9H(9HX
z#%~uo+Xr~tk~l|l#d9#k7n^3YB%v5Cp5hn~@FQsJYq;!>_|K
ze^}1)@&?>BJx{S0vG{~@S_=4VQ`+G;T6E^GQF#G=9-93Z$`|TEx
z1kCLwaIyDk@>~yVFd3y*qG`k#$pqmp?(PY)+jZXYA`zbw==m70BLB(Z9zotl
z&C0imOIKzsLm+q_8HhPYun)4?kZ8_d_D&=vZ_eYVIFs{7(jtjgxbe
zN@Q3s$a%2Wk7o906f0MFsmPebcBOgdBoS8ESjQjPP&~|HfnyGlZ$Hm
zm}i?|*G>tsI-d7jg>=K{;p1%gT29KVPoAg%@Rk+^EaMdaAD~}oA=2$4N
z8M|OrmmR`HIRBHZL#2{=!^OQ7I&o%i*BkqcbPt<9bh=8o%$
zzZk`5NvwP@W56(DjWGOgPqfjHRB^{LIjg%e?&f^e#J48MC#0=nQzBMsgroj}oM=&>
zyCHYI@G2T;vhpyC`zB4vzvGSO=R?F)9=?^sA(}aI?+^`S-#WO+9hVplfuY~fhJYz#
zGz$A|kq-5u%MNxWxz;mdugj^d{2m#;c}CSIve0IPPd{&;3sFH6@`aC-at|^_ex^}g
zdl~5__S4nzkUyS#ac_~9CK}p|=+s5njgsO5Fw?nv?KJ07wa(zK8A0cZqPET1B&mR~
zXoQcX)~8E}E4aKx*rs(>&p9Lp1g}OOrC`Z0yn&;D2#|=T=fo}U=VFmnEJo2y60iDf
z^cT6<3HV6tKgs7^XN7ny99m@PWZinq&>luai
zS5ntn6vy0M|3Ih*Ywf*|`LA8zTL8q2e!4;~qqHV|Zy{s>wJ`KJ*YQro`e}aVa!|S!
z?O<)|!9bue)&)n3@x*Fj`uk2yeUpsPZH;TlLj$4Xd`HyK;CkWH)F!bK>YfBNSUepqP6`VGk^P
z;ZgE*Z7GlS8)nW_35#>G4=e%DZw~D>$WB&p+)+<@IH`QqU&>;J`+^89ce#PUZgV{fmctj*c`mdIoP$wf
z>p?4DQd6&kN^Cc&?pD)c=;Cb&S4h$6cDv)~>mFm<*=@a@2nI`3=6sA6{uv-D6FqUh
z8l^hpIPc@noC?%iGv(lT+g@ACKReo})Q_WJX21j#V^;e72T-L4jP+Jb_@f?Vf8p=G
zMypY7fb%P!YJ&taPe#VVZZACNCVko4$o7R{r>XwP5ACQKli6iFbebJsN}~A9XDFHy
zmqFbfY$woPoJ~9a`4|-^&uq%k{kOg_%cBT#(u3GC3n+yzEK7f*%iWvNcbw1r%Z($K
zsf67fkM_!@%{hlGH{c)n?UW%)w#pV53BSx@pauG_CX7`F)}AVcV+#lc
zh&2V0vvRY8;2eHLc|#?
zD-F%D7aA=r*ceyxoA&YN5SjEp3xqRZ>&Fkkht*q}SJQE^8%8V`m}24L)XY1S-etge
z^KyGjqrjqO?}kPnAK=V;ChDK@nWI+tP4XcSCTf}cj(t_nk11s4TR6XId{pI+$J@E?
zC7a(StfxPiyH9*nJJ36|8_cExB~0Y{I88sB4}0Q31GsVN?bRF{fvgUTl)p38f&J$a
z&hXu9y}|EmEfW=)UO%W>q@tJfZQ-X4Qw`Y+g{ctA^!bY~^t=(>hWx?m*0-Im2(k
zZgyZNv|C&H~+I#NBy+QHKbDF*05RotR|3Lyt!8Jnm@*rOx8tN^l21mTj(u
ztN2o_!@}EyFti$&q&3f`Nu4^C4vNk1utFDya|V2;WHRnYxi
zWdS`|H)^n^&>l(22~AsjA2X>!8WjykJWxZ^o145rJ*Ii4aQ
z;X8}Wh$2>f{v<{DI892%d%azeDyWPra@N(7_60w#tKYg-^mkyzd9Dkzgg;SC<>cHR
z5O1&%rbOfu4pt}wC2T#NCKr~vykD&>!WJ51*MY7=4)rUlQvTvjGB^3MW&}nG*v1;K
ztbtpEh8#Y0gIt9AuHYD-UOY}(h4e*29E3qe0FaW{NdL8eQCVc*3vAwv)Ku)6-sn=e*;_gloDA!K7
zFHSMjkZujewKF5LuZ0U#&G?tYtyU^hx~pyz=7+3kM-0Luz+I|xGS~6z=Y&lZsz*#<
za^OXt`-kZ-Xns5iK=~y>Dn;RaD^)Y;1(;Nx&6EC!F$bfgF+FOotQ`aT*kZ~x#z_X;
z_*v?z(9N}PCE``TB$1DUxNljx`$l3%70(q|4M})6>U@!Gn9UWsuK-^b`E?ajFZY=6
zyGesr=#3y@40@@oh;osJ{_L+=)3TBk>sRNBl86aM2Wygi+o(XUl`utg0gZ2W-Rx0G)Oh&&Lo;(HcjZ=T7GJS$-J2x+hrKgf*ibn
zF#Sdgeb-%((>~$Ck4AW>pMUP6o~3E6XjUD4JuB#KN5p(q-ga)-9(Yh$r5ktY4^J6g
zzAfMHKoA)&l|J8sT}8fC5ta$C?(*PEsDJC>Xo`6zk-2$~6Pk9IKQ}?{I-h)BnaRvy
z{6NFy8}<0%e6hS|N3oV=9#I^6J!E8@)A;k
zfIq!rCV+4T5t?dAz2Bv3*Yv6GoFOGoD5AU|G<$a(7S1>C`(%D9)$}-EF|NFPFG;ax
z+M8JojuLMc^dK%H&v$11CHk#cUysFVrCuBM0Z~8Nr}3&E#`GufuJuH3fp1{#k2s%u
zYFgx?7QP!%1Yjn{-CaxEjPDtTI`aqh|LwnpZodfid*Lu+yZ3XY7IwHhaxuF8RlCAN
zmkx|0ibR>le%LrB^Ie}WZ+TE+w6Fb;C|WGH;_Mgi0CF2K+?3)`ol%|Z8)An3lOv%)
zxo=Dr-`O(vy}G|%9cT6ET7UaVqHoEoB;=24R!yo3k2nEw+q0FY58V$w7&3CY0<4@6
zoQ$y4v`9k{Dks#JcJI%xtOY4Khg<3KJbRN&d$7Wp*=yy>!h*u=MXsg1sQbLR&s1Pm
z2S+dz8?$d|@F)-O+eFRU(E&_&Sm`RCA&!eznNvdRyg21|IkW-rwDaE%a)(tk=3N>7
zO}sQsSB*<85qaQS(p2!{UKTiuKVoBp^JF$E+5D{Eb;{k~FSoXmi%y8~kL>I5M$hOd
z$6_kX1eucQsX5(5(|#wfr{Jt2*IU7P8mrsF0$=rcCIf$JqWk|^wu9OP4~Etu5k@1$
zR9BIn-VcHMZz~1PhquEgvgm#iSaIS_*3`lhdRTELQkHrXz|j^_RqSVwy{me#2)&9t
zYhA}|xSw<>EU747gS%ONy!~;smH+z$oGMc}=C10m_tPp0kw%7Ktbv#O-qPr&$bYt`
z%WTy6=*3$x4TGpCpT4|4sZz;pAjE|}v(pQJE6ydr3e4aVwQ=EE
zVN&26?=6*W%)9Ggul@Cb{q2ap+4UUwe#?hux}fhAq!WICPSZc~hH%2`{E77)CDWmO
z-sxwx3s3!Z=^r=Ih+l9;>V2i@)!bCs=PLoyz8`P>+zwOK(+MrGe(Pcs(!dn&Xsf01oTybhcRk{>csizv7OAr(D#TH+C3;6okm
zrJ`Wq+H*-5v@)bzqzhTe6m()g)^00%+8OcW>j}8!q<dU=tYQ1)?IWZ`q`L;q6cgYvq+lkA<$ZfGG52m;DFRW`iAXQGNLbiT
z70OH@U(FJ8BlDVJV*pYai(Z97fXM>S|oIf_j#W6
z)@|cO4oqd!4pCNj#2hlYqFJb^91V@-=6ybcW8o>E{sxLepGpfcbLk)ET5Zv#N@-_h
zmqt(u%`(ZQTu2~0T*Fe=XpXi^{o-1_=7fp;0|f%GHwmg|i|$_i1K2&-&oh_T_3#PHRbM+90^V~gr?}=ekkhJ7%C4?1jNS@gx&Pa7CH`mtPRsuTbn3QN-S`o|vmtCkL(c4lin^&dyh^4d##bPxU
zn@of!33Px(K&p2RQ`Tv3IAoHeVO>GwlW*&SIMgOxHd~t7GWa7l!;B3qpIh_|_jB-1
zMVn8=L3V1u
zjHV^_Qg=+78k*ftOJ6-qNdljbSH|Vr-kIO3$8Lau;ENKRIKDuRG*Dy;VwXj<4^1?~
zAo?{tWMpMinj^OT0{a%VV#bM$kDt&3s1(3!FW~f}=vQ5BH!)cBUZBl7jJTvfdzqQR
zT#;gkKB;(=#IlZD%`D|gyiHVG8NxUb(RJDag*<4R>nHFkX(
zcFO6R;>%rm4IK#O)LYzzvgI-`+h|d_No!G*2dZL@PCjo)5V8VJ6nhyLHg+qoq&=5@
z(hWL&v*thnjB|zDUlyxqD04Yxvad}biYE|SzJLFlyR(SkQBOUwY$FkL_#NN39f!4m
z^~N;X`{CXXgWP+~9Wr^)+mSaLW`whAW0YSFJ}QYT9w7q-X`FyB5>Iw|X+zp*G2H84
z9fc1^No!(k#6{NaWd?A$E!bNWKxUtIMBm7ljW~x>8l8)9jBt+dwOPCCW5^E5yW<~d
zKz({gy8b8u{r)vKRs!iV`tu$ZSRoLmkoauc;^c6XLUtXGxIQrqw9CK#XfjVqFdqY7
zS0>rQYW&)et*(>~;vexAMmhFh8WMg_I{h5A>{HW4bi4u~fZLnys@cTb6D;b>a7FW<
zxkQ-H->@%}ST26~3@-
zZ;T>kHC)oA9cek%!%U+;t13!dMhZyAjt$fQ79sq$4TAG61EmRMK`3AB*@o!W*T0s&
zIESRCHhWia!27pROqfBCiqG_x{}9~4yVW9Qf8WFnyGdsqA<={x<#4qLq)?xnzN-+!
zXag4zO_(mOMX_|bQb%YOPG?K_VPqV)CaQk8GGu$T-m(J4m$!*J_$-^&o|lD>W-S^Q
z2m0Gg&w}M?0xOk3{v*{Vqw6y*#QSXQEnPdrywoIFb}wuT0g?G|t)P{`Ou^stfOMfv
z5~6J@4s)NyE2)b;!!s=sU1|^Bss9;C_J5cz|G)Xpc7PQ#6AbT1P>=lKP;Nd=|FTM!
zz)ZXAc;WUjuhh+R;Ea<5_R;LtRM_YB*5`(P)N1yg^uN2Sa?Afv$cl9!SB#h1UDA_s
zB~`|PRraDlWu}s`yAL@YHD*jqa>a@#x%Vw=PP8kdlh`V7N@1?0mFp*DTmk|K6I=F<
zG_HOu)5tpEGDP9s?q85Qm6(@2r@Jvi$>|IVF50m-i(f|g>zn*+Q<<@Kb|OOB}acUbDQ
zM{%j8E`XiRC}%m?;7YdWO17kJE7xQ4x(NJlmT;d!8%T>UDn0pe|wv<_}+i2_%hU6t(22EC{@$J10^L}a>Ve3
z?q#T0MrstllGBN_2#kX1cw99nEc
zu)R++H2xs%6ga>Fr5R~4{gC7XpL_Fr7P=$Yq#3RI3$N7sjTg)=&pbS;&%!S#@a6CD
zaqad&{C(12E75hW^{;0Y+c?6AJsH+x$t_$S{OH_}<;NyZ96OzV7nObW(k5ehUm}B-
z?t{L{gAiKFTaLJ;a}HN=4}-;=LGX`FhJ8l16g;q;cha^yU0{mQeFqnIm{0@>=5wbu
zXNJ$VoOfUH3eP)EkMXC!y1&u1)nY~`5M4V8KaXix+2STv(6sN#j0ESwNLUTHLz4tP
z0lQI~6@s{Zq<{Qs);Xr;io}iSmo(e&vgiQl)ewB_ABarG6)TFfo+Xe50-cxQwQV}T
z!)H^@Sp-?vVmFD5h3?_mu7^^+P>a~1WwYd|<`r5WBcGJc~_Mi!)iT>ITW+Cuk
z5Nj0ObJR`!5y(3e^OCMNmhKzs#4RZeJ>|&WgY6esWN8nb?tl4pWT?E0+o1{GE*_F7
z#scV{ln);n*AIZCD)=~DqULW47y8sq@D=i|!(r}_4XFpa@(vc+$;^OnFp9J=F;%K}
zUAmz#@{GHVqBd|A%K{&kBgQAR+pf^yC^4?Y{T4Y3M~NP}!%dAz+nM`Nu)ksc;)KY^
z8-64{r%ejOap8iIT^Ejl(AnI!yJjd|(x|~5&Rub6$5u(wTNNL{HfSSFY$7(OPxbB<
z52f;>^p*MFNl6jurprUI10_$0`iP1qbE?J-ha}@Ld|f8m{CzPNafQ1SY;PZ8Whb
zME}lZHeH(v<$t##X5eKC;c_z#SaJw!DFdkP;u;
z_6Xhwu7N}n7=4f4`FWw@yY^ppnJlQCqRf<+#={42)=&28fk;MU)=KiLf&R8dgfxx?
z7^gi%r{B3#
zp=d{+V$_=dp;u>~?ct*g3WpZ+jWX!%1j9jtHfxoN@ro&|Ds#cCtg7VMeQQ#yD6QKz
zk({fb_U`Dv*Ivbx+Zh}~=4gKPCl~Ou_sm!TXpnAW_N+D_pW4yI5|O_%+QT3F2~LK5
zl?=@E9A}J%ZX=FT)|_FTSv5~T3rKvyMVwP1>y0X}>XoEFfR{#>J1>}a7&h7|;at9-
zS4U%<0ugIr^X}H3mA9t8(=MyYQ#=HeBz*W#%-7Yp@b>!pt^n5vplMvbR&(+=?SXPd
z(r=dyVf!bmhn5s6>?~e=03%=hzQ>SErY*PDC`cxCS*1^>6R*g2&>Ikqdtz{(o~sh0
z$NhFV!8H7{%(HTgqHz|>G7~q#<_o*zm`_&vS~IW>uv=nCrqLLj~EVG-89a%AE{Fn#p=`s@QJ@NLU
zQn9BC&_}8(jbPzlvdyyx?^4!TxqFQ`%^spRt}b*wY*k&5x+g5Ok3q%yIYfZGIT
zh+&Um{oc*kRu%UlYu)_YmLC!p54
zcN5N{AN%N>;}0&D`yv`M8*pOWr@1-&ov-K1(Q}Z($O&QAH9v5h`Rld~rKZXIOPqsR
zN9x-NqME-oZnC`J{(%@J2=?xB0WgVP84Z`hGd2>T{1?%7{npxu7JJZOuj;^790Pke
zDa#o$J%!QpXXu*w_mZ}5Bhi1LQi(T}#DZa&xg_-ayb{^B>z9kvS*8=ypA}uZYqQ>u
z^TJO
zEWwd$@zA?j|B+OfL8#tf#}SMc%!)B8Q&O43R=FMKHu
z^Ynl~ner&|Zijzha4eJKOkf;#I~R<0EyOX1TpXdv!hZvU@DK$dJZ=}u-)N`mGeI(d
z1vNGCYW;}O0yt;OkK~e%zDE3zJBnk>7vNS
z^(kwm5p#;&|2%vv**mw8e)Z?GM+L?-cj;=VFY^iq{}W3x0b)tM;r2zGL#<&wHY>)4
z(Oj?;B=VLL2@rfW+sb;Z;s4`UM{S+yUYM1TH4pQuZb`Gl?Op?rSVjeE_Lx@zB}@1+
z{I=ki;wz%wmv1yQIBn=<3e7J8Z}CLIq&T+KVRd0Qp%xeNQ@`l%fL!tYN<4pwpn-#M
zC5s|lWC1ywvkN96h&uM)e;QiL3px1tV)m@b4xxVRjIj9!`urDFJ3M9NlUXFHbhO@s
z0Db0xe76v=;cAvN@M{$a#5UUwN8|ar!ULU5c905jWkFUJ*vAe59F5a0+J#+($NkRq
z)fM2w_FvxuqK{qH&kbF7wPJ?v~;=?#T4f-xFJO!|q
z#)JS`lCb>KLJp6|n*=lm!!=_ejvB*X;C3xByt^+q=_f=k8_$C~Kvtp=-7zAm8j!9g
z>87B0&Kk8XlF8p`?j48Hl-vJ6hW5y?$H}L*2tV)_{f(oyHTMn^rO#F8p2w94bDWkR
zzrr}4@EgV}IbZRD5$H(YuA`w%6Y>)=PW5xvDD5
zWD;X)v(s}j#lwd46
z0-&4>+GB!}ac2%TJ(9u3$@(Y^`c*gcoIofKH
zs(;T==qcdV+*ouWc&5i&>h&CV?sl}SM;%2R|mBz27{l$1RXIlF8
z06!ZSF`_5@Wpg9sY-u9`v624|lmZGWXJVrdA?#6H>E=A;s0$*ZxPoqP*J3?9(2kAT6Te#UVEp
zKhF0A>oJO>nbMP|Ys+C>vc{&9M|06f0TJC+bGATI?=lkm21h@>^+XbR5e7o^x4c!)
z3Mo0l`y%_jJxW7K&i{e#+uK5Xw}TAE?f@Hz2)!PRZ(B!)}G&7eE6O
z3sGN1BJ2?{Ps=#!nq}a&4=fCFP5eQ3uV+KFMBHi
zqnxsALoIVnKMtjYi>5(S41s>j=s{
zHm(}#%Kdwilfjt9r!QOG`+SdJA8%XBp+$*EPGl+eU^TGn@|^3!j$@j|f#%n4YR=I+
z1lj!mjk@=UYN~zX06|0$DT4GC6cLanAP7<-DqWfgp+`Zf(xrz)KzfOwfFe>vdI`Nl
z=)EXN?}T0xN(hkToBKbrn9ZCso0-KdvPsUpC&_)^=Y7hrII0n!Zt^u6(p3G!x7>xE
zZ{>rq+@bk3oGEf-91v4mi|1EhPjkMn?6f9nGCEL;D}Rv*AEDjICp&2mZ}lh*9Rn)%EMxXlTCcKH|KaGu
zGIc&vT%&Qe_}`!a4v&=5(u2e~>P{Uhz(w8*sRiRsXgeY-}xL9ujCIOUzBq$`aHp{4aa
z3;@&T>k%dALv2buR#|GfcY?1JS${n={w#W;ZuUQ8D(mFYd41$~Fr#a$Q=dzRsO2X^UM`50$*lW5Z%J63H4y6Y
zEN2YX@V^?&0FQuoHZnGF@7jnZ&+Ylpj
zv=CfqGp-JZO|3l7Y)%6U`wdsap?wf@>@&<^JlLKo;W15bW@ORZ#y@3OzjWlpHUXmr
z?X$@4hzCJqoI;pt1VPf;bUaf0_zfwriB%1kul{}Ry8wgD=v_tE%^EaQbQU8^iW^OnG%ubLISK76padxW2{b0-dUL|@iz!$3gIP0!
zZ{b=zt#8`j2|L;JH0|4|jhzmwVqQ$%;b5(Q{m@Ylpv`-<$OqBmLcYb5!5U
zJT&Z)*CYNM{Ndx9C)+1yDio(EOfi{}o>W)bDhN}nA|aJ&g2c8LIWm+M5Pbsb5*y&Z
z`)}Pe56nu+zK!@E$HkCi!OJ`Y35<9T+jBQgVOEU7W}P-
zQEb7e^M4eIrN)rS=4YilHg|CHSr$Uv{+h(?ZJ<9gz?oX#gz
zsu5Z_WjB=)V56H{)rNSV$!!69hO6m~*{rNCM^B*aThn+Hx*TI4+yx}r?(nmDx1B0a
zbyjA*5J9Qo9HQ572@RgB*A06&l2~MkwD}ZFD2ruD)l-C^XNpYti=GmXm_Ks|3OVMd
z(q-VuiKkt>rk@6GOvn^xBpe!ti0%CM2K?YUyN3J2vWh=hq$j6o81nP4vNb_COgo>gsTAKO}!h%zhWt=5RBf
z`(kO_W?HCk@|8R8c?Gl0t}>=i$M0*CuSs}Z!TI(adwsHE)9hnG9iITyy9$ek%uGow
z;FY>=5EoHsYP^el?H@yY8(k9SK$kmxFFBA+Mdm@<2Z5m6PPBIsZ>XP>Z6p)x^D?d5p(OqF$ho*Zx-KUw^Z08GPY;6%6Cko>HR`Waaj){V+
zH;RqM-vnZw<4gm-lld{qF@iZ9f9GAydiPJ89vYU1{Bo|{JknaT9aiT^_saBbK
zGH2X-46=57$7wz5_8=m|SBoBVM!h34fpxZPmgb5v`lUx|r0?%~sMMGR;7m=KM`D}*
zQAB|`TJOMgLfLS6zoxdCE`G6H`#UXa!hn3D2Uh`5JLkfatreg1uO9}pkukgeh&w$m
zJA=f%UY>wW;5_gt@C{fHLrp@V6keT(7Sr_dX0~fH`4ZB<+L~R_Fm^c5p0AZk!{sFS
zMhdi`1h;OffDTtZV~u%fLTyuO12E8LVX!ezP|JnMqdN8YTfn>vtBIEq@z&whIs?$ZT0
zVpOc6V=g{fq{a#B78hJwlC#t5=X(IFdE%K+KMYq|1+%OxP73ZSC>*KInqAE2i*&yc
zEi%a;8kL4u52P2UILS-EU#@Nj7$-zs#IU&QQc&^T=iY%yt&|#crO%TQ+&0Et-R)mx|X0ZkK(?rDI26u*_2=db>T_Yf0U-Fw=B3unA7|~
zCE-VRyxobAhHe4ez^kvuB>?Y<=sW*v-?^Q(r~|#`DbY9>Wyts}dKCEJw=Jf(kwv@j
zQ6gfC7jnUaXy^KHC`G=$yW&YP3pJUnBG~kSKckM0@sX?G6?vK6;cSxMGM&x9%htmq
zu--~tEP{&HceT}tRC{{u57;g<`l6+kwSA>&vVYCN{#tNr@~J|&ZJS^k!GTQY0+9SV
zRDdsqB}|o8beI!tR|ppA0GMwvNUk|K=Mz{rv_BJlGj?zcS()N^E=jKMI*+x;ag@
zN-x29RtEqdAC)DSkKm4-vFn|;&6#Fu1XG%?QFw@vQ@!4az9}rw+$&o^9vAa&>}ppJ
zY=*_?7*8hEC5sAkxQ>|Lt5Z{#KDxEOnjfzgRjYWV_QzK{-_kSoZ9O5$mt0f(xAg~CVmW&}xca27j=9W^
zkMeMPUw`~g-6ag%#H+`Bwsly2c2cOXK$S%S|2hmNKP?y8-t4{bChijN4iwkL=2#rA
z>P#Jj^XwPO
z9bFuMU*SGD8oNDO{;iz&3iKXleVV!Fx|--)zMDplbiFRXQJdk?L-kJ}g7=bP_mpkp
z-$?oP!|z2MugJ!j0DU%RAzOdBvxUgLpyjpC?W;^|R{Bfrq)ZoHtDxvbE4a_18F>hF
zrF!eB)%Hbe7KuJ-p`R|3Ku-t3E<^flWi1eF34O2ME+B^@=4tQlLt%EO-R5;Ei_g~8
zWGZ-x`GUv1kX`D!4rFNHo9L2e;oOqM;3=OJ&HpGuQr_PJm`ZG0jXryU#?c>?f|OKp
z9OWnf^nO(7u&>Vu=G?vN(<|ZW=ddRE9q9RSxOm=O1dX9isXdeB@l`tcq;Xc)v=b5-
z??YH9w74;dU|9HVb>*m~S`A)wTpSn3Mz0iygXT>=KB)8e_6d-6AXS1>#3W{ZdmJSZ
z$Y+8_%BE}zFzuWXuB1+~
zuJjM8M11{%Uo_%GFAJ|*N?Fys5&nLMZ2_$8{1`-xJdBYl_A
z8qi!QZa!YP6Q|A(H_h-{RQ{x$avm-PO%4Da1x
z3izL$%xaGI7D>T}8MkY`N|}5-M-iw|k0-(JnZhlcuRlC#X{Di{q)EQ$dx9DOsy6E~
zZK^z{hI3zER>}*x68K!>yCxJ~GW4i$cXOmL1Yjg~W%fbRoHJ`TyZm5If$&VX-^`Jk
zUy=IHV-(Jyoi#99PNc!W7uB|43RcR)i@JR^9O~8kG8Eq60w19eb#b!+6oOKkfQt~d
zrxYe?cF)^4ule<^(XzGweZnCrcVn)z@dA>b5TPM~%yX{A(hOvQj9&cucvogXIi@k&;NI?`N1>9h}dZ=Ub+=feOcbCcyZa7YMeSx#+d3{_U(_%^e#VYu6GvS2tY?}Vs)MaNF(&(k&w;6N9
z$F-M=#)9vc?SGhP)Crl%Eik{NGxStGG)gWxwEvjFiX%*XXjKa5mr(NAQ2SfU0$#p4
z_4ppB6S}u~Wg;H-gTb=dh(HF@3|eUOIYZ^)R`6FFjmeFx4UP`}b9eE@g?<^QCzTy}
zqmMt#TPK7neW%zreR<}*QeidSEY>D>e?C;kS+yXNTjHlkBWE$fnXNy#(9@LscN!wt
ze$KYXH74|D{{F00IujH(Tr6ZRoqJc{1x!_0L(>SIqd%_p3H4K
zDe!Ql$OsJyLfnB-j(r19h8TU;G>WOBnMXbSniTV==Veoaypl=NOK%eom#6bZWlX%L
zF)G@RB)(y|N~a@E=q0|~_8O-0iC?eK&c1R;Z~;2X^!}^0)*7Xp_&*o)
zC=S-I!r7!qL_0TPIfG1rLw8y~!+#_FQkHTrWV)Hih_owyk|b_g{xi=j&NormWcf8@
z-;bzti5Kw#k=dZ(m5<%Vv+pvM!e97<$&9q5@aEeq#ZnC07LrwVuK67)czo~PlWtZW
zW{&9KYY6TIfQmJRm!y9(CYM97K;UM{uU2DtO}^Xh*pk*VCWGL@vxZ%7zWO-q)J3A-
zFimf^Uyx9-$`aVY&c|-K`C66je%CU2y`S$6RC`HsX^md}fGYEB65EB2w*5yD7WW^8
z9ZQ2X$VxfzfA`*1sMN8Vz0OzgeLf_i*ohQheKKdh+8f2
zL?ewF;pNOsd;aV*J15W=dJ)0rFX=vhLJcN#Q6RX3mU;1P&p#H+6
z9XL4MUb1=Nua5tlkRbpeSNQ4}G~4P2;GPkTbMwmbjjr82Jz`(K(<~@#6ClrU<2|S>
zg7rf_NpC@u8?E~bOqLLuq>G9>tq@R+BRR?@N)}0Dk)s^MP7xh
zG^EA`Y>3fgr_&0pE#DNqHGVkP>ZY0uFs_GnjldSfiP}^9a7~kiKc-goEZmdsEhhJB
zjs?70^t}HeAFeQNoX{%HB{Y91+iZv4fO%t5nf>i_f+pPfeeOQZx%~7Wg^`&J_xO?x
zjv+I0Ut`<{SoFvzdo*h9Pt?YOyAVvm(bdtepP_9ly`t^p)1WCL>$Q#@
zFgxMZAWu{44_Owo4CU_L_1Q*)5Ks*?rr7~Q5?eX!I%dyzDjZ(xS@-xm|2o-|+}~aV
zt~=U!9N7h>^LKyF=*B(R-^MpskXeH(F+t_*tdvbCHH!8nd9Qghy?v
z4{*t2W&{__lnd`IEegc_ZI~z4VAUanW)`NMZ8iJ+$>8}5920>hjbw;BMRpm4F40v3
zr55bgSVpkUiF8YgR@he?RlR9U#$ru7d;~5@IP*kV5sWd6KU5A9{gb%ZH)Q0cR|2Tq
z^^*shg|mmSCdO!qgBB&O@l`854IJy3p$!7d)QA7bq9YHfl>rO5OewQrl~&!6b}ar7
zz3=1XpvTB6%uM4{EdWf?|B(+LAA?Da{WdRso52xc{IddmC`44~4mQ4gx|_+REQmLt
zgfWb*SN`3TSm~Z`K9nOm_#ga
zC6_Ai&LR~7lMj0QKy@(4D?+xf++KCyGTegaBJwNP)4<;5pLX9dr0WnJG8
zZk3_Dn9_qXdQ9LxIPP;Dd{|Y;VWyKQ=G;T3z~r!YtH`+1=xPR8?FL!P+8N@qpw{T6
zzJkrq_7Vdw;(c6i=dfl!P9s&Qm1a;tF+Npc0Ik7w(j8oQ`Gho
z3YDE#UXgKUAMPTqeT
zMN}-_Vu@EWt#yIeh1?=x107V_ify>D&?sdmZ&K=t^<&GK&&zu+y$SRPKu!!U3v&c?
zyY>xqYyMT&i%WeV%z!6c?dpALrfPymgL3^-mqG2i61P?&zMZAvb8EFF?Esr+5#d^e
zZ7VC;mG_>6*{X=@>;<#K?oGM!la-qF4Y(JfZrQy%lX6C=H>jooL);-`cT>_k1O3
z=}a%+S^pJqYE9nEALKd#vhC0g<{|%`T$BzoAQb0WEjgyf^^4Sh5~2PY5=!mw0Kko}
zjN(7lV!;um*TA!COn4)L~4Uu1Uv1g$?rL4eanwp@50AkRfsyGKKw^PwTY&N
zi33tJH##y{$JQJ8MrhUco3e6o`5kCp0^LboOTJt^M%eFLj^Je{vIA|X`e+Ay3-IR)
z99%hU8@{8Jb@N)<25nd>)w+tNRYGz&))}*kPB^g~r6f9zZDuIt-kS`HijhgSpT
zd{lvPpW@^bD?BL$`
zs`aQCMG?gr083caK@IHW$}k$@1!FGl@X-WPj)i@TWm5jeXr>JEhkFrY
z7o^+0`3f?E&fjfQj^Mz){_nwQfQZh26k;H|h*DF-mxVX1Vm=pT{7Q)-9GB+{Q1-FT
ze^y2wn(XBD%^}%xC4`2jCqvK1m6kr?=K68EPd|Av%)e$LP$OVVSe;R}-9_LO}ga~`Vko7ZvytDt1u?
zlDa2|lj^j&LhwfI*a%p=>jrHG>vQHL%Mawx-7$Hp`kpF59d6QBjDMm9Lb8#r14&sI
zha-Sl27UTK=(9JtKC4u)!&W9tr?q$8lxizj?EGASla)W2JWJyD|H1POo4~<_^!r+d
z__=s@*=hN>!7!hXZ4^N;#@cn-C}ZORc8=VPWn%|WJ>yJRoHeUlDbPnG?!Kn99R}){yUBGgX&LrBD@vLs|
zz#0@$?;-9`_=t_@kmldsrEuYvAj`6rR&hzTZd26f0;EXBDz?$sdSx2}8TH!-eOdHp
zZTM|)D0^zoYX-su7D4dL5KBT#ok`Xq8QP#De}3M;-XSyvdHpyz6>^
z?gMIa_gUMGpwU^&Rh90kdj=dX`59jNIbMjL{vUE!mOX8HaVHM=Pq<^oOzf9k#D&!<
zrkAX0@`Og1Q$>uNOi)AZl_*1wq<}l|)hEHe?Nl7g!(_9ZGlZx1A2P5klu1dZ-@V`3
z(oYkb<)9jt*oXSNbOpvhaK&DX+(VC-`V7BRwZU&&XxYJW9bxMtEsq{-Vo~1_@5FTN
zFnUiN26Zz)jVU^lE@BAGe>lfQ-
z)j1U{Y>yx+Va3#Nw0*Q|@3P>H-l;Upc!QT;zh)emdrwJrf!fk8GM6EH$rx~4S^$o(
zyJM^IMT1rumvaAGv8lI+`8}S0Ae|F~XmojMw6fAK)w<}I3ZZgMm2V+p_XhD*%oxGj9M$2;UH0Nc|Ahsk%YqpB~Lr3h;tkawap5pF?cWdBZIA_wCW$
zHMnQWoBOL3hVAVx5-LmywiW+`K90J(D0TQter~!KbY-B)n=m0RYy&2lkhc5{RyISF
zsaN`7JS%+nCI@xvo<~c#uA1m8^e-JH1=V3s_o3lvtm)|VXd9dZtBk1j>y%LCyYFB2
za@?#c{HbL>Uk_wd9r{(36ifbd7mwOdrMR$Ot}2;Tw0;k;E`7sV4v#7;F6x;5rJ~wi
z^UJ+@SvgIlx@=|?+XjCGS1xsdkX6IM+3zdHTu1s3-ClhQu_=nF7m5f#rArEG%AlSJ
zyxTlDbV{F`dSh<(r_jbSQXx_SI$R1DA`sQ3@1X9RY^c;$w5R(AB<8u37QL
zGS4Jc?j`zxH?&bv*e_iraHp6QCiZ_huH34E9blF!`f(0%ewnfOYR7Z#7!lN#NWM!o
z;;FilHDEvZ;*C11A?5%1ihCM-AVTSwclhMVuoCeScw2Hj4in=R(0h_a{VOpF8LbLM
zu?@NK%U~0tzE8?oianvdI!;hm0Mn;H)qel?em2EY;C
z`)?WYeFt4@(80qwcc~&(HvNIQOVF6mgiw0|GFxL5=|hFd8O(jr(W=Py4}3|kn!k@)
zi*gzDLCir{>DTyJItGT$Z0XO5JZ>UXC(}VjbAHam@CmdtC@yt8+IB*(=%%)%V@@1s
zNF`vh3a7NHQKej5)lOdR?8n|W$E@s(mU~H=ULrRZoW3iP)&Ja
zlYMDPWU3mNwYFM(@E=8n?_u4sYjWETxVA3z&Q+&0iM^r6?4sNt0^_R7v3y2i|Lq&x
zNd^>@Q`=8Ko-Ux;&6v&4y(B?M=8{1TthU3q%v
zH}l&nHZ9E4u8$&A>k<%5WH<0nKHlwm9Gdtah4RJ`%{u$Qv8%4hgj_7L#`#QUK?f1z
z;NU(Sjaop=J_3}dfy^S2JeYY!E^`*tBhSw>ae}q4
zJf>8wW@fK++_7Ts_li-cKs+XMjFr})Z<;$!L2jC>`zagB4Y$v+-qF0ip@g8zQu>Cc
zm^v}Q9g=BXu=(wo)b4<%L`#j62SVN*rT~)AcXD53d%O{tHtSF|MAN@H4(UqQ@
z+p~>_a~SgV8<*VMOXV(z_9uvCU^8UY`W`yHi&rhL#6mix!c0@rT+!j|Jqx9B(|5|XbA$T1r5+6?f_-ju
z4OXfLuNvk$Qk?_!+~sZZSY{1~cvlz?!`kO#sIfnz|H*mgb_rkdk|W62Iba*Vq1AG&|M3i&SFEn`4{5
zFJCkd`_4QlifV491b&`!D+d`+_^U&Ap$TGs?b_lfgaap;a|dw~##YQw>h4sq>kR($
zOKF6tEzde0w`|1vseHl_&N`>S+`*=S({mYm~PkwCT_rYYb?~E+d2c+x*sbhc=
z$+LH?-l$jS1k68ISW)wx@i|!JPIED!c{7E`X?3ia7YZ-B05o_uW-_=Z)|?L9E$m*{
zl@RGHNKG?>AuEabd^2;Vn8tILYcVQmf;$CUs*|lXc5uZ}o%J33$tl~=*7Hrg{e=x|
z!5L9=!M!o@hyPbD%k5=mBXQ;TQuZGz7P!Cc`wahHgz9|Rw{OS|k5$_n$(C$2GM(IW
zW$V9NG+3YJ3*2YWlA4F63jE&~nU`YpN?yVv$kmu=Rc!IZWo4);FMuq~0&IWt^m}}S
z-ennoc3TfLcAET;Vh1P-cf*D3(u^DAR348%8D+aucSR^_M`f$jiW&Cg1GrtzvQ>
zi7`uoo2b(MKq#nx(E`E3PYy9)pu_B9t5S~Z4u)QW;C)Lh==W%(g?pPpWRKEylwiv0
z2Klwc4NFH_mIe{c;Spvpnc#5PFukQ#n({kF4HrBUK*zaM^Jl^blIp;fWP$R^a$H8T
zWJKLqV^OE)=K1H%yA)OIa~ady&{V?TANC6LDJ@oYgcz2p+L2k1Zij*n8}1b*YWx*^
z+6K|y0L(o3P@Q$yjObN>umu1&49oYet@
zS8Dz~X8kVzDe(!Sy$!@pWWtHzSI3mamK$E~7+m$Y=Mywcd&G!#bh2<46C?+S_F|x&
z(C1)wK$T~BcjcI??&`w%mvb``rpM;yfJcMRV*s=@>y%A!c|eW!JM-**;=er66=84a
zdo#<2{B`S(%z+@)R1G43gh9yr+_bj)q4DjG@#g%KQZ?o;>k%^2OEG@aWgq(u)-A?D
z?O$!@y}LT=EqtZqp%O*6)g|w|o6Y^b&^t7B&Zsx2&qYs1`nry)0P}BJ5P^p6H!SYwF+@EF?<*^E*Wk=^71r^?ppzIk48pHX*_W^kf+!>
zn~l}3JkAcg(cmcL`b*Bb*W+c8+PhJ}B{=6swQK5AB}jHeU#ZcVHJ6t?km>5U<6b
zwM@9OsScZaVH#70^L_meUMMFqO~GDqZGjG5-Y{xDPGSkI*jasdK*=N8CNJ_tqB-8oKYiPx=q-c0iNsjz9KkCEU&(RZr;Sl;3(f*Xkg{#7YB3IqJT}^p{
z-P`b`4y%fw`yc0ekA@f@4Bol(vH!EB00dWeX$${na~C)n9QVEPA91YLM#lDAhksif
zxGy*jXn)!R`~2=XdO*9S4u=sXNx4$W!q`z0g1L8*)y($AtVC;$3p=d1*fC)X14De6
zL=z=Zlnz%`YEjpZPHY~avHW4PXnQO;%*!hBVc}2P``wB=U$)SwKl{V7G0C=tCn`@l
z*QLWfuE{PIFOV|E7A=dXj`lH6$IxfKqc7vnct7d5p$Foi)Q~2?A;mJ
z31|EHbwo3LNIG$v9eg?%$GH!n?qgcVTvZ#*@K?kz%*<@9di5zw;yNjF~k!l%6t#
zU|(;4;C!rke8pe1BVj4t$n6}pLO&&+A;x_FcxOP?@?e4|^VB4ub>9fsoe-P1xkHBn
zvODrG4cLU(vRG$$eC~@fSCUP{f_GdEBn$D9w`zMfL*;I-Aa0ovVxCjKFOxXXESvrz
zj4VJ+y3YdWuiUdf8555yl&~sdT?T)A$H-o}l_n<~FX+`0>r7uwQ!@
zP<{l+r3OrgpvE;$lvVg)0IRal*KZ4=76iO6pDo8--iXXmW|W2>Sl6i{6xvOTE!Ya(
zZvg-^sDJ>fXbj*H<>uR#^E
z8lU1Kw7-7In1_5OEa7_zXpH3#r<@d(dt!dt%0CPm*J%Yk1$eoSP;pD#&BnMKi^@r5
zBVME6m`Bl9p$qR3;lUYF@m}fotjhhL|1(y;Ft?HM&y6aYoCNETANXT1_wup38iWum
z2=Y^WL8~cdQOL_IH1Jhptvw_9SaGyqwiNSAxu^1534
z={8iJC`G{Ggjb$0`e|dzDBqIG8Xn-r
zys1y_h$5@c>OU$xm_CliM#DU!(DZn!RG+c