diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 09b767ed..6932b9e8 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -5,6 +5,7 @@ on:
     branches:
       - feature/*
       - feat/*
+      - chore/*
       - bugfix/*
       - hotfix/*
       - fix/*
diff --git a/dapr_agents/types/agent.py b/dapr_agents/types/agent.py
index ce645484..9b096d1c 100644
--- a/dapr_agents/types/agent.py
+++ b/dapr_agents/types/agent.py
@@ -70,13 +70,15 @@ class AgentActorState(BaseModel):
     """Represents the state of an agent, tracking message history, task history, and overall status."""
 
     messages: Optional[List[AgentActorMessage]] = Field(
-        default_factory=list, description="History of messages exchanged by the agent"
+        default_factory=list[AgentActorMessage],
+        description="History of messages exchanged by the agent",
     )
     message_count: int = Field(
         0, description="Total number of messages exchanged by the agent"
     )
     task_history: Optional[List[AgentTaskEntry]] = Field(
-        default_factory=list, description="History of tasks the agent has performed"
+        default_factory=list[AgentTaskEntry],
+        description="History of tasks the agent has performed",
     )
     overall_status: AgentStatus = Field(
         AgentStatus.IDLE, description="Current operational status of the agent"
diff --git a/dapr_agents/types/llm.py b/dapr_agents/types/llm.py
index f32731ee..1b6a35d2 100644
--- a/dapr_agents/types/llm.py
+++ b/dapr_agents/types/llm.py
@@ -1,4 +1,4 @@
-from typing import List, Union, Optional, Dict, Any, Literal, IO, Tuple
+from typing import List, Union, Optional, Dict, Any, Literal, IO, Tuple, cast
 from pydantic import BaseModel, Field, model_validator, field_validator, ConfigDict
 from pydantic_core import PydanticUseDefault
 from pathlib import Path
@@ -143,7 +143,7 @@ class OpenAIModelConfig(OpenAIClientConfig):
     type: Literal["openai"] = Field(
         "openai", description="Type of the model, must always be 'openai'"
     )
-    name: str = Field(default=None, description="Name of the OpenAI model")
+    name: str = Field(default="", description="Name of the OpenAI model")
 
 
 class AzureOpenAIModelConfig(AzureOpenAIClientConfig):
@@ -157,7 +157,7 @@ class HFHubModelConfig(HFInferenceClientConfig):
         "huggingface", description="Type of the model, must always be 'huggingface'"
     )
     name: str = Field(
-        default=None, description="Name of the model available through Hugging Face"
+        default="", description="Name of the model available through Hugging Face"
     )
 
 
@@ -166,7 +166,7 @@ class NVIDIAModelConfig(NVIDIAClientConfig):
         "nvidia", description="Type of the model, must always be 'nvidia'"
     )
     name: str = Field(
-        default=None, description="Name of the model available through NVIDIA"
+        default="", description="Name of the model available through NVIDIA"
     )
 
 
@@ -412,6 +412,14 @@ def sync_model_name(cls, values: dict):
             elif configuration.get("type") == "nvidia":
                 configuration = NVIDIAModelConfig(**configuration)
 
+        configuration = cast(
+            OpenAIModelConfig
+            | AzureOpenAIModelConfig
+            | HFHubModelConfig
+            | NVIDIAModelConfig,
+            configuration,
+        )
+
         # Ensure 'parameters' is properly validated as a model, not a dict
         if isinstance(parameters, dict):
             if configuration and isinstance(configuration, OpenAIModelConfig):
@@ -423,12 +431,27 @@ def sync_model_name(cls, values: dict):
             elif configuration and isinstance(configuration, NVIDIAModelConfig):
                 parameters = NVIDIAChatCompletionParams(**parameters)
 
+        parameters = cast(
+            OpenAIChatCompletionParams
+            | HFHubChatCompletionParams
+            | NVIDIAChatCompletionParams,
+            parameters,
+        )
+
         if configuration and parameters:
             # Check if 'name' or 'azure_deployment' is explicitly set
             if "name" in configuration.model_fields_set:
-                parameters.model = configuration.name
+                parameters.model = (
+                    configuration.name
+                    if not isinstance(configuration, AzureOpenAIModelConfig)
+                    else None
+                )
             elif "azure_deployment" in configuration.model_fields_set:
-                parameters.model = configuration.azure_deployment
+                parameters.model = (
+                    configuration.azure_deployment
+                    if isinstance(configuration, AzureOpenAIModelConfig)
+                    else None
+                )
 
         values["configuration"] = configuration
         values["parameters"] = parameters
@@ -550,7 +573,7 @@ def validate_file(
         elif isinstance(value, BufferedReader) or (
             hasattr(value, "read") and callable(value.read)
         ):
-            if value.closed:
+            if hasattr(value, "closed") and value.closed:
                 raise ValueError("File-like object must remain open during request.")
             return value
         elif isinstance(value, tuple):
@@ -614,7 +637,7 @@ def validate_file(
         elif isinstance(value, BufferedReader) or (
             hasattr(value, "read") and callable(value.read)
         ):
-            if value.closed:  # Reopen if closed
+            if hasattr(value, "closed") and value.closed:  # Reopen if closed
                 raise ValueError("File-like object must remain open during request.")
             return value
         elif isinstance(value, tuple):
diff --git a/dapr_agents/types/message.py b/dapr_agents/types/message.py
index ce6b9b9e..1f9f89de 100644
--- a/dapr_agents/types/message.py
+++ b/dapr_agents/types/message.py
@@ -1,3 +1,4 @@
+from typing import Any
 from pydantic import (
     BaseModel,
     field_validator,
@@ -183,7 +184,7 @@ class ChatCompletion(BaseModel):
     object: Optional[str] = None
     usage: dict
 
-    def get_message(self) -> Optional[str]:
+    def get_message(self) -> Optional[Dict[str, Any]]:
         """
         Retrieve the main message content from the first choice.
         """
diff --git a/mypy.ini b/mypy.ini
index 4e4aa72b..a0759d2a 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -40,8 +40,5 @@ ignore_errors = True
 [mypy-dapr_agents.tool.*]
 ignore_errors = True
 
-[mypy-dapr_agents.types.*]
-ignore_errors = True
-
 [mypy-dapr_agents.workflow.*]
 ignore_errors = True
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 6ee1dc62..d1dd4fb4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,4 +14,5 @@ pyyaml==6.0.2
 rich==13.9.4
 huggingface_hub==0.30.2
 numpy==2.2.2
-mcp==1.6.0
\ No newline at end of file
+mcp==1.6.0
+dotenv==0.9.9
\ No newline at end of file