Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,10 @@
from .tool_execution import ToolExecutionMixin
from .chat_handler import ChatHandlerMixin
from .session_manager import SessionManagerMixin
# New mixins for god class decomposition
from .chat_mixin import ChatMixin
from .execution_mixin import ExecutionMixin
from .memory_mixin import MemoryMixin

# Module-level logger for thread safety errors and debugging
logger = get_logger(__name__)
Expand Down Expand Up @@ -196,7 +200,7 @@ def __init__(self, agent_name: str, total_cost: float, max_budget: float):
f"${total_cost:.4f} >= ${max_budget:.4f}"
)

class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin):
class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin, ChatMixin, ExecutionMixin, MemoryMixin):
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Action required

3. agent.py exceeds 5000 lines 📎 Requirement gap ⚙ Maintainability

After adding the mixins, agent.py still contains code beyond line 5000 (e.g., start() is at
~7607), so the file size target is not met. This indicates the decomposition is incomplete and
maintainability remains poor.
Agent Prompt
## Issue description
`agent.py` remains far above the 5000-line target because major chat/execution/memory implementations are still present in `agent.py` instead of living in the new mixin modules.

## Issue Context
The PR adds `ChatMixin`, `ExecutionMixin`, and `MemoryMixin`, but they are currently stubs; completing the move and then removing the duplicated implementations from `agent.py` is required to hit the size target.

## Fix Focus Areas
- src/praisonai-agents/praisonaiagents/agent/agent.py[1785-1870]
- src/praisonai-agents/praisonaiagents/agent/agent.py[3351-3385]
- src/praisonai-agents/praisonaiagents/agent/agent.py[6259-6285]
- src/praisonai-agents/praisonaiagents/agent/agent.py[6798-6825]
- src/praisonai-agents/praisonaiagents/agent/agent.py[7421-7445]
- src/praisonai-agents/praisonaiagents/agent/agent.py[7595-7615]
- src/praisonai-agents/praisonaiagents/agent/chat_mixin.py[32-127]
- src/praisonai-agents/praisonaiagents/agent/execution_mixin.py[30-173]
- src/praisonai-agents/praisonaiagents/agent/memory_mixin.py[30-111]

ⓘ Copy this prompt and use it to remediate the issue with your preferred AI generation tools

Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Because ChatHandlerMixin already defines chat/_chat_impl and Execution/Memory behaviors also exist on Agent today, the new mixins will only take effect via MRO once methods are removed from Agent. With the current base-class order, legacy mixins (e.g., ChatHandlerMixin) will win over the new *Mixin classes for overlapping methods, which could block the intended migration. Consider reordering/removing overlapping legacy mixins (or ensuring only one class defines each extracted method) before moving implementations.

Suggested change
class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin, ChatMixin, ExecutionMixin, MemoryMixin):
class Agent(ChatMixin, ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin, ExecutionMixin, MemoryMixin):

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
set -euo pipefail

python - <<'PY'
import ast
from pathlib import Path

root = Path("src/praisonai-agents/praisonaiagents/agent")
files = {
    "agent.py": root / "agent.py",
    "tool_execution.py": root / "tool_execution.py",
    "chat_handler.py": root / "chat_handler.py",
    "session_manager.py": root / "session_manager.py",
    "chat_mixin.py": root / "chat_mixin.py",
    "execution_mixin.py": root / "execution_mixin.py",
    "memory_mixin.py": root / "memory_mixin.py",
}
classes = {
    "Agent": ("agent.py", "Agent"),
    "ToolExecutionMixin": ("tool_execution.py", "ToolExecutionMixin"),
    "ChatHandlerMixin": ("chat_handler.py", "ChatHandlerMixin"),
    "SessionManagerMixin": ("session_manager.py", "SessionManagerMixin"),
    "ChatMixin": ("chat_mixin.py", "ChatMixin"),
    "ExecutionMixin": ("execution_mixin.py", "ExecutionMixin"),
    "MemoryMixin": ("memory_mixin.py", "MemoryMixin"),
}

def get_methods(path: Path, class_name: str):
    tree = ast.parse(path.read_text(encoding="utf-8"), filename=str(path))
    for node in tree.body:
        if isinstance(node, ast.ClassDef) and node.name == class_name:
            return {
                n.name for n in node.body
                if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef))
            }
    return set()

methods = {}
for cls, (fname, cname) in classes.items():
    p = files[fname]
    if not p.exists():
        print(f"MISSING FILE: {p}")
        continue
    methods[cls] = get_methods(p, cname)
    print(f"{cls}: {len(methods[cls])} methods")

targets = {
    "chat", "achat", "run", "arun", "start", "astart", "run_until",
    "_cache_put", "_cache_get", "_add_to_chat_history", "_truncate_chat_history"
}
order = ["ToolExecutionMixin", "ChatHandlerMixin", "SessionManagerMixin", "ChatMixin", "ExecutionMixin", "MemoryMixin"]

print("\nOverlaps across mixins for target methods:")
for name in sorted(targets):
    owners = [cls for cls in order if name in methods.get(cls, set())]
    if owners:
        print(f"  {name}: {owners}")

print("\nFirst provider by current base order (excluding Agent class body):")
for name in sorted(targets):
    for cls in order:
        if name in methods.get(cls, set()):
            print(f"  {name}: {cls}")
            break
PY

Repository: MervinPraison/PraisonAI

Length of output: 1027


Consolidate chat and achat methods before extracting implementations from Agent.

Verification reveals that chat and achat are defined in both ChatHandlerMixin and ChatMixin. The current inheritance order (line 203) masks this duplication—ChatHandlerMixin resolves first, so its implementations are used. Before removing these methods from the Agent class body, consolidate the duplicate implementations into a single source to avoid silent behavior shifts and comply with the DRY principle. Add a test that explicitly verifies which mixin's implementation is active after consolidation to prevent unintended dispatch during decomposition.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/agent.py` at line 203, Agent
currently inherits duplicate chat and achat implementations from
ChatHandlerMixin and ChatMixin (with ChatHandlerMixin winning due to MRO);
consolidate by choosing one canonical implementation (or extracting shared logic
into a single helper used by both mixins) and remove the duplicate
implementation from the other mixin so only one source of truth exists; update
the Agent class to rely on that single implementation (no change in inheritance
order required) and add a unit test that instantiates Agent and asserts which
mixin's method is invoked for chat and achat (e.g., by mocking or by checking a
mixin-specific side effect) to lock in the expected behavior after refactor.

# Class-level counter for generating unique display names for nameless agents
_agent_counter = 0
_agent_counter_lock = threading.Lock()
Expand Down
127 changes: 127 additions & 0 deletions src/praisonai-agents/praisonaiagents/agent/chat_mixin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
"""
Chat and LLM functionality for Agent class.

This module contains methods related to chat, LLM communication, streaming,
and conversation processing. Extracted from the main agent.py file for better maintainability.

Round 1 of agent god class decomposition - targeting ~1500 lines reduction.
"""

import os
import time
import json
import logging
import asyncio
import contextlib
from typing import List, Optional, Any, Dict, Union, Literal, Callable, Generator

from praisonaiagents._logging import get_logger


Comment on lines +10 to +20
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This module is imported by agent.py during Agent import, but most of these imports (os/time/json/logging/asyncio/contextlib/get_logger and several typing names) are currently unused in the stubs. Consider removing unused imports (or deferring them until implementations are moved here) to keep Agent import-time overhead low and avoid future lint failures.

Suggested change
import os
import time
import json
import logging
import asyncio
import contextlib
from typing import List, Optional, Any, Dict, Union, Literal, Callable, Generator
from praisonaiagents._logging import get_logger
from typing import Any, Dict, List, Optional, Union

Copilot uses AI. Check for mistakes.
class ChatMixin:
"""Mixin class containing chat and LLM communication methods for the Agent class.
Comment on lines +10 to +22
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Action required

2. Mixins missing module logger 📎 Requirement gap ✧ Quality

New mixin modules import get_logger but do not obtain a module logger via get_logger(__name__),
violating the logging standard for touched modules. This reduces consistent logging/traceability in
the refactor.
Agent Prompt
## Issue description
New mixin modules import `get_logger` but do not create a module-level `logger` via `get_logger(__name__)`, which is required by the project's logging architecture.

## Issue Context
The decomposition introduces new modules, so they must follow the standard logger acquisition pattern.

## Fix Focus Areas
- src/praisonai-agents/praisonaiagents/agent/chat_mixin.py[10-25]
- src/praisonai-agents/praisonaiagents/agent/execution_mixin.py[10-25]
- src/praisonai-agents/praisonaiagents/agent/memory_mixin.py[10-25]

ⓘ Copy this prompt and use it to remediate the issue with your preferred AI generation tools


This mixin handles:
- Main chat() and achat() methods
- LLM response processing and formatting
- Streaming functionality
- Tool call handling in chat context
- Response templating and formatting
"""

def chat(self, prompt: str, temperature: float = 1.0, tools: Optional[List[Any]] = None,
output_json: Optional[Any] = None, output_pydantic: Optional[Any] = None,
reasoning_steps: bool = False, stream: Optional[bool] = None,
task_name: Optional[str] = None, task_description: Optional[str] = None,
task_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None,
force_retrieval: bool = False, skip_retrieval: bool = False,
attachments: Optional[List[str]] = None, tool_choice: Optional[str] = None) -> Optional[str]:
"""
Chat with the agent.

Args:
prompt: Text query that WILL be stored in chat_history
attachments: Optional list of image/file paths that are ephemeral
(used for THIS turn only, NEVER stored in history).
Supports: file paths, URLs, or data URIs.
tool_choice: Optional tool choice mode ('auto', 'required', 'none').
'required' forces the LLM to call a tool before responding.
temperature: Sampling temperature (0.0-2.0)
tools: Tools available for this conversation
output_json: JSON schema for structured output
output_pydantic: Pydantic model for structured output
reasoning_steps: Whether to include reasoning steps
stream: Whether to stream the response
task_name: Name of the task for context
task_description: Description of the task
task_id: Unique identifier for the task
config: Additional configuration
force_retrieval: Force knowledge retrieval
skip_retrieval: Skip knowledge retrieval

Returns:
The agent's response as a string, or None if blocked by hooks
"""
# This method needs to be implemented by moving logic from agent.py
# Placeholder for now - actual implementation will be moved from main agent.py
return self._chat_impl(
prompt, temperature, tools, output_json, output_pydantic,
reasoning_steps, stream, task_name, task_description, task_id,
config, force_retrieval, skip_retrieval, attachments, None, tool_choice
)
Comment on lines +32 to +71
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Check if ChatHandlerMixin defines chat() method
ast-grep --pattern $'class ChatHandlerMixin {
  $$$
  def chat($$$) {
    $$$
  }
  $$$
}'

# Also search with rg for chat method in any mixin
rg -n 'def chat\s*\(' --type=py -g '*mixin*'

Repository: MervinPraison/PraisonAI

Length of output: 220


🏁 Script executed:

#!/bin/bash
# Find and display the Agent class definition with its inheritance
fd agent.py --type f | head -5 | xargs grep -n "^class Agent" -A 2

# Count lines in agent.py to understand file size
wc -l src/praisonai-agents/praisonaiagents/agent/agent.py

# Get the actual MRO line from agent.py
sed -n '200,210p' src/praisonai-agents/praisonaiagents/agent/agent.py

Repository: MervinPraison/PraisonAI

Length of output: 650


🏁 Script executed:

#!/bin/bash
# Find all mixin files and check their definitions
fd mixin.py --type f | head -10

# Search for ChatHandlerMixin definition
rg -n "^class ChatHandlerMixin" --type py

# Get the full definition of ChatHandlerMixin
fd chat_handler_mixin.py --type f -x cat -n {}

Repository: MervinPraison/PraisonAI

Length of output: 378


🏁 Script executed:

#!/bin/bash
# Get the full ChatHandlerMixin definition
cat -n src/praisonai-agents/praisonaiagents/agent/chat_handler.py

# Also search all mixins for any chat() method definition
rg -n "def chat\(" --type py src/praisonai-agents/praisonaiagents/agent/

Repository: MervinPraison/PraisonAI

Length of output: 8775


ChatMixin.chat() is shadowed by ChatHandlerMixin.chat() in the MRO chain.

The MRO places ChatHandlerMixin before ChatMixin, and both define chat() with identical signatures. ChatHandlerMixin's chat() (at chat_handler.py:16) will be used, rendering ChatMixin's chat() (at chat_mixin.py:32) unreachable dead code. Both delegate to _chat_impl() which raises NotImplementedError.

Remove the duplicate chat() definition from ChatMixin, or reorder the MRO to prioritize ChatMixin if it contains the intended implementation.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/chat_mixin.py` around lines 32 -
71, ChatMixin defines a chat() method that is shadowed by
ChatHandlerMixin.chat() in the MRO, making ChatMixin.chat unreachable; remove
the duplicate ChatMixin.chat() definition (or convert it to an
abstract/delegating stub) and let ChatHandlerMixin.chat() delegate to the shared
implementation via _chat_impl, or alternatively adjust class ordering so
ChatMixin appears before ChatHandlerMixin in the MRO; update/remove the
ChatMixin.chat() reference to _chat_impl so only one authoritative chat() (and
not two identical delegators) remains.


def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic,
reasoning_steps, stream, task_name, task_description, task_id,
config, force_retrieval, skip_retrieval, attachments, _trace_emitter, tool_choice=None):
Comment on lines +73 to +75
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

For better code clarity and maintainability, please add type hints to the parameters of the _chat_impl method. The type hints can be inferred from the chat method's signature.

Suggested change
def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic,
reasoning_steps, stream, task_name, task_description, task_id,
config, force_retrieval, skip_retrieval, attachments, _trace_emitter, tool_choice=None):
def _chat_impl(self, prompt: str, temperature: float, tools: Optional[List[Any]],
output_json: Optional[Any], output_pydantic: Optional[Any],
reasoning_steps: bool, stream: Optional[bool], task_name: Optional[str],
task_description: Optional[str], task_id: Optional[str],
config: Optional[Dict[str, Any]], force_retrieval: bool,
skip_retrieval: bool, attachments: Optional[List[str]],
_trace_emitter: Optional[Any], tool_choice: Optional[str] = None):

"""Internal chat implementation (extracted for trace wrapping).

This method will contain the full chat logic moved from agent.py.
"""
raise NotImplementedError("This method needs to be moved from agent.py")

async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None,
output_pydantic=None, reasoning_steps=False, task_name=None,
task_description=None, task_id=None, attachments=None):
Comment on lines +82 to +84
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

To improve code readability and maintainability, please add type hints for the parameters and the return value of the achat method. You can use the chat method's signature as a reference for the parameter types. The return type should likely be Optional[str].

Suggested change
async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None,
output_pydantic=None, reasoning_steps=False, task_name=None,
task_description=None, task_id=None, attachments=None):
async def achat(self, prompt: str, temperature: float = 1.0, tools: Optional[List[Any]] = None, output_json: Optional[Any] = None,
output_pydantic: Optional[Any] = None, reasoning_steps: bool = False, task_name: Optional[str] = None,
task_description: Optional[str] = None, task_id: Optional[str] = None, attachments: Optional[List[str]] = None) -> Optional[str]:

"""Async version of chat method.

This method will contain the async chat logic moved from agent.py.
"""
raise NotImplementedError("This method needs to be moved from agent.py")

def _process_agent_output(self, response: Any) -> str:
"""Process and format agent output from LLM.

This method will contain output processing logic moved from agent.py.
"""
raise NotImplementedError("This method needs to be moved from agent.py")

def _format_response(self, response: str, **kwargs) -> str:
"""Format agent response according to configured templates.

This method will contain response formatting logic moved from agent.py.
"""
raise NotImplementedError("This method needs to be moved from agent.py")

def _handle_tool_calls(self, tool_calls: List[Any]) -> Any:
"""Handle tool calls from LLM in chat context.

This method will contain tool call handling logic moved from agent.py.
"""
raise NotImplementedError("This method needs to be moved from agent.py")
Comment on lines +73 to +110
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Action required

1. ChatMixin methods not implemented 📎 Requirement gap ≡ Correctness

ChatMixin is introduced but key chat/LLM methods are stubs that raise NotImplementedError, and
the real implementations remain in agent.py, so Round 1 extraction is incomplete. This fails the
decomposition objective and prevents safely reducing agent.py while keeping behavior intact.
Agent Prompt
## Issue description
`ChatMixin` was added, but its core methods are still placeholders (raising `NotImplementedError`), and the actual chat implementation remains in `agent.py`. This does not meet the Round 1 extraction requirement.

## Issue Context
The PR goal is to decompose `agent.py` by moving all chat/LLM logic (including streaming-related methods) into `praisonaiagents/agent/chat_mixin.py` while keeping the `Agent` public API intact.

## Fix Focus Areas
- src/praisonai-agents/praisonaiagents/agent/chat_mixin.py[32-127]
- src/praisonai-agents/praisonaiagents/agent/agent.py[6259-6285]
- src/praisonai-agents/praisonaiagents/agent/agent.py[6798-6825]

ⓘ Copy this prompt and use it to remediate the issue with your preferred AI generation tools


def _build_multimodal_prompt(self, prompt: str, attachments: Optional[List[str]] = None) -> Union[str, List[Dict[str, Any]]]:
"""Build multimodal prompt from text and attachments.

This method will contain multimodal prompt building logic moved from agent.py.
"""
if not attachments:
return prompt
# Placeholder - actual implementation to be moved
raise NotImplementedError("This method needs to be moved from agent.py")

def chat_with_context(self, prompt: str, context: Optional[Dict[str, Any]] = None, **kwargs) -> Optional[str]:
"""Chat with additional context information.

This method will contain context-aware chat logic moved from agent.py.
"""
raise NotImplementedError("This method needs to be moved from agent.py")
196 changes: 196 additions & 0 deletions src/praisonai-agents/praisonaiagents/agent/execution_mixin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,196 @@
"""
Execution and runtime functionality for Agent class.

This module contains methods related to running agents, execution control,
and autonomous operation. Extracted from the main agent.py file for better maintainability.

Round 2 of agent god class decomposition - targeting ~1200 lines reduction.
"""

import os
import time
import logging
import asyncio
import concurrent.futures
from typing import List, Optional, Any, Dict, Union, Literal, Generator, Callable

from praisonaiagents._logging import get_logger


Comment on lines +10 to +19
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This module is imported by agent.py during Agent import, but most of these imports (os/time/logging/asyncio/concurrent.futures/get_logger and several typing names) are currently unused in the stubs. Consider removing unused imports (or deferring them until implementations are moved here) to keep import-time overhead minimal and avoid future lint failures.

Suggested change
import os
import time
import logging
import asyncio
import concurrent.futures
from typing import List, Optional, Any, Dict, Union, Literal, Generator, Callable
from praisonaiagents._logging import get_logger
from typing import Any, Callable, Dict, Generator, List, Optional, Union

Copilot uses AI. Check for mistakes.
class ExecutionMixin:
"""Mixin class containing execution and runtime methods for the Agent class.

This mixin handles:
- Main run() and arun() methods
- start() and astart() entry points
- Autonomous execution (run_autonomous, run_until)
- Execution control and lifecycle management
"""

def run(self, prompt: str, **kwargs: Any) -> Optional[str]:
"""
Run the agent synchronously with a prompt.

This is a blocking method that executes the agent and returns the result.

Args:
prompt: The input prompt/query for the agent
**kwargs: Additional keyword arguments passed to underlying methods

Returns:
The agent's response as a string, or None if failed
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")

async def arun(self, prompt: str, **kwargs):
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Please add a return type hint to the arun method for better type safety and code clarity. Based on its synchronous counterpart and the underlying achat call, it should return Optional[str].

Suggested change
async def arun(self, prompt: str, **kwargs):
async def arun(self, prompt: str, **kwargs) -> Optional[str]:

"""
Run the agent asynchronously with a prompt.

This is the async version of run() for non-blocking execution.

Args:
prompt: The input prompt/query for the agent
**kwargs: Additional keyword arguments

Returns:
The agent's response
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")

def start(self, prompt: Optional[str] = None, **kwargs: Any) -> Union[str, Generator[str, None, None], None]:
"""
Start the agent with optional prompt.

This is the main entry point for agent execution, supporting both
streaming and non-streaming modes.

Args:
prompt: Optional input prompt. If None, agent may run autonomously
**kwargs: Additional configuration options

Returns:
Agent response (string) or generator for streaming, or None
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")

async def astart(self, prompt: str, **kwargs):
"""
Start the agent asynchronously.

Async version of start() method.

Args:
prompt: Input prompt for the agent
**kwargs: Additional configuration options

Returns:
Agent response
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")

def run_autonomous(self, initial_prompt: Optional[str] = None, max_iterations: int = 10,
goal: Optional[str] = None, **kwargs) -> Any:
"""
Run the agent autonomously with self-direction.

Args:
initial_prompt: Starting prompt for autonomous execution
max_iterations: Maximum number of autonomous iterations
goal: Optional goal for the autonomous agent
**kwargs: Additional configuration

Returns:
Results from autonomous execution
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")
Comment on lines +95 to +110
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Signature mismatch: run_autonomous stub differs from agent.py implementation.

The stub signature:

def run_autonomous(self, initial_prompt: Optional[str] = None, max_iterations: int = 10, 
                   goal: Optional[str] = None, **kwargs) -> Any:

The actual implementation in agent.py:2548-2620:

def run_autonomous(self, prompt: str, max_iterations: Optional[int] = None, 
                   timeout_seconds: Optional[float] = None, 
                   completion_promise: Optional[str] = None, 
                   clear_context: bool = False):

Key differences:

  • initial_prompt vs prompt
  • goal parameter doesn't exist in agent.py
  • Missing timeout_seconds, completion_promise, clear_context

When moving implementations, align signatures to avoid breaking changes.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/execution_mixin.py` around lines
95 - 110, The run_autonomous stub in execution_mixin.py must be updated to match
the implementation signature in agent.py: rename initial_prompt to prompt (type
str), change max_iterations to Optional[int]=None, add timeout_seconds:
Optional[float]=None, completion_promise: Optional[str]=None, and clear_context:
bool=False; remove the extraneous goal parameter (or map its value into prompt
before calling the implementation) so callers remain compatible, then move the
implementation from agent.py (lines ~2548-2620) into this method and preserve
the same return type and behavior.


async def run_autonomous_async(self, initial_prompt: Optional[str] = None, max_iterations: int = 10,
goal: Optional[str] = None, **kwargs) -> Any:
"""
Run the agent autonomously asynchronously.

Async version of run_autonomous().

Args:
initial_prompt: Starting prompt for autonomous execution
max_iterations: Maximum number of autonomous iterations
goal: Optional goal for the autonomous agent
**kwargs: Additional configuration

Returns:
Results from autonomous execution
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")

Comment on lines +95 to +130
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The run_autonomous/run_autonomous_async stubs here don't match the current Agent.run_autonomous/Agent.run_autonomous_async API in agent.py (which uses prompt/max_iterations/timeout_seconds/completion_promise/clear_context). Since these stubs are meant to receive the extracted implementation, aligning the signatures/docstrings now will reduce migration risk and prevent accidental API drift.

Copilot uses AI. Check for mistakes.
def run_until(self, condition: Callable[[], bool], prompt: str, max_iterations: int = 50,
**kwargs) -> Any:
"""
Run the agent until a specific condition is met.

Args:
condition: Function that returns True when execution should stop
prompt: Input prompt for execution
max_iterations: Maximum iterations before stopping
**kwargs: Additional configuration

Returns:
Results when condition is met or max iterations reached
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")
Comment on lines +131 to +146
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Signature mismatch: run_until stub differs significantly from agent.py implementation.

The stub signature:

def run_until(self, condition: Callable[[], bool], prompt: str, max_iterations: int = 50,
              **kwargs) -> Any:

The actual implementation in agent.py:3351-3410:

def run_until(self, prompt: str, criteria: str, threshold: float = 8.0, 
              max_iterations: int = 5, mode: str = "optimize", 
              on_iteration: Optional[Callable[[Any], None]] = None, 
              verbose: bool = False) -> "EvaluationLoopResult":

The stub describes condition-based termination with a callable, while agent.py implements criteria/threshold-based quality evaluation. These are fundamentally different APIs.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/execution_mixin.py` around lines
131 - 146, The stubbed run_until in execution_mixin.py has the wrong signature
and behavior compared to the real implementation in agent.py; replace the stub
with the implementation from agent.py (the function defined at
agent.py:3351-3410) so run_until(self, prompt: str, criteria: str, threshold:
float = 8.0, max_iterations: int = 5, mode: str = "optimize", on_iteration:
Optional[Callable[[Any], None]] = None, verbose: bool = False) ->
"EvaluationLoopResult" is used, ensure any required imports/types (Optional,
Callable, Any, EvaluationLoopResult) are available in execution_mixin.py, remove
the NotImplementedError stub, and update any callers to match the
criteria/threshold-based API rather than the Callable condition variant.


async def run_until_async(self, condition: Callable[[], bool], prompt: str,
max_iterations: int = 50, **kwargs) -> Any:
"""
Async version of run_until().

Args:
condition: Function that returns True when execution should stop
prompt: Input prompt for execution
max_iterations: Maximum iterations before stopping
**kwargs: Additional configuration

Returns:
Results when condition is met or max iterations reached
Comment on lines +131 to +160
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The run_until/run_until_async stubs here use a different contract (condition callback + max_iterations) than Agent.run_until/Agent.run_until_async in agent.py (evaluation loop with criteria/threshold/mode/etc). Since this file is intended to host extracted implementations, please update these stubs to mirror the existing Agent method signatures and semantics to avoid breaking changes when the extraction happens.

Suggested change
def run_until(self, condition: Callable[[], bool], prompt: str, max_iterations: int = 50,
**kwargs) -> Any:
"""
Run the agent until a specific condition is met.
Args:
condition: Function that returns True when execution should stop
prompt: Input prompt for execution
max_iterations: Maximum iterations before stopping
**kwargs: Additional configuration
Returns:
Results when condition is met or max iterations reached
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")
async def run_until_async(self, condition: Callable[[], bool], prompt: str,
max_iterations: int = 50, **kwargs) -> Any:
"""
Async version of run_until().
Args:
condition: Function that returns True when execution should stop
prompt: Input prompt for execution
max_iterations: Maximum iterations before stopping
**kwargs: Additional configuration
Returns:
Results when condition is met or max iterations reached
def run_until(
self,
prompt: str,
criteria: Optional[Union[str, List[str]]] = None,
threshold: Optional[float] = None,
mode: Literal["any", "all"] = "any",
max_iterations: int = 50,
**kwargs,
) -> Any:
"""
Run the agent in a loop until evaluation criteria are satisfied.
This mirrors Agent.run_until in agent.py, using an evaluation loop
controlled by criteria/threshold/mode instead of a condition callback.
Args:
prompt: Input prompt for execution.
criteria: One or more evaluation criteria used to decide when to stop.
threshold: Optional score/metric threshold to consider the criteria satisfied.
mode: How to aggregate multiple criteria, e.g. "any" or "all".
max_iterations: Maximum number of iterations before stopping.
**kwargs: Additional configuration options forwarded to the Agent.
Returns:
Results when the evaluation criteria are met or max iterations are reached.
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")
async def run_until_async(
self,
prompt: str,
criteria: Optional[Union[str, List[str]]] = None,
threshold: Optional[float] = None,
mode: Literal["any", "all"] = "any",
max_iterations: int = 50,
**kwargs,
) -> Any:
"""
Async version of run_until().
This mirrors Agent.run_until_async in agent.py, using an evaluation loop
controlled by criteria/threshold/mode instead of a condition callback.
Args:
prompt: Input prompt for execution.
criteria: One or more evaluation criteria used to decide when to stop.
threshold: Optional score/metric threshold to consider the criteria satisfied.
mode: How to aggregate multiple criteria, e.g. "any" or "all".
max_iterations: Maximum number of iterations before stopping.
**kwargs: Additional configuration options forwarded to the Agent.
Returns:
Results when the evaluation criteria are met or max iterations are reached.

Copilot uses AI. Check for mistakes.
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")

def _run_verification_hooks(self) -> List[Dict[str, Any]]:
"""
Run verification hooks during execution.

Returns:
List of hook results
"""
# This method needs to be implemented by moving logic from agent.py
raise NotImplementedError("This method needs to be moved from agent.py")

def _start_run(self, input_content: str) -> None:
"""
Initialize a new execution run.

Args:
input_content: The input that started this run
"""
# This method needs to be implemented by moving logic from agent.py
pass

def _end_run(self, output_content: str, status: str = "completed",
metrics: Optional[Dict[str, Any]] = None) -> None:
"""
Finalize the current execution run.

Args:
output_content: The output from this run
status: Completion status (completed, failed, etc.)
metrics: Optional execution metrics
"""
# This method needs to be implemented by moving logic from agent.py
pass
Loading
Loading