83 changes: 83 additions & 0 deletions src/uipath/_cli/cli_debug.py
@@ -1,4 +1,7 @@
import asyncio
import json
import uuid
from pathlib import Path

import click
from uipath.core.tracing import UiPathTraceManager
@@ -14,6 +17,17 @@

from uipath._cli._chat._bridge import get_chat_bridge
from uipath._cli._debug._bridge import get_debug_bridge
from uipath._cli._evals._models._evaluation_set import (
EvaluationItem,
EvaluationSimulationTool,
LLMMockingStrategy,
MockingStrategyType,
)
from uipath._cli._evals._span_collection import ExecutionSpanCollector
from uipath._cli._evals.mocks.mocks import (
clear_execution_context,
set_execution_context,
)
from uipath._cli._utils._debug import setup_debugging
from uipath._cli._utils._studio_project import StudioClient
from uipath._utils._bindings import ResourceOverwritesContext
@@ -26,6 +40,60 @@
console = ConsoleLogger()


def load_simulation_config() -> EvaluationItem | None:
"""Load simulation.json from current directory and convert to EvaluationItem.

Returns:
EvaluationItem with LLM mocking strategy if simulation.json exists and is valid,
None otherwise.
"""
simulation_path = Path.cwd() / "simulation.json"

if not simulation_path.exists():
return None

try:
with open(simulation_path, "r", encoding="utf-8") as f:
simulation_data = json.load(f)

# Check if simulation is enabled
if not simulation_data.get("enabled", True):
return None

# Extract tools to simulate
tools_to_simulate = [
EvaluationSimulationTool(name=tool["name"])
for tool in simulation_data.get("toolsToSimulate", [])
]

if not tools_to_simulate:
return None

# Create LLM mocking strategy
mocking_strategy = LLMMockingStrategy(
type=MockingStrategyType.LLM,
prompt=simulation_data.get("instructions", ""),
tools_to_simulate=tools_to_simulate,
)

# Create a minimal EvaluationItem for debugging
# We use empty inputs since the actual input comes from the debug command
eval_item = EvaluationItem(
id=str(uuid.uuid4()),
name="debug-simulation",
inputs={},
evaluation_criterias={},
mocking_strategy=mocking_strategy,
)

console.info(f"Loaded simulation config for {len(tools_to_simulate)} tool(s)")
return eval_item

except Exception as e:
console.warning(f"Failed to load simulation.json: {e}")
return None


@click.command()
@click.argument("entrypoint", required=False)
@click.argument("input", required=False, default=None)
@@ -114,6 +182,17 @@ async def execute_debug_runtime():
debug_runtime: UiPathRuntimeProtocol | None = None
factory: UiPathRuntimeFactoryProtocol | None = None

# Load simulation config and set up execution context for tool mocking
eval_item = load_simulation_config()
span_collector: ExecutionSpanCollector | None = None
execution_id = str(uuid.uuid4())

if eval_item:
# Create span collector for trace access during mocking
span_collector = ExecutionSpanCollector()
# Set execution context to enable tool simulation
set_execution_context(eval_item, span_collector, execution_id)

try:
trigger_poll_interval: float = 5.0

@@ -163,6 +242,10 @@ async def execute_debug_runtime():
)

finally:
# Clear execution context after debugging completes
if eval_item:
clear_execution_context()

if debug_runtime:
await debug_runtime.dispose()
if chat_runtime: