diff --git a/.gitignore b/.gitignore
index 2c4a19f6f8..a7c492d655 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,4 +34,7 @@ package-lock.json
 
 # Env
 .env
-/assembly/.env
\ No newline at end of file
+/assembly/.env
+
+# Tests
+test-results/*
\ No newline at end of file
diff --git a/cookbook/basic-python.mdx b/cookbook/basic-python.mdx
new file mode 100644
index 0000000000..d61ba188ba
--- /dev/null
+++ b/cookbook/basic-python.mdx
@@ -0,0 +1,188 @@
+---
+title: Hello World
+description: Simple example demonstrating how to call an LLM from Temporal using the OpenAI Python API library.
+tags: [foundations, openai, python]
+source: https://github.com/temporalio/ai-cookbook/tree/main/foundations/hello_world_openai_responses_python
+priority: 999
+---
+
+This is a simple example showing how to call an LLM from Temporal using the [OpenAI Python API library](https://github.com/openai/openai-python).
+
+Being an external API call, the LLM invocation happens in a Temporal Activity.
+
+This recipe highlights three key design decisions:
+
+- A generic activity for invoking an LLM API. This activity can be reused with different arguments throughout your codebase.
+- Configuring the Temporal client with a `data_converter` to allow serialization of Pydantic types.
+- Retries are handled by Temporal and not by the underlying libraries such as the OpenAI client. This is important because if you leave the client retries on, they can interfere with correct and durable error handling and recovery.
+
+
+## Create the Activity
+
+We create a wrapper for the `create` method of the `AsyncOpenAI` client object.
+This is a generic activity that invokes the OpenAI LLM.
+
+We set `max_retries=0` when creating the `AsyncOpenAI` client.
+This moves the responsibility for retries from the OpenAI client to Temporal.
+
+In this implementation, we include only the `instructions` and `input` arguments, but it could be extended to others.
+
+*File: activities/openai_responses.py*
+```python
+
+from temporalio import activity
+from openai import AsyncOpenAI
+from openai.types.responses import Response
+from dataclasses import dataclass
+
+# Temporal best practice: Create a data structure to hold the request parameters.
+@dataclass
+class OpenAIResponsesRequest:
+    model: str
+    instructions: str
+    input: str
+
+@activity.defn
+async def create(request: OpenAIResponsesRequest) -> Response:
+    # Temporal best practice: Disable retry logic in OpenAI API client library.
+    client = AsyncOpenAI(max_retries=0)
+
+    resp = await client.responses.create(
+        model=request.model,
+        instructions=request.instructions,
+        input=request.input,
+        timeout=15,
+    )
+
+    return resp
+```
+
+## Create the Workflow
+
+In this example, we take the user input and generate a response in haiku format, using the OpenAI Responses activity. The
+Workflow returns `result.output_text` from the OpenAI `Response`.
+
+As usual, the activity retry configuration is set here in the Workflow. In this case, a retry policy is not specified
+so the default retry policy is used (exponential backoff with 1s initial interval, 2.0 backoff coefficient, max interval
+100× initial, unlimited attempts, no non-retryable errors).
+
+*File: workflows/hello_world_workflow.py*
+```python
+from temporalio import workflow
+from datetime import timedelta
+
+from activities import openai_responses
+
+
+@workflow.defn
+class HelloWorld:
+    @workflow.run
+    async def run(self, input: str) -> str:
+        system_instructions = "You only respond in haikus."
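+        # No retry_policy is passed to execute_activity below, so the default
+        # policy described above applies (1s initial interval, 2.0 backoff
+        # coefficient, 100s maximum interval, unlimited attempts). An explicit
+        # equivalent would look roughly like (RetryPolicy comes from temporalio.common):
+        #   retry_policy=RetryPolicy(initial_interval=timedelta(seconds=1), backoff_coefficient=2.0)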
+ result = await workflow.execute_activity( + openai_responses.create, + openai_responses.OpenAIResponsesRequest( + model="gpt-4o-mini", + instructions=system_instructions, + input=input, + ), + start_to_close_timeout=timedelta(seconds=30), + ) + return result.output_text +``` + +## Create the Worker + +Create the process for executing Activities and Workflows. +We configure the Temporal client with `pydantic_data_converter` so Temporal can serialize/deserialize output of the OpenAI SDK. + +*File: worker.py* +```python +import asyncio + +from temporalio.client import Client +from temporalio.worker import Worker + +from workflows.hello_world_workflow import HelloWorld +from activities import openai_responses +from temporalio.contrib.pydantic import pydantic_data_converter + + +async def main(): + client = await Client.connect( + "localhost:7233", + data_converter=pydantic_data_converter, + ) + + worker = Worker( + client, + task_queue="hello-world-python-task-queue", + workflows=[ + HelloWorld, + ], + activities=[ + openai_responses.create, + ], + ) + await worker.run() + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Create the Workflow Starter + +The starter script submits the workflow to Temporal for execution, then waits for the result and prints it out. +It uses the `pydantic_data_converter` to match the Worker configuration. + +*File: start_workflow.py* +```python +import asyncio + +from temporalio.client import Client + +from workflows.hello_world_workflow import HelloWorld +from temporalio.contrib.pydantic import pydantic_data_converter + + +async def main(): + client = await Client.connect( + "localhost:7233", + data_converter=pydantic_data_converter, + ) + + # Submit the Hello World workflow for execution + result = await client.execute_workflow( + HelloWorld.run, + "Tell me about recursion in programming.", + id="my-workflow-id", + task_queue="hello-world-python-task-queue", + ) + print(f"Result: {result}") + + +if __name__ == "__main__": + asyncio.run(main()) + +``` + +## Running + +Start the Temporal Dev Server: + +```bash +temporal server start-dev +``` + +Run the worker: + +```bash +uv run python -m worker +``` + +Start execution: + +```bash +uv run python -m start_workflow +``` \ No newline at end of file diff --git a/cookbook/deep-research-python.mdx b/cookbook/deep-research-python.mdx new file mode 100644 index 0000000000..ea943e92cc --- /dev/null +++ b/cookbook/deep-research-python.mdx @@ -0,0 +1,525 @@ +--- +title: Deep Research +description: Build a simple deep research system embodying the standard deep research architecture. +tags: [agents, toolcalling, python] +priority: 399 +source: https://github.com/temporalio/ai-cookbook/tree/main/deep_research/basic_openai_python +--- + +Deep research systems combine multiple agents with information retrieval from +the web or other sources to produce evidence-based reports on specific topics. +Commercial implementations include +[Anthropic Research](https://www.anthropic.com/engineering/multi-agent-research-system), +[OpenAI Deep Research](https://openai.com/index/introducing-deep-research/), and +[Google Gemini Deep Research](https://gemini.google/overview/deep-research/). + +This recipe demonstrates a simple deep research system embodying the standard +deep research architecture. Deep research spans the following four phases: + +- **Planning**. Task decomposition and research strategy formulation. This + involves identifying separate aspects of the research problem that can be + worked on independently. 
+- **Question Development/Query Generation**. Designing queries for each of the + research questions. +- **Web Exploration/Information Retrieval**. Searching the web to retrieve + documents relevant to the research question. Extracting and summarizing + relevant information. +- **Report Generation/Synthesis**. Synthesizing findings into comprehensive, + well-cited reports. + +Deep research tasks can involve dozens of searches and process hundreds of +documents. This creates many possible failure modes that durable execution helps +protect against. + +This recipe uses OpenAI's Responses API, which includes a tool for web search. +It also uses OpenAI's +[Structured Outputs API](https://platform.openai.com/docs/guides/structured-outputs), +which asks the model to generate outputs corresponding to desired data +structures. + +## Create the data structures + +We will use Python classes to ensure information passes between agents in a +structured way. + +The Planning Agent creates a `ResearchPlan`, which includes a research question, +a list of `ResearchAspects`, expected sources, a search strategy, and success +criteria. ResearchAspects include an aspect name, a priority, and a description. + +```python +class ResearchPlan(BaseModel): + research_question: str + key_aspects: List[ResearchAspect] + expected_sources: List[str] + search_strategy: str + success_criteria: List[str] +``` + +```python +class ResearchAspect(BaseModel): + aspect: str + priority: int + description: str +``` + +The Query Generation Agent creates a `QueryPlan`, and generates a list of +`SearchQueries`. + +```python +class QueryPlan(BaseModel): + queries: List[SearchQuery] +``` + +```python +class SearchQuery(BaseModel): + query: str + rationale: str + expected_info_type: str + priority: int +``` + +The Web Search Agent creates a `SearchResult`, which includes a query, a list of +sources, a key finding, a relevance score, and a list of citations. + +```python +class SearchResult(BaseModel): + query: str + sources: List[str] + key_findings: str + relevance_score: float + citations: List[str] +``` + +Finally, the Report Synthesis Agent creates a `ResearchReport`, which includes +an executive summary, a detailed analysis, a list of key findings, a confidence +assessment, a list of citations, and a list of follow-up questions. + +```python +class ResearchReport(BaseModel): + executive_summary: str + detailed_analysis: str + key_findings: List[str] + confidence_assessment: str + citations: List[str] + follow_up_questions: List[str] +``` + +## Create the Agents + +The deep research system uses four specialized agents, each implemented as +Temporal activities. In this implementation, each agent is implemented as a +single call to the OpenAI Responses API. + +This is possible because we are using structured outputs, which guarantee the +response will be in the correct format, eliminating the need for retries. + +The web search agent also requires only a single API call because OpenAI +integrates the web search tool into the Responses API. + +These agents run in the Workflow and use the `invoke_model` activity to make +OpenAI API calls. It is critical to set the `start_to_close_timeout` for these +activities to a value that is long enough to complete the task. If it is too +short, the activity will fail with a timeout error, causing a retry loop that +never completes. Response times for reasoning models such as `GPT-5` can vary +significantly depending on the nature of the request. 
Web search times also vary +depending on the size and content of the documents located by the search. + +### Research Planning Agent + +Analyzes research queries and creates comprehensive research strategies. Takes +an unstructured question and decomposes it into specific research aspects with +priorities, identifies expected source types, and defines success criteria. + +*File: agents/research_planning.py* + +```python +from .models import ResearchPlan +from .config import COMPLEX_REASONING_MODEL +from activities.invoke_model import invoke_model, InvokeModelRequest +from temporalio import workflow +from datetime import timedelta + +RESEARCH_PLANNING_INSTRUCTIONS = """ +You are a research planning specialist who creates focused research strategies. + +CORE RESPONSIBILITIES: +1. Decompose the user's question into 3-7 key research aspects +2. Identify required sources and evidence types +3. Design a practical search strategy +4. Set clear success criteria + +OUTPUT REQUIREMENTS: +- research_question: Clarified version of the original query +- key_aspects: Specific areas requiring investigation, each with: + - aspect: The research area name + - priority: 1-5 ranking (5 highest priority) + - description: What needs to be investigated +- expected_sources: Types of sources likely to contain relevant information +- search_strategy: High-level approach for information gathering +- success_criteria: Specific indicators of research completeness +""" + + +async def plan_research(query: str) -> ResearchPlan: + result = await workflow.execute_activity( + invoke_model, + InvokeModelRequest( + model=COMPLEX_REASONING_MODEL, + instructions=RESEARCH_PLANNING_INSTRUCTIONS, + input=f"Research query: {query}", + response_format=ResearchPlan, + ), + start_to_close_timeout=timedelta(seconds=300), + summary="Planning research", + ) + return result.response +``` + +### Query Generation Agent + +Converts research plans into optimized web search queries. Creates 3-5 diverse +queries that target different information types (factual data, expert analysis, +case studies, recent news) with varied search styles and temporal modifiers. + +*File: agents/research_query_generation.py* + +```python +from .models import QueryPlan, ResearchPlan +from .config import EFFICIENT_PROCESSING_MODEL +from activities.invoke_model import invoke_model, InvokeModelRequest +from temporalio import workflow +from datetime import timedelta + +QUERY_GENERATION_INSTRUCTIONS = """ +You are a search query specialist who crafts effective web searches. + +CORE RESPONSIBILITIES: +1. Generate 3-5 diverse search queries based on the research plan +2. Balance specificity with discoverability +3. 
Target different information types (factual, analytical, recent, historical) + +APPROACH: +- Vary query styles: direct questions, topic + keywords, source-specific searches +- Include temporal modifiers when relevant (recent, 2024, historical) +- Use domain-specific terminology appropriately + +OUTPUT REQUIREMENTS: +- queries: Search queries, each with: + - query: The actual search string + - rationale: Why this query addresses research needs + - expected_info_type: One of "factual_data", "expert_analysis", "case_studies", "recent_news" + - priority: 1-5 (5 highest priority) +""" + + +async def generate_queries(research_plan: ResearchPlan) -> QueryPlan: + # Prepare input with research plan context + plan_context = f""" +Research Question: {research_plan.research_question} + +Key Aspects to Research: +{chr(10).join([f"- {aspect.aspect} (Priority: {aspect.priority}): {aspect.description}" for aspect in research_plan.key_aspects])} + +Expected Sources: {", ".join(research_plan.expected_sources)} +Search Strategy: {research_plan.search_strategy} +Success Criteria: {", ".join(research_plan.success_criteria)} +""" + + result = await workflow.execute_activity( + invoke_model, + InvokeModelRequest( + model=EFFICIENT_PROCESSING_MODEL, + instructions=QUERY_GENERATION_INSTRUCTIONS, + input=plan_context, + response_format=QueryPlan, + ), + start_to_close_timeout=timedelta(seconds=300), + summary="Generating search queries", + ) + + return result.response +``` + +### Web Search Agent + +Executes searches using OpenAI's web search tool and analyzes results. +Prioritizes authoritative sources, extracts key findings, assesses relevance, +and provides proper citations with reliability assessments. + +*File: agents/research_web_search.py* + +```python +from .models import SearchResult, SearchQuery +from .config import EFFICIENT_PROCESSING_MODEL +from activities.invoke_model import invoke_model, InvokeModelRequest +from temporalio import workflow +from datetime import timedelta + +WEB_SEARCH_INSTRUCTIONS = """ +You are a web research specialist who finds and evaluates information from web sources. + +CORE RESPONSIBILITIES: +1. Execute web searches using the web search tool +2. Prioritize authoritative sources: academic, government, established research organizations, prominent news outlets, primary sources +3. Extract key information relevant to the research question +4. Provide proper citations and assess reliability + +APPROACH: +- Focus on information directly relevant to the research question +- Extract specific facts, data points, and evidence +- Note conflicting information and limitations +- Flag questionable or unverified claims + +OUTPUT REQUIREMENTS: +- query: The search query that was executed +- sources: URLs and source descriptions consulted +- key_findings: Synthesized information relevant to research question (2-4 paragraphs) +- relevance_score: 0.0-1.0 assessment of how well results address the query +- citations: Formatted sources with URLs +""" + + +async def search_web(query: SearchQuery) -> SearchResult: + search_input = f""" +Search Query: {query.query} +Query Rationale: {query.rationale} +Expected Information Type: {query.expected_info_type} +Priority Level: {query.priority} + +Please search for information using the provided query and analyze the results according to the instructions. 
+""" + result = await workflow.execute_activity( + invoke_model, + InvokeModelRequest( + model=EFFICIENT_PROCESSING_MODEL, + instructions=WEB_SEARCH_INSTRUCTIONS, + input=search_input, + response_format=SearchResult, + tools=[{"type": "web_search"}], + ), + start_to_close_timeout=timedelta(seconds=300), + summary="Searching web for information", + ) + return result.response +``` + +### Report Synthesis Agent + +Directs the agent to synthesize all research findings into comprehensive, +well-cited reports. These should include structured narratives with executive +summaries, detailed analysis, key findings, confidence assessments, and +follow-up research questions. + +*File: agents/research_report_synthesis.py* + +```python +from typing import List +from temporalio import workflow +from datetime import timedelta +from .models import ResearchReport, ResearchPlan, SearchResult +from .config import COMPLEX_REASONING_MODEL +from activities.invoke_model import invoke_model, InvokeModelRequest + +REPORT_SYNTHESIS_INSTRUCTIONS = """ +You are a research synthesis expert who creates comprehensive research reports. + +CORE RESPONSIBILITIES: +1. Synthesize all research into a coherent narrative +2. Structure information logically with evidence support +3. Provide comprehensive citations +4. Assess confidence levels and acknowledge limitations +5. Generate follow-up questions for deeper research + +REPORT STRUCTURE: +1. **Executive Summary**: Core findings and conclusions (1-2 paragraphs) +2. **Detailed Analysis**: Examination organized by themes with evidence +3. **Key Findings**: Bullet-point list of important discoveries +4. **Confidence Assessment**: Rate findings as High/Medium/Low/Uncertain +5. **Citations**: Complete source list with URLs +6. **Follow-up Questions**: Up to 5 areas for additional research, as warranted + +APPROACH: +- Address contradictory findings transparently +- Weight authoritative sources more heavily +- Distinguish facts from expert opinions +- Be explicit about information limitations + +OUTPUT REQUIREMENTS: +- executive_summary: 1-2 paragraph summary of core findings +- detailed_analysis: Multi-paragraph analysis organized by themes +- key_findings: Bullet-point discoveries +- confidence_assessment: Assessment of finding reliability +- citations: All sources referenced +- follow_up_questions: 3-5 specific questions for further research +""" + + +async def generate_synthesis( + original_query: str, research_plan: ResearchPlan, search_results: List[SearchResult] +) -> ResearchReport: + # Prepare comprehensive input with all research context + synthesis_input = f""" +ORIGINAL RESEARCH QUERY: {original_query} + +RESEARCH PLAN: +Research Question: {research_plan.research_question} +Key Aspects Investigated: { + ", ".join([aspect.aspect for aspect in research_plan.key_aspects]) + } +Search Strategy Used: {research_plan.search_strategy} +Success Criteria: {", ".join(research_plan.success_criteria)} + +SEARCH RESULTS TO SYNTHESIZE: +{ + chr(10).join( + [ + f"Query: {result.query}{chr(10)}Findings: {result.key_findings}{chr(10)}Relevance: {result.relevance_score}{chr(10)}Sources: {', '.join(result.sources)}{chr(10)}Citations: {', '.join(result.citations)}{chr(10)}" + for result in search_results + ] + ) + } + +Please synthesize all this information into a comprehensive research report following the specified structure and quality standards. 
+""" + result = await workflow.execute_activity( + invoke_model, + InvokeModelRequest( + model=COMPLEX_REASONING_MODEL, + instructions=REPORT_SYNTHESIS_INSTRUCTIONS, + input=synthesis_input, + response_format=ResearchReport, + ), + start_to_close_timeout=timedelta(seconds=300), + summary="Generating research report synthesis", + ) + + return result.response +``` + +## Create the Workflow + +The `DeepResearchWorkflow` orchestrates the four-phase research process with +built-in resilience and error handling: + +First, planning and query generation agents are run sequentially. Then, the +workflow executes searches concurrently. For robustness, the workflow continues +with partial results if some searches fail. Finally, the report synthesis agent +pulls together the findings into a comprehensive report. + +*File: workflows/deep_research_workflow.py* + +```python +from temporalio import workflow +from temporalio.exceptions import ApplicationError +import asyncio +from typing import List + +from agents.research_planning import plan_research +from agents.research_query_generation import generate_queries +from agents.research_web_search import search_web +from agents.research_report_synthesis import generate_synthesis +from agents.models import SearchResult + + +@workflow.defn +class DeepResearchWorkflow: + @workflow.run + async def run(self, query: str) -> str: + # Step 1: Research Planning + research_plan = await plan_research(query) + + # Step 2: Query Generation + query_plan = await generate_queries(research_plan) + + # Step 3: Web Search (parallel execution with resilience) + search_results = await self._execute_searches(query_plan.queries) + + # Ensure we have at least one successful search result + if not search_results: + raise ApplicationError( + "All web searches failed - cannot generate report", + "NO_SEARCH_RESULTS", + non_retryable=True, + ) + + # Step 4: Report Synthesis + final_report = await generate_synthesis(query, research_plan, search_results) + + # Format the final output + formatted_report = self._format_final_report(query, final_report) + return formatted_report + + async def _execute_searches(self, search_queries) -> List[SearchResult]: + """Execute web searches in parallel with resilience to individual failures""" + + # Create individual search coroutines + async def execute_single_search(search_query): + try: + return await search_web(search_query) + except Exception as e: + workflow.logger.exception( + f"Search failed for query '{search_query.query}': {e}" + ) + return None + + # Execute all searches in parallel + search_tasks = [execute_single_search(query) for query in search_queries] + results = await asyncio.gather(*search_tasks) + + # Filter out None results + return [result for result in results if result is not None] + + def _format_final_report(self, original_query, report) -> str: + """Format the final report for display""" + return f""" +# Deep Research Report + +**Research Query:** {original_query} + +## Executive Summary +{report.executive_summary} + +## Detailed Analysis +{report.detailed_analysis} + +## Key Findings +{chr(10).join([f"• {finding}" for finding in report.key_findings])} + +## Confidence Assessment +{report.confidence_assessment} + +## Sources and Citations +{chr(10).join([f"• {citation}" for citation in report.citations])} + +## Recommended Follow-up Questions +{chr(10).join([f"• {question}" for question in report.follow_up_questions])} + +""" + +``` + +## Running + +Start the Temporal Dev Server: + +```bash +temporal server start-dev +``` + 
+Run the worker: + +```bash +uv run python -m worker +``` + +Start execution: + +```bash +uv run python -m start_workflow +``` + +To start execution with a specific query: + +```bash +uv run python -m start_workflow "What is the latest news on the stock market?" +``` diff --git a/cookbook/durable-agent-with-tools.mdx b/cookbook/durable-agent-with-tools.mdx new file mode 100644 index 0000000000..2b1e3e3e64 --- /dev/null +++ b/cookbook/durable-agent-with-tools.mdx @@ -0,0 +1,206 @@ +--- +title: Durable Agent using OpenAI Agents SDK +description: Build a durable AI agent with OpenAI Agents SDK and Temporal that can intelligently choose tools to answer user questions +tags: [agents, python, openai] +priority: 700 +source: https://github.com/temporalio/ai-cookbook/tree/main/agents/openai_agents_sdk_python +--- + +In this example, we show you how to build a Durable Agent using the [OpenAI Agents SDK Integration for Temporal](https://github.com/temporalio/sdk-python/tree/main/temporalio/contrib/openai_agents). The AI agent we build will have access to [tools](https://github.com/temporalio/sdk-python/tree/main/temporalio/contrib/openai_agents#tool-calling) (Temporal Activities) to answer user questions. The agent can determine which tools to use based on the user's input and execute them as needed. + +This recipe highlights key implementation patterns: + +- **Agent-based architecture**: Uses the OpenAI Agents SDK to create an intelligent agent that can reason about which tools to use and handles LLM invocation for you. +- **Tool integration**: Temporal Activities can be seamlessly used as tools by the agent. The integration offers the **activity_as_tool** helper function, which: + - Automatically generates OpenAI-compatible tool schemas from activity function signatures + - Wraps activities as agent tools that can be provided directly to the Agent + - Enables the agent to invoke Temporal Activities as tools leveraging Temporal's durable execution for tool calls +- **Durable execution**: The agent's state and execution are managed by Temporal, providing reliability and observability +- **Plugin configuration**: Uses the `OpenAIAgentsPlugin` to configure Temporal for OpenAI Agents SDK integration + +## Create the Activity + +We create activities that serve as tools for the agent. These activities can perform various tasks like getting weather information or performing calculations. + +*File: activities/tools.py* + +```python +from dataclasses import dataclass +from temporalio import activity +import math + +# Temporal best practice: Create a data structure to hold the request parameters. +@dataclass +class Weather: + city: str + temperature_range: str + conditions: str + +@activity.defn +async def get_weather(city: str) -> Weather: + """Get the weather for a given city.""" + return Weather(city=city, temperature_range="14-20C", conditions="Sunny with wind.") + +@activity.defn +async def calculate_circle_area(radius: float) -> float: + """Calculate the area of a circle given its radius.""" + return math.pi * radius ** 2 +``` + +## Create the Workflow + +The workflow creates an agent with specific instructions and tools. The agent can then process user input and decide which tools to use to answer questions. Since LLM invocation is an external API call, this typically would happen in a Temporal Activity. However, because of the Temporal Integration with OpenAI Agents SDK, this is being handled for us and we do not need to implement the Activity ourselves. 
+ +*File: workflows/hello_world_workflow.py* + +```python +from temporalio import workflow +from datetime import timedelta + +from agents import Agent, Runner +from temporalio.contrib import openai_agents + +from activities.tools import get_weather, calculate_circle_area + +@workflow.defn +class HelloWorldAgent: + @workflow.run + async def run(self, prompt: str) -> str: + agent = Agent( + name="Hello World Agent", + instructions="You are a helpful assistant that determines what tool to use based on the user's question.", + # Tools for the agent to use that are defined as activities + tools=[ + openai_agents.workflow.activity_as_tool( + get_weather, + start_to_close_timeout=timedelta(seconds=10) + ), + openai_agents.workflow.activity_as_tool( + calculate_circle_area, + start_to_close_timeout=timedelta(seconds=10) + ) + ] + ) + + result = await Runner.run(agent, prompt) + return result.final_output +``` + +## Create the Worker + +Create the process for executing Activities and Workflows. +We configure the Temporal client with the `OpenAIAgentsPlugin` to enable OpenAI Agents SDK integration. + +*File: worker.py* + +```python +import asyncio +from datetime import timedelta + +from temporalio.client import Client +from temporalio.worker import Worker +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin, ModelActivityParameters + +from workflows.hello_world_workflow import HelloWorldAgent +from activities.tools import get_weather, calculate_circle_area + +async def worker_main(): + # Use the plugin to configure Temporal for use with OpenAI Agents SDK + client = await Client.connect( + "localhost:7233", + plugins=[ + OpenAIAgentsPlugin( + model_params=ModelActivityParameters( + start_to_close_timeout=timedelta(seconds=30) + ) + ), + ], + ) + + worker = Worker( + client, + task_queue="hello-world-openai-agent-task-queue", + workflows=[HelloWorldAgent], + activities=[get_weather, calculate_circle_area], + ) + await worker.run() + +if __name__ == "__main__": + asyncio.run(worker_main()) +``` + +## Create the Workflow Starter + +The starter script submits the agent workflow to Temporal for execution, then waits for the result and prints it out. +It uses the `OpenAIAgentsPlugin` to match the Worker configuration. 
+ +*File: start_workflow.py* + +```python +import asyncio + +from temporalio.client import Client +from temporalio.common import WorkflowIDReusePolicy +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin +from workflows.hello_world_workflow import HelloWorldAgent + +async def main(): + client = await Client.connect( + "localhost:7233", + # Use the plugin to configure Temporal for use with OpenAI Agents SDK + plugins=[OpenAIAgentsPlugin()], + ) + + # Start workflow + print( 80 * "-" ) + + # Get user input + user_input = input("Enter a question: ") + + # Submit the Hello World Agent workflow for execution + result = await client.execute_workflow( + HelloWorldAgent.run, + user_input, + id="my-workflow-id", + task_queue="hello-world-openai-agent-task-queue", + id_reuse_policy=WorkflowIDReusePolicy.TERMINATE_IF_RUNNING, + ) + print(f"Result: {result}") + + # End of workflow + print( 80 * "-" ) + print("Workflow completed") + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Running + +Start the Temporal Dev Server: + +```bash +temporal server start-dev +``` + +Run the worker: + +```bash +uv run python -m worker +``` + +Start execution: + +```bash +uv run python -m start_workflow +``` + +## Example Interactions + +Try asking the agent questions like: + +- "What's the weather in London?" +- "Calculate the area of a circle with radius 5" +- "What's the weather in Tokyo and calculate the area of a circle with radius 3" + +The agent will determine which tools to use and provide intelligent responses based on the available tools. Use the [OpenAI Traces dashboard](https://platform.openai.com/traces) to visualize and monitor your workflows and tool calling. diff --git a/cookbook/hello-world-litellm-python.mdx b/cookbook/hello-world-litellm-python.mdx new file mode 100644 index 0000000000..42135e4a19 --- /dev/null +++ b/cookbook/hello-world-litellm-python.mdx @@ -0,0 +1,240 @@ +--- +title: Hello World with LiteLLM +description: Integrate LiteLLM into a Temporal Workflow in Python. +tags: [foundations, python, litellm] +priority: 980 +source: https://github.com/temporalio/ai-cookbook/tree/main/foundations/hello_world_litellm_python +--- + +[LiteLLM](https://github.com/BerriAI/litellm) is a library for calling LLMs from Python. It makes it easy to access, and switch between, many providers, including OpenAI, Anthropic, Google, and more. + +This recipe mirrors the [Basic Python recipe](./basic-python), but swaps the OpenAI SDK for LiteLLM. The workflow still delegates LLM calls to an Activity, letting Temporal coordinate retries and durability, while LiteLLM forwards those calls to your configured provider. + +Key points: + +- A reusable Activity that wraps `litellm.acompletion` and keeps retries in Temporal. +- The most common LiteLLM parameters are on `LiteLLMRequest` ensuring type checking and IDE completion. Others may be passed via the `extra_options` dictionary, which functions as `kwargs` for `litellm.acompletion`. +- The Activity returns the full LiteLLM response for processing by the workflow. 
+ +## Create the Activity + +`activities/models.py` + +```python +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Type, Union + + +@dataclass +class LiteLLMRequest: + model: str + messages: List[Dict[str, Any]] + temperature: Optional[float] = None + max_tokens: Optional[int] = None + timeout: Optional[Union[float, int]] = None + response_format: Optional[Union[dict, Type[Any]]] = None + extra_options: Dict[str, Any] = field(default_factory=dict) + + def to_acompletion_kwargs(self) -> Dict[str, Any]: + kwargs = { + "model": self.model, + "messages": self.messages, + } + + optional_values = { + "temperature": self.temperature, + "max_tokens": self.max_tokens, + "timeout": self.timeout, + "response_format": self.response_format, + } + + for key, value in optional_values.items(): + if value is not None: + kwargs[key] = value + + if self.extra_options: + kwargs.update(self.extra_options) + + return kwargs +``` + +`activities/litellm_completion.py` + +```python +from typing import Any, Dict + +import litellm +from temporalio import activity +from temporalio.exceptions import ApplicationError + +from activities.models import LiteLLMRequest + + +@activity.defn(name="activities.litellm_completion.create") +async def create(request: LiteLLMRequest) -> Dict[str, Any]: + kwargs = request.to_acompletion_kwargs() + kwargs["num_retries"] = 0 + + try: + response = await litellm.acompletion(**kwargs) + except ( + litellm.AuthenticationError, + litellm.BadRequestError, + litellm.InvalidRequestError, + litellm.UnsupportedParamsError, + litellm.JSONSchemaValidationError, + litellm.ContentPolicyViolationError, + litellm.NotFoundError, + ) as exc: + raise ApplicationError( + str(exc), + type=exc.__class__.__name__, + non_retryable=True, + ) from exc + except litellm.APIError: + raise + + return response +``` + +LiteLLM supports many providers. Configure credentials via environment variables (for example `OPENAI_API_KEY`) before running the Activity. For Google-hosted models (Vertex AI or Gemini), the sample relies on the `google-cloud-aiplatform` and `google-auth` dependencies included in `pyproject.toml`; set the usual Google application credentials (`GOOGLE_APPLICATION_CREDENTIALS`, `GOOGLE_CLOUD_PROJECT`, `VERTEXAI_LOCATION`, etc.) so LiteLLM can obtain an access token. + +## Create the Workflow + +`workflows/hello_world_workflow.py` + +```python +from datetime import timedelta + +from temporalio import workflow + +from activities.models import LiteLLMRequest + + +@workflow.defn +class HelloWorld: + @workflow.run + async def run(self, input: str) -> str: + messages = [ + {"role": "system", "content": "You only respond in haikus."}, + {"role": "user", "content": input}, + ] + response = await workflow.execute_activity( + "activities.litellm_completion.create", + LiteLLMRequest( + # LiteLLM lets you keep the same code and swap models/providers. + # model="gpt-4o-mini", + model="gemini-2.5-flash-lite", + messages=messages, + ), + start_to_close_timeout=timedelta(seconds=30), + ) + message = response["choices"][0]["message"]["content"] + if isinstance(message, list): + message = "".join( + part.get("text", "") + for part in message + if isinstance(part, dict) + ) + return message +``` + +Temporal manages Activity retries, so LiteLLM's retry helper is disabled via `num_retries=0`. Use the `extra_options` escape hatch on `LiteLLMRequest` if you need to surface additional LiteLLM parameters without editing the sample. 
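+
+For example, a request that needs a parameter beyond the typed fields might look like the following sketch; the `top_p` value is illustrative and not part of the sample files:
+
+```python
+from activities.models import LiteLLMRequest
+
+request = LiteLLMRequest(
+    model="gemini-2.5-flash-lite",
+    messages=[{"role": "user", "content": "Tell me about recursion in programming."}],
+    temperature=0.2,
+    # Anything else that litellm.acompletion accepts can ride along in extra_options.
+    extra_options={"top_p": 0.9},
+)
+
+# to_acompletion_kwargs() merges the typed fields and extra_options into one kwargs dict.
+kwargs = request.to_acompletion_kwargs()
+assert kwargs["top_p"] == 0.9
+```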
+ +## Create the Worker + +`worker.py` + +```python +import asyncio + +from temporalio.client import Client +from temporalio.worker import Worker + +from activities import litellm_completion +from workflows.hello_world_workflow import HelloWorld +from temporalio.contrib.pydantic import pydantic_data_converter + + +async def main(): + client = await Client.connect( + "localhost:7233", + data_converter=pydantic_data_converter, + ) + + worker = Worker( + client, + task_queue="hello-world-python-task-queue", + workflows=[ + HelloWorld, + ], + activities=[ + litellm_completion.create, + ], + ) + await worker.run() + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Create the Workflow Starter + +`start_workflow.py` + +```python +import asyncio + +from temporalio.client import Client +from temporalio.contrib.pydantic import pydantic_data_converter + +from workflows.hello_world_workflow import HelloWorld + + +async def main(): + client = await Client.connect( + "localhost:7233", + data_converter=pydantic_data_converter, + ) + + result = await client.execute_workflow( + HelloWorld.run, + "Tell me about recursion in programming.", + id="my-workflow-id", + task_queue="hello-world-python-task-queue", + ) + print(f"Result: {result}") + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Running + +Start the Temporal Dev Server: + +```bash +temporal server start-dev +``` + +Install dependencies + +```bash +uv sync +``` + +Set the appropriate environment variables before launching the worker (for example `export OPENAI_API_KEY=...` or export `GEMINI_API_KEY=...`) so LiteLLM can reach your chosen provider. + +Run the worker: + +```bash +uv run python -m worker +``` + +Start the workflow: + +```bash +uv run python -m start_workflow +``` \ No newline at end of file diff --git a/cookbook/http-retry-enhancement-python.mdx b/cookbook/http-retry-enhancement-python.mdx new file mode 100644 index 0000000000..072b8559a8 --- /dev/null +++ b/cookbook/http-retry-enhancement-python.mdx @@ -0,0 +1,235 @@ +--- +title: Retry Policy from HTTP Responses +description: Extract retry information from HTTP response headers and make it available to Temporal's retry mechanisms. +tags: [foundations, openai, python] +priority: 920 +source: https://github.com/temporalio/ai-cookbook/tree/main/foundations/http_retry_enhancement_python +--- + +This recipe extends the [Basic Example](./basic-python) to show how to extract retry information from HTTP response headers and make it available to Temporal's retry mechanisms. + +HTTP response codes and headers on API calls have implications for retry behavior. +For example, an HTTP `404 Not Found` generally represents an application-level error that should not be retried. +By contrast, a `500 Internal Server Error` is typically transient, so should be retried. +Servers can also set the `Retry-After` header to tell the client when to retry. + +This recipe introduces a utility function that processes the HTTP response and populates a Temporal `ApplicationError` to provide inputs to the retry mechanism. +Temporal combines this information with other configuration, such as timeouts and exponential backoff, to implement the complete retry policy. + +## Generate Temporal ApplicationErrors from HTTP responses + +We introduce a utility function that takes an `httpx.Response` object and returns a Temporal `ApplicationError` with two key fields populated: `non-retryable` and `next_retry_delay`. + +The `non-retryable` is determined by categorizing the HTTP status codes. 
+The `X-Should-Retry` HTTP response header, when present, overrides the status code. + +**Example Retryable Status Codes:** + +- **408 Request Timeout** → Retry because server is unresponsive, which can have many causes +- **409 Conflict** → Retry when resource is temporarily locked or in use +- **429 Too Many Requests** → Retry after rate limit cooldown (respect `Retry-After` header when available) +- **500 Internal Server Error** → Retry for temporary server issues +- **502 Bad Gateway** → Retry when upstream server is temporarily unavailable +- **503 Service Unavailable** → Retry when service is temporarily overloaded +- **504 Gateway Timeout** → Retry when upstream server times out + +**Example Non-Retryable Status Codes:** + +- **400 Bad Request** → Do not retry - fix request format/parameters +- **401 Unauthorized** → Do not retry - provide valid authentication +- **403 Forbidden** → Do not retry - insufficient permissions +- **404 Not Found** → Do not retry - resource does not exist +- **422 Unprocessable Entity** → Do not retry - fix request validation errors +- **Other 4xx Client Errors** → Do not retry - client-side issues need fixing +- **2xx Success** → Do not expect to see this - call succeeded +- **3xx Redirects** → Do not expect to see this - typically handled by httpx (with `follow_redirects=True`) + +If the error is retryable and if the `Retry-After` header is present, we parse it to set the retry delay. + +This implementation duplicates logic present in the [OpenAI Python API Library](https://github.com/openai/openai-python), where it is part of the code generated by [Stainless](https://www.stainless.com/). +Duplicating the logic makes sense because it is not accessible via the public library interface and because it applies to HTTP APIs in general, not just the OpenAI API. + +_File: util/http_retry.py_ + +```python + +import email.utils +import time +from datetime import timedelta +from temporalio.exceptions import ApplicationError +from temporalio import workflow +from typing import Optional, Tuple + +with workflow.unsafe.imports_passed_through(): + from httpx import Response, Headers + + +# Adapted from the OpenAI Python client (https://github.com/openai/openai-python/blob/main/src/openai/_base_client.py) +# which is generated by the Stainless SDK Generator. +def _parse_retry_after_header(response_headers: Optional[Headers] = None) -> float | None: + """Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified. + + About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After + See also https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax + """ + if response_headers is None: + return None + + # First, try the non-standard `retry-after-ms` header for milliseconds, + # which is more precise than integer-seconds `retry-after` + try: + retry_ms_header = response_headers.get("retry-after-ms", None) + return float(retry_ms_header) / 1000 + except (TypeError, ValueError): + pass + + # Next, try parsing `retry-after` header as seconds (allowing nonstandard floats). + retry_header = response_headers.get("retry-after") + try: + # note: the spec indicates that this should only ever be an integer + # but if someone sends a float there's no reason for us to not respect it + return float(retry_header) + except (TypeError, ValueError): + pass + + # Last, try parsing `retry-after` as a date. 
+ retry_date_tuple = email.utils.parsedate_tz(retry_header) + if retry_date_tuple is None: + return None + + retry_date = email.utils.mktime_tz(retry_date_tuple) + return float(retry_date - time.time()) + +def _should_retry(response: Response) -> Tuple[bool, str]: + # Note: this is not a standard header + should_retry_header = response.headers.get("x-should-retry") + + # If the server explicitly says whether or not to retry, obey. + if should_retry_header == "true": + return True, f"Server requested retry via x-should-retry=true header (HTTP {response.status_code})" + if should_retry_header == "false": + return False, f"Server prevented retry via x-should-retry=false header (HTTP {response.status_code})" + + # Retry on request timeouts. + if response.status_code == 408: + return True, f"HTTP request timeout ({response.status_code}), will retry with backoff" + + # Retry on lock timeouts. + if response.status_code == 409: + return True, f"HTTP conflict/lock timeout ({response.status_code}), will retry with backoff" + + # Retry on rate limits. + if response.status_code == 429: + return True, f"HTTP rate limit exceeded ({response.status_code}), will retry with backoff" + + # Retry internal errors. + if response.status_code >= 500: + return True, f"HTTP server error ({response.status_code}), will retry with backoff" + + return False, f"HTTP client error ({response.status_code}), not retrying - check your request" + + +def http_response_to_application_error(response: Response) -> ApplicationError: + """Transform HTTP response into Temporal ApplicationError for retry handling. + + This function implements generic HTTP retry logic based on status codes and headers. + + Args: + response: The httpx.Response from a failed HTTP request + + Returns: + ApplicationError: Always returns an ApplicationError configured for Temporal's retry system: + - non_retryable: False for retryable errors, True for non-retryable + - next_retry_delay: Server-provided delay hint (if valid) + + Note: + Even when x-should-retry=true, this function returns an ApplicationError with + non_retryable=False rather than raising an exception, for cleaner functional style. + """ + should_retry, retry_message = _should_retry(response) + if should_retry: + # Calculate the retry delay only when retrying + retry_after = _parse_retry_after_header(response.headers) + # Make sure that the retry delay is in a reasonable range + if retry_after is not None and 0 < retry_after <= 60: + retry_after = timedelta(seconds=retry_after) + else: + retry_after = None + + # Add delay info for rate limits + if response.status_code == 429 and retry_after is not None: + retry_message = f"HTTP rate limit exceeded (429) (server requested {retry_after.total_seconds():.1f}s delay), will retry with backoff" + + return ApplicationError( + retry_message, + non_retryable=False, + next_retry_delay=retry_after, + ) + else: + return ApplicationError( + retry_message, + non_retryable=True, + next_retry_delay=None, + ) + +``` + +## Raise the exception from the Activity + +When API calls fail, the OpenAI Client raises an `APIStatusError` exception which contains a `response` field, containing the underlying `httpx.Response` object. +We use the `http_response_to_application_error` function defined above to translate this to a Temporal `ApplicationError`, which we re-throw to pass the retry information to Temporal. 
+ +_File: activities/openai_responses.py_ + +```python + +from temporalio import activity +from openai import AsyncOpenAI +from openai.types.responses import Response +from dataclasses import dataclass +from util.http_retry import http_response_to_application_error +from openai import APIStatusError + +# Temporal best practice: Create a data structure to hold the request parameters. +@dataclass +class OpenAIResponsesRequest: + model: str + instructions: str + input: str + +@activity.defn +async def create(request: OpenAIResponsesRequest) -> Response: + # Temporal best practice: Disable retry logic in OpenAI API client library. + client = AsyncOpenAI(max_retries=0) + + try: + resp = await client.responses.create( + model=request.model, + instructions=request.instructions, + input=request.input, + timeout=15, + ) + return resp + except APIStatusError as e: + raise http_response_to_application_error(e.response) from e +``` + +## Running + +Start the Temporal Dev Server: + +```bash +temporal server start-dev +``` + +Run the worker: + +```bash +uv run python -m worker +``` + +Start execution: + +```bash +uv run python -m start_workflow +``` diff --git a/cookbook/structured-output-openai-responses-python.mdx b/cookbook/structured-output-openai-responses-python.mdx new file mode 100644 index 0000000000..e14473ac19 --- /dev/null +++ b/cookbook/structured-output-openai-responses-python.mdx @@ -0,0 +1,287 @@ +--- +title: Structured Outputs with Temporal and OpenAI +description: Use Temporal and OpenAI Responses API to reliably request output conforming to a specific data structure. +tags: [foundations, openai, python] +priority: 950 +source: https://github.com/temporalio/ai-cookbook/tree/main/foundations/structured_output_openai_responses_python +--- + +The OpenAI Responses API provides the [Structured Outputs API](https://platform.openai.com/docs/guides/structured-outputs) allowing you to request responses conforming to a specific data structure. + +In this example, we use structured outputs in a business data cleaning scenario. +Structured outputs are also commonly used for tool calling. + +OpenAI usually returns the correct type. However, this is not always the case due to the non-deterministic nature of LLMs. +When OpenAI returns an incorrect type, Temporal automatically retries the LLM call Activity. + +## Invoke Model Activity + +We create a model-calling Activity that uses the `responses.parse` method of the OpenAI client. + +Key challenges are related to serialization: + +1. In `InvokeModelRequest` the `response_format` field is a class reference. We provide custom Pydantic serialization and deserialization logic. +2. In `InvokeModelResponse` the `response_model` must be deserialized to the correct type. We serialize the type in one field and the model, represented as a dictionary, in another. 
+ +```python +from temporalio import activity +from openai import AsyncOpenAI +from typing import Optional, List, cast, Any, TypeVar, Generic +from typing_extensions import Annotated +from pydantic import BaseModel +from pydantic.functional_validators import BeforeValidator +from pydantic.functional_serializers import PlainSerializer + +import importlib + +T = TypeVar("T", bound=BaseModel) + + +def _coerce_class(v: Any) -> type[Any]: + """Pydantic validator: convert string path to class during deserialization.""" + if isinstance(v, str): + mod_path, sep, qual = v.partition(":") + if not sep: # support "package.module.Class" + mod_path, _, qual = v.rpartition(".") + module = importlib.import_module(mod_path) + obj = module + for attr in qual.split("."): + obj = getattr(obj, attr) + return cast(type[Any], obj) + elif isinstance(v, type): + return v + else: + raise ValueError(f"Cannot coerce {v} to class") + + +def _dump_class(t: type[Any]) -> str: + """Pydantic serializer: convert class to string path during serialization.""" + return f"{t.__module__}:{t.__qualname__}" + + +# Custom type that automatically handles class <-> string conversion in Pydantic serialization +ClassReference = Annotated[ + type[T], + BeforeValidator(_coerce_class), + PlainSerializer(_dump_class, return_type=str), +] + + +class InvokeModelRequest(BaseModel, Generic[T]): + model: str + instructions: str + input: str + response_format: Optional[ClassReference[T]] = None + tools: Optional[List[dict]] = None + + +class InvokeModelResponse(BaseModel, Generic[T]): + # response_format records the type of the response model + response_format: Optional[ClassReference[T]] = None + response_model: Any + + @property + def response(self) -> T: + """Reconstruct the original response type if response_format was provided.""" + if self.response_format: + model_cls = self.response_format + return model_cls.model_validate(self.response_model) + return self.response_model + + +@activity.defn +async def invoke_model(request: InvokeModelRequest[T]) -> InvokeModelResponse[T]: + client = AsyncOpenAI(max_retries=0) + + kwargs = { + "model": request.model, + "instructions": request.instructions, + "input": request.input, + } + + if request.response_format: + kwargs["text_format"] = request.response_format + + if request.tools: + kwargs["tools"] = request.tools + + # Use responses API consistently + resp = await client.responses.parse(**kwargs) + + if request.response_format: + # Convert structured response to dict for managed serialization. + # This allows us to reconstruct the original response type while maintaining type safety. + parsed_model = cast(BaseModel, resp.output_parsed) + return InvokeModelResponse( + response_model=parsed_model.model_dump(), + response_format=request.response_format, + ) + else: + return InvokeModelResponse( + response_model=resp.output_text, response_format=None + ) +``` + +## Workflow + +We define the `Business` class as a Pydantic model. +We use the Pydantic's `EmailStr` type for the email field. +For the phone field, we use a custom validator to ensure the phone number is in E.164 format. + +The validators should check for obvious structural errors that LLMs will only get wrong sporadically. +If the LLM produces invalid responses consistently, Activity retries will fail consistently. +To mitigate the cost of such futile retries, we limit the number of retry attempts when using structured outputs. 
+ +```python +from pydantic import BaseModel, Field, field_validator, EmailStr +from pydantic_core import PydanticCustomError +import re +from temporalio import workflow +from activities import invoke_model +from activities.invoke_model import InvokeModelRequest +from typing import List, Optional +from datetime import timedelta +from temporalio.common import RetryPolicy + +class Business(BaseModel): + name: Optional[str] = Field( + None, + description="The business name", + json_schema_extra={"example": "Acme Corporation"}, + ) + email: Optional[EmailStr] = Field( + None, + description="Primary business email address", + json_schema_extra={"example": "info@acmecorp.com"}, + ) + phone: Optional[str] = Field( + None, + description="Primary business phone number in E.164 format", + json_schema_extra={"example": "+12025550173"}, + ) + address: Optional[str] = Field( + None, + description="Business mailing address", + json_schema_extra={ + "example": "123 Business Park Dr, Suite 100, New York, NY 10001" + }, + ) + website: Optional[str] = Field( + None, + description="Business website URL", + json_schema_extra={"example": "https://www.acmecorp.com"}, + ) + industry: Optional[str] = Field( + None, + description="Business industry or sector", + json_schema_extra={"example": "Technology"}, + ) + + @field_validator("phone", mode="before") + def validate_phone(cls, v): + # Allow None values + if v is None: + return None + + if isinstance(v, str): + v = v.strip() + # Allow empty strings to be converted to None for optional fields + if not v: + return None + + # E.164 format: + followed by 1-9, then 9-15 more digits + e164_pattern = r"^\+[1-9]\d{9,15}$" + + if not re.match(e164_pattern, v): + raise PydanticCustomError( + "phone_format", + "Phone number must be in E.164 format (e.g., +12025550173)", + {"invalid_phone": v}, + ) + + return v + + @field_validator("name", mode="before") + def validate_name(cls, v): + # Allow None values + if v is None: + return None + + if isinstance(v, str): + v = v.strip() + # Convert empty strings to None (this is acceptable) + if not v: + return None + + return v + + +class BusinessList(BaseModel): + businesses: List[Business] + + +@workflow.defn +class CleanDataWorkflow: + @workflow.run + async def run(self, data: str) -> BusinessList: + results = await workflow.execute_activity( + invoke_model.invoke_model, + InvokeModelRequest( + model="gpt-4o", + instructions=f"""Extract and clean business data with these specific rules: + +1. BUSINESS NAME: Extract the main business name, normalize capitalization (Title Case for proper nouns) +2. EMAIL: + - Extract only ONE primary email address + - If multiple emails, choose the one marked as "primary" or the first valid one + - Validate format (must have @ and valid domain with .) + - Set to null if invalid (e.g., "bob@email", "NONE PROVIDED") +3. PHONE: + - Convert to E.164 format (+1 prefix for US numbers, add if not provided) + - Convert letters to numbers where appropriate (e.g., "1-800-FLOWERS" → "+18003569377") + - Set to null if cannot be converted to valid E.164 format + - Examples: "(555) 123-4567" → "+15551234567", "555 234 5678 ext 349i" → null (invalid), "5551234567" → "+15551234567" +4. ADDRESS: + - Provide complete, standardized address + - Set to null if vague/incomplete (e.g., "north end of main st", "unknown", "[PRIVATE]") +5. WEBSITE: + - Standardize to https:// format + - Remove "www." prefix, add https:// if missing + - Set to null if broken/invalid (e.g., "broken-link.com/404", "down for maintenance") +6. 
INDUSTRY: + - Use clear, professional industry categories + - Normalize similar terms (e.g., "fix cars and trucks" → "Automotive Repair") + +Return null for any field that cannot be reliably extracted or validated.""", + input=data, + response_format=BusinessList, + ), + start_to_close_timeout=timedelta(seconds=300), + retry_policy=RetryPolicy( + maximum_attempts=3, + ), + summary="Clean data", + ) + return results.response + +``` + +## Running + +Start the Temporal Dev Server: + +```bash +temporal server start-dev +``` + +Run the worker: + +```bash +uv run python -m worker +``` + +Start execution: + +```bash +uv run python -m start_workflow +``` diff --git a/cookbook/tool-calling-python.mdx b/cookbook/tool-calling-python.mdx new file mode 100644 index 0000000000..b440ac2353 --- /dev/null +++ b/cookbook/tool-calling-python.mdx @@ -0,0 +1,350 @@ +--- +title: Tool Calling Agent +description: Build a simple, non-looping agent that gives agency to the LLM to choose tools, and then invokes chosen tools. +tags: [agents, python] +priority: 775 +--- + +In this example, we demonstrate how function calling (also known as tool calling) works with the [Open AI API](https://github.com/openai/openai-python) and Temporal. Tool calling allows the model to make decisions on which, if any, functions should be invoked. It also provides information to the LLM that will allow it to structure the response in such a way that the agent can easily invoke the functions. + +Tools are supplied to the [`responses` API](https://platform.openai.com/docs/api-reference/responses/create) through the [`tools` parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-tools). The `tools` parameter is in`json` and includes a description of the function as well as descriptions of each of the arguments. + +:::caution +The API used to generate the tools json is an internal function from the [Open AI API](https://github.com/openai/openai-python) and may therefore change in the future. There currently is no public API to generate the tool definition from a Pydantic model or a function signature. +::: + +Being external API calls, invoking the LLM and invoking the function are each done within a Temporal Activity. + +This example lays the foundation for the core agentic pattern where the LLM makes the decision on functions/tools to invoke, the agent calls the function/tool(s) and the response from such calls is sent back to the LLM for interpretation. + +This recipe highlights these key design decisions: + +- A generic Activity for invoking an LLM API; that is, instructions and other responses arguments are passed into the Activity making it appropriate for use in a variety of different use cases. Similarly, the result from the responses API call is returned out of the Activity so that it is usable in a variety of different use cases. +- We have intentionally not implemented the agentic loop so as to focus on how tool details are made available to the LLM and how functions are invoked. We do take the tool output and have the LLM interpret it in a manner consistent with the AI agent pattern. +- Retries are handled by Temporal and not by the underlying libraries such as the OpenAI client. This is important because if you leave the client retries on they can interfere with correct and durable error handling and recovery. + +## Create the Activity for LLM invocations + +We create a wrapper for the `create` method of the `AsyncOpenAI` client object. +This is a generic Activity that invokes the OpenAI LLM. 
+ +We set `max_retries=0` when creating the `AsyncOpenAI` client. +This moves the responsibility for retries from the OpenAI client to Temporal. + +In this implementation, we allow the model, instructions, and input to be passed in, along with the list of tools. + +`activities/openai_responses.py` +```python +from temporalio import activity +from openai import AsyncOpenAI +from openai.types.responses import Response +from dataclasses import dataclass +from typing import Any + +# Temporal best practice: Create a data structure to hold the request parameters. +@dataclass +class OpenAIResponsesRequest: + model: str + instructions: str + input: object + tools: list[dict[str, Any]] + +@activity.defn +async def create(request: OpenAIResponsesRequest) -> Response: + # Temporal best practice: Disable retry logic in OpenAI API client library. + client = AsyncOpenAI(max_retries=0) + + resp = await client.responses.create( + model=request.model, + instructions=request.instructions, + input=request.input, + tools=request.tools, + timeout=30, + ) + + return resp +``` + +## Create the Activity for the tool invocation + +We create a wrapper for invoking the [National Weather Service API](https://www.weather.gov/documentation/services-web-api), specifically for the weather alerts endpoint. + +We follow the Temporal best practice of encapsulating all input parameters to the activity in a +data structure, even here where there is only one argument. + +The `WEATHER_ALERTS_TOOL_OAI` dictionary is built by a function defined in `helpers/tool_helpers.py` that calls the aforementioned internal OpenAI function, generating the dictionary that is passed as a tool definition to the OpenAI responses API. + +`activities/get_weather_alerts.py` +```python +# activities/get_weather_alerts.py + +from typing import Any +from temporalio import activity +import httpx +import json +from pydantic import BaseModel +import openai +from helpers import tool_helpers +from pydantic import Field + +# Constants +NWS_API_BASE = "https://api.weather.gov" +USER_AGENT = "weather-app/1.0" + +def _alerts_url(state: str) -> str: + return f"{NWS_API_BASE}/alerts/active/area/{state}" + +# External calls happen via activities now +async def _make_nws_request(url: str) -> dict[str, Any] | None: + """Make a request to the NWS API with proper error handling.""" + headers = { + "User-Agent": USER_AGENT, + "Accept": "application/geo+json" + } + + async with httpx.AsyncClient() as client: + + response = await client.get(url, headers=headers, timeout=5.0) + response.raise_for_status() + return response.json() + +# Build the tool for the OpenAI Responses API. We use Pydantic to create a structure +# that encapsulates the input parameters for both the weather alerts activity and the +# tool definition that is passed to the OpenAI Responses API. +class GetWeatherAlertsRequest(BaseModel): + state: str = Field(description="Two-letter US state code (e.g. CA, NY)") + +WEATHER_ALERTS_TOOL_OAI: dict[str, Any] = tool_helpers.oai_responses_tool_from_model( + "get_weather_alerts", + "Get weather alerts for a US state.", + GetWeatherAlertsRequest) + +@activity.defn +async def get_weather_alerts(weather_alerts_request: GetWeatherAlertsRequest) -> str: + """Get weather alerts for a US state. + + Args: + state: Two-letter US state code (e.g.
CA, NY) + """ + data = await _make_nws_request(_alerts_url(weather_alerts_request.state)) + return json.dumps(data) +``` +### Create the helper function + +The `oai_responses_tool_from_model` function accepts a tool name and description, along with the Pydantic model that describes the arguments, and returns JSON in the format expected for tool definitions in the OpenAI responses API. + +:::caution +The API used to generate the tools JSON is an internal function from the [OpenAI Python API library](https://github.com/openai/openai-python) and may therefore change in the future. There currently is no public API to generate the tool definition from a Pydantic model or a function signature. +::: + +`helpers/tool_helpers.py` +```python +from openai.lib._pydantic import to_strict_json_schema # private API; may change +# there currently is no public API to generate the tool definition from a Pydantic model +# or a function signature. +from pydantic import BaseModel + +def oai_responses_tool_from_model(name: str, description: str, model: type[BaseModel]): + return { + "type": "function", + "name": name, + "description": description, + "parameters": to_strict_json_schema(model), + "strict": True, + } +``` + +## Create the Agent + +The agent is implemented as a Temporal workflow that orchestrates +- the initial LLM call with the initial user input and guidance that the LLM should respond in haikus when the user input doesn't lead to a tool call, +- the invocation of the function, if the LLM has chosen one, +- and, if a function has been called, appending the result to the context, which is then sent back to the LLM for interpretation (the LLM is instructed to format the tool response). + +`workflows/get_weather_workflow.py` +```python +from temporalio import workflow +from datetime import timedelta +import json + +from activities import openai_responses + +with workflow.unsafe.imports_passed_through(): + from activities import get_weather_alerts + + +@workflow.defn +class ToolCallingWorkflow: + @workflow.run + async def run(self, input: str) -> str: + + input_list = [ {"role": "user", "content": input} ] + + # We take the user input and pass it to the LLM with the system instructions + # and the tool to use, if applicable. + system_instructions = "if no tools seem to be needed, respond in haikus."
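+ # The tool definition built by tool_helpers (get_weather_alerts.WEATHER_ALERTS_TOOL_OAI)
+ # is passed via `tools=[...]` below; it is what allows the model to return a structured
+ # function_call item instead of plain text when it decides the tool is needed.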
+ result = await workflow.execute_activity( + openai_responses.create, + openai_responses.OpenAIResponsesRequest( + model="gpt-4o-mini", + instructions=system_instructions, + input=input_list, + tools=[get_weather_alerts.WEATHER_ALERTS_TOOL_OAI], + ), + start_to_close_timeout=timedelta(seconds=30), + ) + + # For this simple example, we only have one item in the output list + item = result.output[0] + + # if the result is a tool call, call the tool + if item.type == "function_call": + if item.name == "get_weather_alerts": + + # serialize the output, which is an OpenAI object + input_list += [ + i.model_dump() if hasattr(i, "model_dump") else i + for i in result.output + ] + + result = await workflow.execute_activity( + get_weather_alerts.get_weather_alerts, + get_weather_alerts.GetWeatherAlertsRequest(state=json.loads(item.arguments)["state"]), + start_to_close_timeout=timedelta(seconds=30), + ) + + # add the tool call result to the input list for context + input_list.append({"type": "function_call_output", + "call_id": item.call_id, + "output": result}) + + result = await workflow.execute_activity( + openai_responses.create, + openai_responses.OpenAIResponsesRequest( + model="gpt-4o-mini", + instructions="return the tool call result in a readable format", + input=input_list, + tools=[] + ), + start_to_close_timeout=timedelta(seconds=30), + ) + + result = result.output_text + + return result + ``` + +## Create the Worker + +The worker is the process that dispatches work to the various parts of the agent implementation - the orchestrator and the activities for the LLM and tool invocations. + +*File: worker.py* + +```python +import asyncio + +from temporalio.client import Client +from temporalio.worker import Worker + +from workflows.get_weather_workflow import ToolCallingWorkflow +from activities import openai_responses, get_weather_alerts +from temporalio.contrib.pydantic import pydantic_data_converter + + +async def main(): + client = await Client.connect( + "localhost:7233", + data_converter=pydantic_data_converter, + ) + + worker = Worker( + client, + task_queue="tool-calling-python-task-queue", + workflows=[ + ToolCallingWorkflow, + ], + activities=[ + openai_responses.create, + get_weather_alerts.get_weather_alerts, + ], + ) + await worker.run() + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Initiate an interaction with the agent + +In order to interact with this simple AI agent, we create a Temporal client and execute a workflow. + +`start_workflow.py` +```python +import asyncio +import sys + +from temporalio.client import Client + +from workflows.get_weather_workflow import ToolCallingWorkflow +from temporalio.contrib.pydantic import pydantic_data_converter + + +async def main(): + client = await Client.connect( + "localhost:7233", + data_converter=pydantic_data_converter, + ) + + query = sys.argv[1] if len(sys.argv) > 1 else "Hello, how are you?" 
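+ # The default prompt exercises the no-tool path (the LLM answers in haikus);
+ # asking about weather alerts for a US state (see Running below) exercises the tool call path.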
+ + # Submit the Hello World workflow for execution + result = await client.execute_workflow( + ToolCallingWorkflow.run, + query, + id="my-workflow-id", + task_queue="tool-calling-python-task-queue", + ) + print(f"Result: {result}") + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Running + +### Start the Temporal Dev Server + +```bash +temporal server start-dev +``` + +### Install dependencies + +From this directory: + +```bash +uv sync +``` + +### Run the worker + +First set the `OPENAI_API_KEY` environment variable and then: + +```bash +uv run python -m worker +``` + +### Initiate an interaction with the agent + +This user input should not result in any tool call + +```bash +uv run python -m start_workflow "Tell me about recursion in programming." +``` + +This user input should invoke the tool and respond with current weather alerts for California. + +```bash +uv run python -m start_workflow "Are there any weather alerts in California?" +``` diff --git a/docusaurus.config.js b/docusaurus.config.js index 7befb771cd..6e8967b5a4 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -1,33 +1,33 @@ //@ts-check -const FontPreloadPlugin = require("webpack-font-preload-plugin"); +const FontPreloadPlugin = require('webpack-font-preload-plugin'); /** @type {import('@docusaurus/types').DocusaurusConfig} */ module.exports = async function createConfigAsync() { return { - title: "Temporal Platform Documentation", - tagline: "Build invincible applications", - url: "https://docs.temporal.io", - baseUrl: "/", - onBrokenLinks: "throw", - onBrokenAnchors: "throw", - favicon: "img/favicon.svg", - organizationName: "temporalio", // Usually your GitHub org/user name. - projectName: "temporal-documentation", // Usually your repo name. + title: 'Temporal Platform Documentation', + tagline: 'Build invincible applications', + url: 'https://docs.temporal.io', + baseUrl: '/', + onBrokenLinks: 'throw', + onBrokenAnchors: 'throw', + favicon: 'img/favicon.svg', + organizationName: 'temporalio', // Usually your GitHub org/user name. + projectName: 'temporal-documentation', // Usually your repo name. headTags: [ { - tagName: "link", + tagName: 'link', attributes: { - rel: "preload", - href: "https://iq.temporal.io", - as: "document", + rel: 'preload', + href: 'https://iq.temporal.io', + as: 'document', }, }, ], - clientModules: ["./src/client/remote-amplitude-analytics.js"], + clientModules: ['./src/client/remote-amplitude-analytics.js'], themeConfig: { colorMode: { - defaultMode: "light", + defaultMode: 'light', disableSwitch: false, // switchConfig: { // darkIcon: "🌙", @@ -43,12 +43,12 @@ module.exports = async function createConfigAsync() { // }, // }, }, - metadata: [{ name: "robots", content: "follow, index" }], - image: "/img/assets/open-graph-shiny.png", + metadata: [{ name: 'robots', content: 'follow, index' }], + image: '/img/assets/open-graph-shiny.png', prism: { //theme: require("prism-react-renderer/themes/nightOwlLight"), // darkTheme: require("prism-react-renderer/themes/dracula"), - additionalLanguages: ["java", "ruby", "php", "csharp"], + additionalLanguages: ['java', 'ruby', 'php', 'csharp'], }, docs: { sidebar: { @@ -63,56 +63,57 @@ module.exports = async function createConfigAsync() { // textColor: "#ffffff", // isCloseable: true, // }, -// announcementBar: { -// id: "new_feedback_widget", -// content: "How are we doing? 
Try our new on-page feedback.", -// backgroundColor: "#312e81", -// textColor: "#ffffff", -// isCloseable: true, -// }, + // announcementBar: { + // id: "new_feedback_widget", + // content: "How are we doing? Try our new on-page feedback.", + // backgroundColor: "#312e81", + // textColor: "#ffffff", + // isCloseable: true, + // }, navbar: { hideOnScroll: false, logo: { - alt: "Temporal logo", - src: "img/assets/temporal-logo-dark.svg", - srcDark: "img/assets/temporal-logo.svg", - href: "https://temporal.io", + alt: 'Temporal logo', + src: 'img/assets/temporal-logo-dark.svg', + srcDark: 'img/assets/temporal-logo.svg', + href: 'https://temporal.io', }, items: [ { - label: "Home", - to: "/", - position: "left", - activeBasePath: "none", + label: 'Home', + to: '/', + position: 'left', + activeBasePath: 'none', }, { - label: "Start learning", - href: "https://learn.temporal.io/getting_started/", - right: "left", + label: 'Start learning', + href: 'https://learn.temporal.io/getting_started/', + right: 'left', }, { - label: "Start building", - href: "/develop", - right: "left", + label: 'Start building', + href: '/develop', + right: 'left', }, { - label: "Code Exchange", - href: "https://temporal.io/code-exchange", - right: "left", + label: 'AI Cookbook', + to: '/cookbook', + activeBasePath: 'cookbook', + position: 'left', }, { - label: "Temporal Cloud", - to: "/cloud", - activeBasePath: "cloud", - position: "left", + label: 'Temporal Cloud', + to: '/cloud', + activeBasePath: 'cloud', + position: 'left', }, ], }, footer: { logo: { - alt: "Temporal logo", - src: "img/favicon.png", - href: "https://temporal.io", + alt: 'Temporal logo', + src: 'img/favicon.png', + href: 'https://temporal.io', width: 24, }, copyright: `Copyright © ${new Date().getFullYear()} Temporal Technologies Inc.`, @@ -120,109 +121,109 @@ module.exports = async function createConfigAsync() { { items: [ { - label: "Github", - href: "https://github.com/temporalio", + label: 'Github', + href: 'https://github.com/temporalio', }, { - label: "Twitter", - href: "https://twitter.com/temporalio", + label: 'Twitter', + href: 'https://twitter.com/temporalio', }, { - label: "YouTube", - href: "https://www.youtube.com/c/Temporalio", + label: 'YouTube', + href: 'https://www.youtube.com/c/Temporalio', }, { - label: "About the docs", - href: "https://github.com/temporalio/documentation/blob/master/README.md", + label: 'About the docs', + href: 'https://github.com/temporalio/documentation/blob/master/README.md', }, ], }, { items: [ { - label: "Temporal Cloud", - href: "https://temporal.io/cloud", + label: 'Temporal Cloud', + href: 'https://temporal.io/cloud', }, { - label: "Meetups", - href: "https://temporal.io/community#events", + label: 'Meetups', + href: 'https://temporal.io/community#events', }, { - label: "Workshops", - href: "https://temporal.io/community#workshops", + label: 'Workshops', + href: 'https://temporal.io/community#workshops', }, { - label: "Support forum", - href: "https://community.temporal.io/", + label: 'Support forum', + href: 'https://community.temporal.io/', }, { - label: "Ask an expert", - href: "https://pages.temporal.io/ask-an-expert", + label: 'Ask an expert', + href: 'https://pages.temporal.io/ask-an-expert', }, ], }, { items: [ { - label: "Learn Temporal", - href: "https://learn.temporal.io", + label: 'Learn Temporal', + href: 'https://learn.temporal.io', }, { - label: "Blog", - href: "https://temporal.io/blog", + label: 'Blog', + href: 'https://temporal.io/blog', }, { - label: "Use cases", - href: 
"https://temporal.io/use-cases", + label: 'Use cases', + href: 'https://temporal.io/use-cases', }, { - label: "Newsletter signup", - href: "https://pages.temporal.io/newsletter-subscribe", + label: 'Newsletter signup', + href: 'https://pages.temporal.io/newsletter-subscribe', }, ], }, { items: [ { - label: "Security", - to: "/security", + label: 'Security', + to: '/security', }, { - label: "Privacy policy", - to: "https://temporal.io/global-privacy-policy", + label: 'Privacy policy', + to: 'https://temporal.io/global-privacy-policy', }, { - label: "Terms of service", - href: "https://docs.temporal.io/pdf/temporal-tos-2021-07-24.pdf", + label: 'Terms of service', + href: 'https://docs.temporal.io/pdf/temporal-tos-2021-07-24.pdf', }, { label: "We're hiring", - href: "https://temporal.io/careers", + href: 'https://temporal.io/careers', }, ], }, ], }, algolia: { - apiKey: "4a2fa646f476d7756a7cdc599b625bec", - indexName: "temporal", - externalUrlRegex: "temporal\\.io", + apiKey: '4a2fa646f476d7756a7cdc599b625bec', + indexName: 'temporal', + externalUrlRegex: 'temporal\\.io', // contextualSearch: true, // Optional; if you have different version of docs etc (v1 and v2), doesn't display dup results - appId: "T5D6KNJCQS", // Optional, if you run the DocSearch crawler on your own + appId: 'T5D6KNJCQS', // Optional, if you run the DocSearch crawler on your own // algoliaOptions: {}, // Optional, if provided by Algolia insights: true, }, }, presets: [ [ - "@docusaurus/preset-classic", + '@docusaurus/preset-classic', { // Will be passed to @docusaurus/plugin-content-docs docs: { - sidebarPath: require.resolve("./sidebars.js"), - routeBasePath: "/", - exclude: ["**/clusters/**"], // do not render context content - editUrl: "https://github.com/temporalio/documentation/edit/main/docs/", + sidebarPath: require.resolve('./sidebars.js'), + routeBasePath: '/', + exclude: ['**/clusters/**', '**/cookbook/**'], // do not render context content + editUrl: 'https://github.com/temporalio/documentation/edit/main/docs/', /** * Whether to display the author who last updated the doc. */ @@ -241,18 +242,18 @@ module.exports = async function createConfigAsync() { // // below remark plugin disabled until we can figure out why it is not transpiling to ESNext properly - swyx // // original PR https://github.com/temporalio/documentation/pull/496/files admonitions: { - keywords: ["note", "tip", "info", "caution", "danger", "competency", "copycode"], + keywords: ['note', 'tip', 'info', 'caution', 'danger', 'competency', 'copycode'], }, - remarkPlugins: [(await import("remark-math")).default], - rehypePlugins: [(await import("rehype-katex")).default], + remarkPlugins: [(await import('remark-math')).default], + rehypePlugins: [(await import('rehype-katex')).default], + }, + theme: { + customCss: require.resolve('./src/css/custom.css'), }, // Will be passed to @docusaurus/plugin-content-blog // options: https://docusaurus.io/docs/api/plugins/@docusaurus/plugin-content-blog // blog: {}, // Will be passed to @docusaurus/theme-classic. - theme: { - customCss: require.resolve("./src/css/custom.css"), - }, // gtag: { // trackingID: "GTM-TSXFPF2", // // Optional fields. 
@@ -262,56 +263,56 @@ module.exports = async function createConfigAsync() { sitemap: { // Per v2.0.0-alpha.72 cacheTime is now deprecated //cacheTime: 600 * 1000, // 600 sec - cache purge period - changefreq: "daily", + changefreq: 'daily', priority: 0.5, - filename: "sitemap.xml", + filename: 'sitemap.xml', }, }, ], ], scripts: [ { - src: "/scripts/googletag.js", + src: '/scripts/googletag.js', async: true, defer: true, }, { - src: "https://widget.kapa.ai/kapa-widget.bundle.js", - "data-website-id": "91a88508-9cdc-441f-b1df-37aa9329e6bc", - "data-project-name": "Temporal", - "data-project-color": "#000000", - "data-project-logo": "https://avatars.githubusercontent.com/u/56493103?s=280&v=4", - "data-modal-title": "Temporal's AI developer assistant", - "data-user-analytics-fingerprint-enabled": true, - "data-modal-disclaimer": + src: 'https://widget.kapa.ai/kapa-widget.bundle.js', + 'data-website-id': '91a88508-9cdc-441f-b1df-37aa9329e6bc', + 'data-project-name': 'Temporal', + 'data-project-color': '#000000', + 'data-project-logo': 'https://avatars.githubusercontent.com/u/56493103?s=280&v=4', + 'data-modal-title': "Temporal's AI developer assistant", + 'data-user-analytics-fingerprint-enabled': true, + 'data-modal-disclaimer': "I am Temporal's AI developer assistant. I can access developer docs, forum posts, product blogs, and SDK references. Responses are generated by combining various sources to provide the best possible answer, however I may not be fully accurate, so please use your best judgement. If you have feedback please give a thumbs up or down as I continue to improve.", - "data-modal-example-questions": [ - "What is Temporal?", - "How do I get started using Temporal?", - "I need a Workflow written in TypeScript", - "How do Signals work?", + 'data-modal-example-questions': [ + 'What is Temporal?', + 'How do I get started using Temporal?', + 'I need a Workflow written in TypeScript', + 'How do Signals work?', ], async: true, defer: true, }, { - src: "/scripts/copycode-notice.js", + src: '/scripts/copycode-notice.js', async: true, defer: true, }, ], stylesheets: [ { - href: "https://cdn.jsdelivr.net/npm/katex@0.13.24/dist/katex.min.css", - type: "text/css", - integrity: "sha384-odtC+0UGzzFL/6PNoE8rX/SPcQDXBJ+uRepguP4QkPCm2LBxH3FA3y+fKSiJ+AmM", - crossorigin: "anonymous", + href: 'https://cdn.jsdelivr.net/npm/katex@0.13.24/dist/katex.min.css', + type: 'text/css', + integrity: 'sha384-odtC+0UGzzFL/6PNoE8rX/SPcQDXBJ+uRepguP4QkPCm2LBxH3FA3y+fKSiJ+AmM', + crossorigin: 'anonymous', }, ], plugins: [ function preloadFontPlugin() { return { - name: "preload-font-plugin", + name: 'preload-font-plugin', configureWebpack() { return { plugins: [new FontPreloadPlugin()], @@ -320,61 +321,83 @@ module.exports = async function createConfigAsync() { }; }, [ - "docusaurus-pushfeedback", + 'docusaurus-pushfeedback', + { + project: '6c1ptrxbky', + privacyPolicyText: 'false', + buttonPosition: 'center-right', + modalPosition: 'sidebar-right', + modalTitle: 'Feedback', + }, + ], + [ + '@docusaurus/plugin-content-docs', + { + id: 'cookbook', + path: 'cookbook', + routeBasePath: 'cookbook', // published at /cookbook/* ✅ + sidebarPath: false, // no left nav for these pages ✅ + // optional polish: + showLastUpdateAuthor: true, + showLastUpdateTime: true, + // use a custom item to center the content: + docItemComponent: '@site/src/components/CookbookDocItem', + docCategoryGeneratedIndexComponent: '@site/src/components/CookbookCategoryIndex', // ⬅️ isolated override + }, + ], + [ + 
require.resolve('./plugins/cookbook-index'), { - project: "6c1ptrxbky", - privacyPolicyText: "false", - buttonPosition: "center-right", - modalPosition: "sidebar-right", - modalTitle: "Feedback", + docsDir: 'cookbook', // change if your folder differs + routeBasePath: 'cookbook', // change if you use a different base }, ], [ - "docusaurus-plugin-llms", + 'docusaurus-plugin-llms', { // Generate both llms.txt (index) and llms-full.txt (complete content) generateLLMsTxt: true, generateLLMsFullTxt: true, - + // Clean up content for better LLM consumption excludeImports: true, removeDuplicateHeadings: true, - + // Organize content in a logical order for LLMs includeOrder: [ - "quickstarts/**", - "evaluate/**", - "develop/**", - "production-deployment/**", - "cli/**", - "references/**", - "troubleshooting/**", - "encyclopedia/**", - "security*", - "web-ui*", - "glossary*", + 'quickstarts/**', + 'evaluate/**', + 'develop/**', + 'production-deployment/**', + 'cli/**', + 'references/**', + 'troubleshooting/**', + 'encyclopedia/**', + 'security*', + 'web-ui*', + 'glossary*', ], - + // Path transformation to clean URLs pathTransformation: { - ignorePaths: ["docs"] + ignorePaths: ['docs'], }, - + // Custom LLM files for specific use cases customLLMFiles: [ { - filename: "llms-quickstart.txt", - includePatterns: ["quickstarts/**", "develop/**/set-up-*"], + filename: 'llms-quickstart.txt', + includePatterns: ['quickstarts/**', 'develop/**/set-up-*'], fullContent: true, - title: "Temporal Quickstart Guide" + title: 'Temporal Quickstart Guide', }, { - filename: "llms-api-reference.txt", - includePatterns: ["references/**", "cli/**"], + filename: 'llms-api-reference.txt', + includePatterns: ['references/**', 'cli/**'], fullContent: true, - title: "Temporal API and CLI Reference" - } - ] + title: 'Temporal API and CLI Reference', + }, + ], }, ], ], @@ -388,7 +411,7 @@ module.exports = async function createConfigAsync() { // TypeScript always outputs 4 space indent. This is a workaround. // See https://github.com/microsoft/TypeScript/issues/4042 return code.replace(/^( {4})+/gm, (match) => { - return " ".repeat(match.length / 4); + return ' '.repeat(match.length / 4); }); } @@ -396,7 +419,7 @@ module.exports = async function createConfigAsync() { // lines. Helpful for cleaning up TypeScript examples that are pulled from // the body of a function. 
function dedent(code) { - const lines = code.split("\n"); + const lines = code.split('\n'); if (!lines.length) { return code; @@ -424,6 +447,6 @@ module.exports = async function createConfigAsync() { } // Otherwise, remove leading spaces from each line - return lines.map((line) => line.replace(new RegExp(`^ {${minIndent}}`), "")).join("\n"); + return lines.map((line) => line.replace(new RegExp(`^ {${minIndent}}`), '')).join('\n'); } }; diff --git a/package.json b/package.json index a57e6df92f..cd6cfece5a 100644 --- a/package.json +++ b/package.json @@ -21,7 +21,10 @@ "swizzle": "docusaurus swizzle", "vale": "./assembly/run-vale.sh", "write-heading-ids": "docusaurus write-heading-ids", - "write-translations": "docusaurus write-translations" + "write-translations": "docusaurus write-translations", + "e2e:install": "npx playwright install --with-deps", + "e2e:dev": "concurrently \"docusaurus start\" \"wait-on http://localhost:3000 && playwright test --project=chromium-desktop\"", + "e2e": "playwright test" }, "browserslist": { "production": [ @@ -63,7 +66,10 @@ "snipsync": "^1.9.0", "tar-fs": "^2.1.4", "uuid": "^11.1.0", - "webpack-font-preload-plugin": "^1.5.0" + "webpack-font-preload-plugin": "^1.5.0", + "@playwright/test": "^1.47.0", + "concurrently": "^8.2.2", + "wait-on": "^7.2.0" }, "devDependencies": { "dprint": "^0.45.0", diff --git a/playwright.config.ts b/playwright.config.ts new file mode 100644 index 0000000000..8cd11b28c7 --- /dev/null +++ b/playwright.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, devices } from '@playwright/test'; + +const PORT = Number(process.env.PORT ?? 3000); +const BASE_URL = process.env.PLAYWRIGHT_BASE_URL ?? `http://localhost:${PORT}`; + +export default defineConfig({ + testDir: './tests/playwright', + timeout: 60_000, + expect: { + timeout: 10_000, + }, + fullyParallel: true, + forbidOnly: !!process.env.CI, + retries: process.env.CI ? 2 : 0, + workers: process.env.CI ? 1 : undefined, + use: { + baseURL: BASE_URL, + trace: 'on-first-retry', + screenshot: 'only-on-failure', + video: 'retain-on-failure', + }, + webServer: process.env.PLAYWRIGHT_BASE_URL + ? undefined + : { + command: 'yarn start', + url: BASE_URL, + reuseExistingServer: !process.env.CI, + timeout: 120_000, + }, + projects: [ + { + name: 'chromium-desktop', + use: { ...devices['Desktop Chrome'] }, + }, + ], +}); diff --git a/plugins/cookbook-index/index.js b/plugins/cookbook-index/index.js new file mode 100644 index 0000000000..693a718a1b --- /dev/null +++ b/plugins/cookbook-index/index.js @@ -0,0 +1,97 @@ +const fs = require('fs'); +const path = require('path'); +const matter = require('gray-matter'); + +module.exports = function cookbookIndexPlugin(context, options = {}) { +console.log('[cookbook-index] init with docsDir:', options.docsDir); + console.log('[cookbook-index] resolved docsDir:', path.isAbsolute(options.docsDir) + ? options.docsDir + : path.join(context.siteDir, options.docsDir || 'cookbook')); + const docsDir = path.isAbsolute(options.docsDir) + ? 
options.docsDir + : path.join(context.siteDir, options.docsDir || 'cookbook'); + + const routeBasePath = options.routeBasePath || 'cookbook'; + + function walk(dir) { + if (!fs.existsSync(dir)) { + console.log(`[cookbook-index] walk(): directory not found -> ${dir}`); + return []; + } + const entries = fs.readdirSync(dir); + return entries.flatMap((name) => { + const full = path.join(dir, name); + const stat = fs.statSync(full); + if (stat.isDirectory()) return walk(full); + if (/\.(md|mdx)$/i.test(name)) { + return [full]; + } + return []; + }); +} + + function readItems() { + const files = walk(docsDir); + + const items = files + .map((file) => { + const src = fs.readFileSync(file, 'utf8'); + const { data } = matter(src); + + const rel = path.relative(docsDir, file).replace(/\\/g, '/'); + const base = rel.replace(/\.(md|mdx)$/i, ''); + + // Skip the cookbook index doc only + if (data.id === 'cookbook' || /(^|\/)index$/i.test(base)) { + return null; // will be removed by .filter(Boolean) + } + + const id = data.id || base; + const slug = (data.slug || base).replace(/^\/+/, ''); + const title = data.title || id; + const description = data.description || ''; + const tags = Array.isArray(data.tags) + ? data.tags.map((t) => (typeof t === 'string' ? t : t?.label || t?.name || t?.title)).filter(Boolean) + : []; + const permalink = `/${routeBasePath}/${slug}`.replace(/\/+/g, '/'); + + const source = typeof data.source === 'string' ? data.source : undefined; + + const rawPriority = data.priority; + let priority; + if (typeof rawPriority === 'number') { + priority = Number.isFinite(rawPriority) ? rawPriority : undefined; + } else if (typeof rawPriority === 'string') { + const parsed = Number(rawPriority); + priority = Number.isFinite(parsed) ? parsed : undefined; + } + + const item = { id, title, description, tags, permalink, source }; + if (typeof priority === 'number') { + item.priority = priority; + } + + return item; // ← IMPORTANT: actually return the object + }) + .filter(Boolean); // remove null/undefined entries + + return items; +} + + return { + name: 'cookbook-index', + getPathsToWatch() { + return [`${docsDir}/**/*.{md,mdx}`]; + }, + async loadContent() { + return { items: readItems() }; + }, + async contentLoaded({ content, actions }) { + const { createData, setGlobalData } = actions; + await createData('cookbook.index.json', JSON.stringify(content.items, null, 2)); + setGlobalData({ items: content.items }); + }, + }; + + +}; diff --git a/src/components/CookbookCategoryIndex.tsx b/src/components/CookbookCategoryIndex.tsx new file mode 100644 index 0000000000..79ba5d46a4 --- /dev/null +++ b/src/components/CookbookCategoryIndex.tsx @@ -0,0 +1,15 @@ +import React from 'react'; +import type {Props} from '@theme/DocCategoryGeneratedIndexPage'; +import CookbookHome from '@site/src/components/elements/CookbookHome'; +import Original from '@theme-original/DocCategoryGeneratedIndexPage'; + +export default function CookbookCategoryIndex(props: Props) { + const {categoryGeneratedIndex} = props; + const isRoot = categoryGeneratedIndex.permalink.replace(/\/+$/, '').endsWith('/cookbook'); + + // Root /cookbook uses your custom React page (no MDX, no DocCardList) + if (isRoot) return ; + + // Everything else (including /docs) stays default + return ; +} \ No newline at end of file diff --git a/src/components/CookbookDocItem.module.css b/src/components/CookbookDocItem.module.css new file mode 100644 index 0000000000..0320e7480f --- /dev/null +++ b/src/components/CookbookDocItem.module.css @@ -0,0 
+1,129 @@ +.main { + padding: 0 1rem; + width: 100%; + display: flex; + justify-content: center; +} + +.wrapper { + display: grid; + gap: 2rem; + width: min(100%, 1080px); + grid-template-columns: minmax(0, min(90vw, 720px)); + justify-content: center; +} + +.wrapper[data-has-toc='true'] { + grid-template-columns: minmax(0, min(90vw, 720px)) 260px; +} + +.article { + width: 100%; + max-width: min(90vw, 720px); + margin: 0 auto; +} + +.syntheticHeader { + margin-bottom: var(--ifm-spacing-lg, 1.5rem); +} + +.syntheticHeader h1 { + margin-bottom: var(--ifm-spacing-md, 1rem); +} + +.lastUpdated { + margin: 0 0 var(--ifm-spacing-sm, 0.75rem); + color: #465A78; + font-size: 0.95rem; +} + +.actionsRow { + display: flex; + align-items: center; + justify-content: space-between; + gap: 1rem; + flex-wrap: wrap; + margin-top: var(--ifm-spacing-sm, 0.75rem); + margin-bottom: var(--ifm-spacing-lg, 1.5rem); +} + +.actionLink { + display: inline-flex; + align-items: center; + gap: 0.5rem; + padding: 0.45rem 0.85rem; + border-radius: 6px; + background: transparent; + color: var(--ifm-color-emphasis-800); + font-weight: 600; + text-decoration: none; + transition: background-color 160ms ease, border-color 160ms ease, + color 160ms ease, box-shadow 160ms ease; +} + +.actionLink:hover, +.actionLink:focus-visible { + text-decoration: none; + background: color-mix(in srgb, var(--ifm-color-emphasis-800) 12%, transparent); + color: var(--ifm-color-emphasis-900); +} + +.actionLink:focus-visible { + outline: none; + box-shadow: 0 0 0 3px color-mix(in srgb, var(--ifm-color-emphasis-800) 18%, transparent); +} + +.actionGithub { + margin-left: auto; +} + +.actionGithub[aria-disabled='true'] { + opacity: 0.5; +} + +.actionIcon { + width: 1em; + height: 1em; + fill: currentColor; +} + +.actionsRow :is(.actionLink, .actionGithub) { + white-space: nowrap; +} + +.tocMobile { + margin-bottom: var(--ifm-spacing-lg, 1.5rem); +} + +.toc { + position: sticky; + top: calc(var(--ifm-navbar-height, 60px) + 24px); + max-height: calc(100vh - var(--ifm-navbar-height, 60px) - 48px); + overflow: auto; +} + +@media (max-width: 1200px) { + .wrapper[data-has-toc='true'] { + grid-template-columns: minmax(0, min(90vw, 720px)); + } + + .toc { + display: none; + } +} + +@media (max-width: 600px) { + .actionsRow { + flex-direction: column; + align-items: stretch; + } + + .actionGithub { + margin-left: 0; + } + + .actionsRow :is(.actionLink, .actionGithub) { + width: 100%; + text-align: center; + } +} diff --git a/src/components/CookbookDocItem.tsx b/src/components/CookbookDocItem.tsx new file mode 100644 index 0000000000..bab40a7510 --- /dev/null +++ b/src/components/CookbookDocItem.tsx @@ -0,0 +1,217 @@ +import React from 'react'; +import Head from '@docusaurus/Head'; +import {DocProvider, useDoc} from '@docusaurus/plugin-content-docs/client'; +import DocItemMetadata from '@theme/DocItem/Metadata'; +import type {Props as DocItemProps} from '@theme/DocItem'; +import {HtmlClassNameProvider} from '@docusaurus/theme-common'; +import DocItemTOCDesktop from '@theme/DocItem/TOC/Desktop'; +import DocItemTOCMobile from '@theme/DocItem/TOC/Mobile'; +import Link from '@docusaurus/Link'; +import {MDXProvider} from '@mdx-js/react'; +import MDXComponents from '@theme/MDXComponents'; +import clsx from 'clsx'; +import {usePluginData} from '@docusaurus/useGlobalData'; + +import styles from './CookbookDocItem.module.css'; + +type CookbookDocItemProps = DocItemProps & { tags?: string[] }; + +type CookbookIndexItem = { + id: string; + source?: string; +}; + 
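+// CookbookIndexItem (above) mirrors the entries exposed by the cookbook-index plugin's
+// global data; only `id` and `source` are used in this component, to resolve the
+// "Open in GitHub" link for a recipe.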
+function BackArrowIcon(props: React.SVGProps) { + return ( + + ); +} + +function GithubIcon(props: React.SVGProps) { + return ( + + ); +} + +export default function CookbookDocItem({content, tags}: CookbookDocItemProps) { + // IMPORTANT: don't call useDoc() here. + return ( + + + + ); +} + +function InnerCookbookDocItem({content, tags}: CookbookDocItemProps) { + const DocContent = content; + + // Now we're under , so useDoc() is safe: + const {metadata, frontMatter, toc, contentTitle} = useDoc(); + const { + title, + description, + id, + unversionedId, + tags: metaTags = [], + formattedLastUpdatedAt, + lastUpdatedAt, + } = metadata as typeof metadata & { + unversionedId?: string; + formattedLastUpdatedAt?: string; + lastUpdatedAt?: number | string | null; + }; + const indexData = usePluginData('cookbook-index') as {items?: CookbookIndexItem[]} | undefined; + const hasTOC = !frontMatter?.hide_table_of_contents && (toc?.length ?? 0) > 0; + const shouldRenderSyntheticTitle = !frontMatter?.hide_title && typeof contentTitle === 'undefined'; + const syntheticTitle = shouldRenderSyntheticTitle ? title : undefined; + + const resolvedTags = (tags ?? metaTags.map((t: any) => t.label)) as string[]; + const dataTags = resolvedTags.length ? resolvedTags.join(',') : undefined; + const cookbookFrontMatter = frontMatter as {source?: string} | undefined; + const pluginSource = React.useMemo(() => { + const items = indexData?.items; + if (!Array.isArray(items)) { + return undefined; + } + const match = items.find((item) => { + if (!item) { + return false; + } + return item.id === id || (!!unversionedId && item.id === unversionedId); + }); + return match?.source?.trim(); + }, [id, indexData, unversionedId]); + const frontMatterSource = cookbookFrontMatter?.source?.trim(); + const githubHref = pluginSource || frontMatterSource || ''; + const isGithubEnabled = Boolean(githubHref); + const handleGithubClick = React.useCallback( + (event: React.MouseEvent) => { + if (!isGithubEnabled) { + event.preventDefault(); + } + }, + [isGithubEnabled], + ); + const lastUpdatedLabel = React.useMemo(() => { + if (formattedLastUpdatedAt) { + return formattedLastUpdatedAt; + } + + const rawTimestamp = (() => { + if (typeof lastUpdatedAt === 'number') { + return lastUpdatedAt; + } + if (typeof lastUpdatedAt === 'string') { + const parsed = Number(lastUpdatedAt); + return Number.isNaN(parsed) ? undefined : parsed; + } + return undefined; + })(); + + if (typeof rawTimestamp !== 'number') { + return undefined; + } + + try { + const formatter = new Intl.DateTimeFormat(undefined, { + year: 'numeric', + month: 'short', + day: 'numeric', + }); + return formatter.format(new Date(rawTimestamp)); + } catch { + return new Date(rawTimestamp).toLocaleDateString(); + } + }, [formattedLastUpdatedAt, lastUpdatedAt]); + const renderLastUpdated = React.useCallback(() => { + if (!lastUpdatedLabel) { + return null; + } + return

Last updated {lastUpdatedLabel}

; + }, [lastUpdatedLabel]); + const renderActions = React.useCallback(() => ( +
+ + + Back to Cookbook + + + + Open in GitHub + +
+ ), [githubHref, handleGithubClick, isGithubEnabled]); + + const components = React.useMemo(() => { + const DefaultH1 = + (MDXComponents?.h1 as React.ComponentType>) ?? + ((props: React.HTMLAttributes) =>

); + + let actionsInjected = Boolean(syntheticTitle); + return { + ...MDXComponents, + h1: (props: React.HTMLAttributes) => { + if (!actionsInjected) { + actionsInjected = true; + return ( + <> + {renderActions()} + + {renderLastUpdated()} + + ); + } + return ; + }, + } as typeof MDXComponents; + }, [renderActions, renderLastUpdated, syntheticTitle]); + + return ( + + + + + {title} + {description && } + +
+
+
+ {syntheticTitle && ( +
+ {renderActions()} +

{syntheticTitle}

+ {renderLastUpdated()} +
+ )} + {hasTOC && ( +
+ +
+ )} + + + +
+ {hasTOC && ( + + )} +
+
+
+ ); +} diff --git a/src/components/elements/CookbookHome.module.css b/src/components/elements/CookbookHome.module.css new file mode 100644 index 0000000000..4c9720a953 --- /dev/null +++ b/src/components/elements/CookbookHome.module.css @@ -0,0 +1,102 @@ +/* Center the page content inside the default docs column */ +/* Build a spacing scale from the theme’s globals */ +:global(.cookbook--centered) { + /* Base: prefer vertical spacing, fall back to global, then 1rem */ + --cbk-space: var(--ifm-spacing-vertical, var(--ifm-global-spacing, 1rem)); + --cbk-space-xs: calc(var(--cbk-space) * 0.25); + --cbk-space-sm: calc(var(--cbk-space) * 0.5); + --cbk-space-md: var(--cbk-space); + --cbk-space-lg: calc(var(--cbk-space) * 1.5); + --cbk-space-xl: calc(var(--cbk-space) * 2); + + /* Optional: provide IfM-style aliases so existing CSS continues to work */ + --ifm-spacing-xs: var(--cbk-space-xs); + --ifm-spacing-sm: var(--cbk-space-sm); + --ifm-spacing-md: var(--cbk-space-md); + --ifm-spacing-lg: var(--cbk-space-lg); + --ifm-spacing-xl: var(--cbk-space-xl); +} + + +:global(.cookbook--centered .container) { + max-width: 1440px; +} + +:global(.cookbook--main) { + display: flex; + justify-content: center; +} + +:global(.cookbook--article) { + width: 100%; + max-width: 1440px; + padding: 2rem 1rem; + margin: 0 auto; +} + +.page { + width: 100%; + display: flex; + justify-content: center; + --cookbook-tile-border-color: rgba(15, 23, 42, 0.08); + --cookbook-tile-shadow: 0 0 1rem 0.5rem rgba(139, 143, 150, 0.2); +} + + +:global([data-theme='dark']) .page { + --cookbook-tile-border-color: rgba(226, 232, 240, 0.5); + --cookbook-tile-shadow: 0 0 1rem 0.5rem rgba(142, 145, 153, 0.78); +} + +.inner { + width: min(100%, 1440px); + margin: 0 auto; + padding: var(--ifm-spacing-xl, var(--cbk-space-xl, 32px)) + var(--ifm-spacing-horizontal, var(--cbk-space-md, 16px)); + display: flex; + flex-direction: column; + align-items: center; + gap: var(--ifm-spacing-xl); +} + +/* Hero */ +.hero { + text-align: center; + margin: 0 auto var(--ifm-spacing-xl, var(--cbk-space-xl, 32px)); + max-width: 820px; +} + +.eyebrow { + margin: var(--ifm-spacing-lg) 0; + font-size: 0.8rem; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--ifm-color-emphasis-600); +} + +.heroTitle { + margin: 0 0 var(--ifm-spacing-sm) 0; + font-size: clamp(1.5rem, 2.2vw + 1rem, 2.25rem); + line-height: 1.15; +} + +.heroBlurb { + margin: 0 auto; + color: var(--ifm-color-emphasis-700); +} + +/* Tiles grid: force 3-up from md+ */ +.grid { + display: grid; + grid-template-columns: repeat(3, minmax(0, 1fr)); + gap: var(--ifm-spacing-lg, var(--cbk-space-lg, 24px)); + width: 100%; +} + +.cell { +} + +/* Mobile: stack */ +@media (max-width: 768px) { + .grid { grid-template-columns: 1fr; } +} diff --git a/src/components/elements/CookbookHome.tsx b/src/components/elements/CookbookHome.tsx new file mode 100644 index 0000000000..5ae20a4286 --- /dev/null +++ b/src/components/elements/CookbookHome.tsx @@ -0,0 +1,164 @@ +import React from "react"; +import Tile from "@site/src/components/elements/Tile"; +import { useAllDocsData } from "@docusaurus/plugin-content-docs/client"; +import styles from "./CookbookHome.module.css"; +import useGlobalData, { usePluginData } from "@docusaurus/useGlobalData"; +import clsx from "clsx"; + +type CookbookItem = { + id: string; + title: string; + description: string; + tags: string[]; + permalink: string; + source?: string; + priority?: number; +}; + +type DocMeta = { + id: string; + unversionedId?: string; + title?: 
string; + description?: string; + frontMatter?: { title?: string; description?: string; tags?: any[] }; + tags?: { label: string }[]; + permalink?: string; + lastUpdatedAt?: number | string | null; +}; + +function resolveDocMeta(item: CookbookItem, docsById: Map) { + return ( + docsById.get(item.id) ?? + docsById.get(`cookbook:${item.id}`) ?? + docsById.get(item.id.replace(/^cookbook:/, "")) ?? + null + ); +} + +function DocTile({ item, docsById }: { item: CookbookItem; docsById: Map }) { + const { id, title: pluginTitle, description: pluginDescription, tags: pluginTags, permalink: pluginPermalink } = item; + + const docMeta = resolveDocMeta(item, docsById); + + const title = docMeta?.title ?? docMeta?.frontMatter?.title ?? pluginTitle; + const description = docMeta?.description ?? docMeta?.frontMatter?.description ?? pluginDescription; + + if (!title || !description) { + throw new Error( + `Cookbook doc "${id}" missing required field(s):` + + `${!title ? " title" : ""}` + + `${!description ? " description" : ""}` + ); + } + + const tagsFromMeta = docMeta?.tags?.map((t: any) => t.label); + const tagsFromFrontMatter = Array.isArray(docMeta?.frontMatter?.tags) + ? docMeta.frontMatter.tags.map((t: any) => (typeof t === "string" ? t : t?.label)).filter(Boolean) + : undefined; + const resolvedTags = (tagsFromMeta ?? tagsFromFrontMatter ?? pluginTags) as string[]; + + const href = docMeta?.permalink ?? pluginPermalink ?? "#"; + + return ; +} + +export default function CookbookHome() { + const global = useGlobalData(); + console.log("[CookbookHome] plugins:", Object.keys(global?.plugins ?? {})); // should include 'cookbook-index' + + const dataAny = usePluginData("cookbook-index") as any; + const allDocsData = useAllDocsData(); + const cookbookDocs = + allDocsData?.cookbook?.versions?.find((version: any) => version?.isLast) ?? allDocsData?.cookbook?.versions?.[0]; + + const docsById = React.useMemo(() => { + const map = new Map(); + const docs: DocMeta[] = cookbookDocs?.docs ?? []; + docs.forEach((doc) => { + map.set(doc.id, doc); + map.set(`cookbook:${doc.id}`, doc); + if (doc.unversionedId) { + map.set(doc.unversionedId, doc); + map.set(`cookbook:${doc.unversionedId}`, doc); + } + }); + return map; + }, [cookbookDocs]); + + const raw = (dataAny?.items ?? []) as (CookbookItem | null | undefined)[]; + raw.forEach((x, i) => { + if (!x || typeof (x as any).title !== "string") { + console.warn("[CookbookHome] invalid item at index", i, x); + } + }); + + const items: CookbookItem[] = raw.filter( + (x): x is CookbookItem => !!x && typeof x === "object" && typeof (x as any).title === "string" + ); + + if (items.length === 0) { + throw new Error("CookbookHome: no items found by cookbook-index plugin (check server logs for [cookbook-index])."); + } + + const getLastUpdatedTimestamp = React.useCallback( + (item: CookbookItem) => { + const meta = resolveDocMeta(item, docsById); + const rawTimestamp = meta?.lastUpdatedAt; + if (typeof rawTimestamp === "number") { + return Number.isFinite(rawTimestamp) ? rawTimestamp : 0; + } + if (typeof rawTimestamp === "string") { + const parsed = Number(rawTimestamp); + return Number.isFinite(parsed) ? parsed : 0; + } + return 0; + }, + [docsById] + ); + + const sortedItems = React.useMemo(() => { + return [...items].sort((a, b) => { + const priorityA = typeof a.priority === "number" && Number.isFinite(a.priority) ? a.priority : null; + const priorityB = typeof b.priority === "number" && Number.isFinite(b.priority) ? 
b.priority : null; + + if (priorityA !== null && priorityB !== null) { + if (priorityA !== priorityB) { + return priorityB - priorityA; + } + } else if (priorityA !== null) { + return -1; + } else if (priorityB !== null) { + return 1; + } + + const updatedA = getLastUpdatedTimestamp(a); + const updatedB = getLastUpdatedTimestamp(b); + + if (updatedA === updatedB) { + return 0; + } + return updatedB - updatedA; + }); + }, [getLastUpdatedTimestamp, items]); + + return ( +
+
+
+

AI Cookbook

+

+ Step-by-step solutions that show you how to build reliable, production-ready AI systems with Temporal. Learn + practical paradigms for prompts, tools, retries, and Workflow design. +

+
+
+ {sortedItems.map((it) => ( +
+ +
+ ))} +
+
+
+ ); +} diff --git a/src/components/elements/Tile.module.css b/src/components/elements/Tile.module.css new file mode 100644 index 0000000000..453088c1f1 --- /dev/null +++ b/src/components/elements/Tile.module.css @@ -0,0 +1,109 @@ +:global([data-theme='light']) { + --cookbook-tile-border-color: rgba(15, 23, 42, 0.08); + --cookbook-tile-shadow: 0 20px 40px rgba(15, 23, 42, 0.16); +} + + +:global([data-theme='dark']) { + --cookbook-tile-border-color: rgba(148, 163, 184, 0.32); + --cookbook-tile-shadow: 0 24px 44px rgba(0, 5, 29, 0.5); +} + + +.tile, +.tile.card, +.card.tile { + display: flex; + flex-direction: column; + height: 100%; + text-decoration: none; + color: var(--ifm-color-emphasis-900); + background-color: var(--ifm-background-color); + border-radius: 0; + border: 0.1px solid var(--cookbook-tile-border-color); + box-shadow: 0 0 0 1px var(--cookbook-tile-border-color); + transition: box-shadow 0.28s ease 70ms; +} + + +.tile[data-clickable='true'] { + cursor: pointer; +} + + +.tile:hover, +.tile[data-clickable='true']:focus-visible { + box-shadow: 0 0 0 1px var(--cookbook-tile-border-color), var(--cookbook-tile-shadow); + text-decoration: none; +} + + +.tile[data-clickable='true']:focus-visible { + outline: none; +} + + +.header { +display: flex; +align-items: center; +gap: 0.75rem; +} + + +.icon { +font-size: 1.5rem; +line-height: 1; +} + + +.title { +margin: 0; +font-size: 1.1rem; +font-weight: 600; +color: inherit; +} + + +.body { +color: var(--ifm-color-emphasis-700); +} + + +.description { +margin: 0.25rem 0 0 0; +} + + +.tags { +display: flex; +flex-wrap: wrap; +gap: 0.5rem; +margin-top: 0.75rem; +padding: 0; +list-style: none; +} + + +.tag { +font-size: 0.75rem; +} + + +.footer { +margin-top: auto; +} + + +.cta { +display: inline-flex; +align-items: center; +gap: 0.375rem; +font-weight: 500; +transition: color 0.18s ease; +} + + +.arrow { +transform: translateX(0); +transition: transform 0.18s ease; +} diff --git a/src/components/elements/Tile.tsx b/src/components/elements/Tile.tsx new file mode 100644 index 0000000000..a94a61d1ab --- /dev/null +++ b/src/components/elements/Tile.tsx @@ -0,0 +1,67 @@ +import Link from "@docusaurus/Link"; +import clsx from "clsx"; +import styles from "./Tile.module.css"; + +export type TileProps = { + title: string; + description: string; + tags?: string[]; // collected, may be used later + href?: string; // clickable card when present + icon?: React.ReactNode; // emoji or SVG + className?: string; + headingLevel?: "h2" | "h3"; +}; + +export default function Tile({ title, description, tags = [], href, icon, className, headingLevel = "h3" }: TileProps) { + const isLink = typeof href === "string" && href.length > 0; + const Container: React.ElementType = isLink ? Link : "div"; + const containerProps = isLink ? { to: href, href } : {}; + const Heading = headingLevel; + + return ( + +
+ {icon ? ( +
+ {icon} +
+ ) : null} + {title} +
+
+

{description}

+ {tags.length > 0 && ( +
    + {tags.map((t) => ( +
  • + {t.toUpperCase()} +
  • + ))} +
+ )} +
+ {href && ( +
+ + Learn more + + + + +
+ )} +
+ ); +} diff --git a/src/components/elements/TileGrid.tsx b/src/components/elements/TileGrid.tsx new file mode 100644 index 0000000000..f55c148208 --- /dev/null +++ b/src/components/elements/TileGrid.tsx @@ -0,0 +1,19 @@ +import clsx from 'clsx'; +import Tile, { TileProps } from './Tile'; + +export type TileGridProps = { + items: (TileProps & { key?: string })[]; + className?: string; +}; + +export default function TileGrid({ items, className }: TileGridProps) { + return ( +
+ {items.map((it) => ( +
+ +
+ ))} +
+ ); +} diff --git a/src/css/custom.css b/src/css/custom.css index e4c38b5406..98aef47bb2 100644 --- a/src/css/custom.css +++ b/src/css/custom.css @@ -28,7 +28,7 @@ --feedback-white-color: #fff; } -html[data-theme="dark"] { +html[data-theme="dark"]:root { --ifm-color-primary: #bfdbfe; --ifm-background-color: #141414; --ifm-footer-background-color: #000000; @@ -63,7 +63,7 @@ html[data-theme="dark"] { --tag-text-color: var(--tag-text-color-dark); } -html[data-theme="light"] { +html[data-theme="light"]:root { --ifm-color-primary: #1d4ed8; --ifm-background-color: #f9fafb; --ifm-footer-background-color: #ffffff; diff --git a/src/pages/cookbook.tsx b/src/pages/cookbook.tsx new file mode 100644 index 0000000000..103504b48a --- /dev/null +++ b/src/pages/cookbook.tsx @@ -0,0 +1,11 @@ +import React from 'react'; +import Layout from '@theme/Layout'; +import CookbookHome from '@site/src/components/elements/CookbookHome'; + +export default function CookbookLanding() { + return ( + + + + ); +} diff --git a/tests/playwright/cookbook-home.spec.ts b/tests/playwright/cookbook-home.spec.ts new file mode 100644 index 0000000000..4997d56b62 --- /dev/null +++ b/tests/playwright/cookbook-home.spec.ts @@ -0,0 +1,82 @@ +import { expect, test } from '@playwright/test'; +import type { Locator } from '@playwright/test'; + +const collectTileData = async (locator: Locator) => { + return locator.evaluateAll((elements) => { + return elements.map((element) => { + const link = element as HTMLAnchorElement; + const heading = element.querySelector('h3'); + const title = heading?.textContent?.trim() ?? ''; + const href = link.getAttribute('href') ?? link.getAttribute('to') ?? ''; + return { title, href }; + }); + }); +}; + +test.describe('Cookbook home', () => { + test('renders cookbook tiles with expected metadata and layout on desktop', async ({ page }) => { + await page.goto('/cookbook'); + + await expect(page.getByTestId('cookbook-hero')).toBeVisible(); + + const tiles = page.locator('.tile'); + await expect(tiles).not.toHaveCount(0); + + const tileData = await collectTileData(tiles); + expect(tileData.length).toBeGreaterThan(0); + + for (const { title, href } of tileData) { + expect(title).not.toEqual(''); + expect(href).toMatch(/\/cookbook\//); + } + + const gridMetrics = await tiles.evaluateAll((elements) => { + if (elements.length === 0) return null; + + const sample = elements[0]; + const grid = sample.parentElement?.parentElement; + if (!grid) return null; + + const gridRect = grid.getBoundingClientRect(); + const gridTemplate = getComputedStyle(grid).gridTemplateColumns.trim(); + const columnCount = gridTemplate ? gridTemplate.split(/\s+/).filter(Boolean).length : 0; + const fractions = elements.map((el) => { + const rect = el.getBoundingClientRect(); + return gridRect.width > 0 ? rect.width / gridRect.width : 0; + }); + + return { gridTemplate, columnCount, fractions }; + }); + + expect(gridMetrics).not.toBeNull(); + expect(gridMetrics?.columnCount).toBe(3); + + for (const fraction of gridMetrics?.fractions ?? 
[]) { + expect(fraction).toBeGreaterThan(0.2); + expect(fraction).toBeLessThan(0.38); + } + }); + + test('stacks tiles into a single column on mobile viewports', async ({ page }) => { + await page.setViewportSize({ width: 600, height: 900 }); + await page.goto('/cookbook'); + + const tiles = page.locator('.tile'); + await expect(tiles).not.toHaveCount(0); + + const widthFractions = await tiles.evaluateAll((elements) => + elements.map((element) => { + const tileRect = element.getBoundingClientRect(); + const grid = element.parentElement?.parentElement; + const gridRect = grid?.getBoundingClientRect(); + const fraction = gridRect && gridRect.width > 0 ? tileRect.width / gridRect.width : 0; + return Number(fraction.toFixed(2)); + }) + ); + + for (const fraction of widthFractions) { + expect(fraction).toBeGreaterThan(0.9); + expect(fraction).toBeLessThan(1.05); + } + }); +}); diff --git a/tsconfig.json b/tsconfig.json index b7555ee35d..84153ab6bf 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,16 +1,28 @@ { - "compilerOptions": { - "moduleResolution": "node", - "noUnusedLocals": false, - "noUnusedParameters": false, - "noResolve": true, - "allowUnusedLabels": true, - "skipLibCheck": true, - "noEmitOnError": true, - "strict": false, - "removeComments": false, - "module": "ESNext", - "target": "ESNext", - "alwaysStrict": false - } - } \ No newline at end of file + "compilerOptions": { + "module": "ESNext", + "target": "ESNext", + "moduleResolution": "node", // keep in sync with webpack + "jsx": "react-jsx", // fine + "skipLibCheck": true, + "strict": false, + "noUnusedLocals": false, + "noUnusedParameters": false, + "allowSyntheticDefaultImports": true, + "esModuleInterop": true, + + // Avoid redefining @site/@generated here unless you also wire webpack. + // If you must, ensure webpack aliases match exactly. + // "baseUrl": ".", + // "paths": { "@site/*": ["./*"], "@generated/*": [".docusaurus/*"] }, + + "types": ["@docusaurus/module-type-aliases", "react", "react-dom"] + }, + "include": [ + "src", + "docusaurus.config.*", + "sidebars.*", + "types" + ], + "exclude": ["node_modules", "build", ".docusaurus"] +} diff --git a/types/docusaurus.d.ts b/types/docusaurus.d.ts new file mode 100644 index 0000000000..45d99d9cea --- /dev/null +++ b/types/docusaurus.d.ts @@ -0,0 +1,13 @@ +// Minimal shims for Docusaurus virtual modules. +// Purely for the type checker; does not affect build output. 
+declare module '@theme/*'; +declare module '@generated/*'; +declare module '@docusaurus/*'; +declare module '@site/*'; + +// (optional, nice to have) +declare module '*.module.css' { + const classes: { readonly [key: string]: string }; + export default classes; +} +declare module '*.css'; diff --git a/yarn.lock b/yarn.lock index c377ea69bb..b78e4f19ac 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1058,6 +1058,11 @@ dependencies: regenerator-runtime "^0.14.0" +"@babel/runtime@^7.21.0": + version "7.28.4" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.28.4.tgz#a70226016fabe25c5783b2f22d3e1c9bc5ca3326" + integrity sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ== + "@babel/template@^7.27.1", "@babel/template@^7.27.2": version "7.27.2" resolved "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz" @@ -2342,6 +2347,13 @@ dependencies: "@octokit/openapi-types" "^12.11.0" +"@playwright/test@^1.47.0": + version "1.55.1" + resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.55.1.tgz#80f775d5f948cd3ef550fcc45ef99986d3ffb36c" + integrity sha512-IVAh/nOJaw6W9g+RJVlIQJ6gSiER+ae6mKQ5CX1bERzQgbC1VSeBlwdvczT7pxb0GWiyrxH4TGKbMfDb4Sq/ig== + dependencies: + playwright "1.55.1" + "@pnpm/config.env-replace@^1.1.0": version "1.1.0" resolved "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz" @@ -3293,12 +3305,12 @@ acorn@^7.1.1, acorn@^7.4.0: resolved "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz" integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== -acorn@^8.0.0, acorn@^8.0.4, acorn@^8.2.4, acorn@^8.5.0, acorn@^8.8.2: +acorn@^8.0.0, acorn@^8.0.4, acorn@^8.11.0, acorn@^8.2.4, acorn@^8.5.0, acorn@^8.8.2: version "8.14.1" resolved "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz" integrity sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg== -acorn@^8.11.0, acorn@^8.15.0: +acorn@^8.15.0: version "8.15.0" resolved "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz" integrity sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== @@ -3680,6 +3692,15 @@ aws4@^1.8.0: resolved "https://registry.npmjs.org/aws4/-/aws4-1.13.2.tgz" integrity sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw== +axios@^1.6.1: + version "1.12.2" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.12.2.tgz#6c307390136cf7a2278d09cec63b136dfc6e6da7" + integrity sha512-vMJzPewAlRyOgxV2dU0Cuz2O8zzzx9VYtbJOaBgXFeLc4IV/Eg50n4LowmehOOR61S8ZMpc2K5Sa7g6A4jfkUw== + dependencies: + follow-redirects "^1.15.6" + form-data "^4.0.4" + proxy-from-env "^1.1.0" + babel-extract-comments@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/babel-extract-comments/-/babel-extract-comments-1.0.0.tgz" @@ -4195,6 +4216,15 @@ cliui@^7.0.2: strip-ansi "^6.0.0" wrap-ansi "^7.0.0" +cliui@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" + integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.1" + wrap-ansi "^7.0.0" + clone-deep@^4.0.1: version "4.0.1" resolved "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz" @@ -4340,6 +4370,21 @@ concat-map@0.0.1: resolved "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz" integrity 
sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== +concurrently@^8.2.2: + version "8.2.2" + resolved "https://registry.yarnpkg.com/concurrently/-/concurrently-8.2.2.tgz#353141985c198cfa5e4a3ef90082c336b5851784" + integrity sha512-1dP4gpXFhei8IOtlXRE/T/4H88ElHgTiUzh71YUmtjTEHMSRS2Z/fgOxHSxxusGHogsRfxNq1vyAwxSC+EVyDg== + dependencies: + chalk "^4.1.2" + date-fns "^2.30.0" + lodash "^4.17.21" + rxjs "^7.8.1" + shell-quote "^1.8.1" + spawn-command "0.0.2" + supports-color "^8.1.1" + tree-kill "^1.2.2" + yargs "^17.7.2" + config-chain@^1.1.11: version "1.1.13" resolved "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz" @@ -4817,6 +4862,13 @@ data-view-byte-offset@^1.0.1: es-errors "^1.3.0" is-data-view "^1.0.1" +date-fns@^2.30.0: + version "2.30.0" + resolved "https://registry.yarnpkg.com/date-fns/-/date-fns-2.30.0.tgz#f367e644839ff57894ec6ac480de40cae4b0f4d0" + integrity sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw== + dependencies: + "@babel/runtime" "^7.21.0" + debounce@^1.2.1: version "1.2.1" resolved "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz" @@ -5209,10 +5261,10 @@ end-of-stream@^1.1.0, end-of-stream@^1.4.1: dependencies: once "^1.4.0" -enhanced-resolve@^5.17.2: - version "5.18.2" - resolved "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.2.tgz" - integrity sha512-6Jw4sE1maoRJo3q8MsSIn2onJFbLTOjY9hlx4DZXmOKvLRd1Ok2kXmAGXaafL2+ijsJZ1ClYbl/pmqr9+k4iUQ== +enhanced-resolve@^5.17.3: + version "5.18.3" + resolved "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.3.tgz" + integrity sha512-d4lC8xfavMeBjzGr2vECC3fsGXziXZQyJxD868h2M/mBI3PwAuODxAkLkq5HYuvrPYcUtiLzsTo8U3PgX3Ocww== dependencies: graceful-fs "^4.2.4" tapable "^2.2.0" @@ -5912,7 +5964,7 @@ flatted@^3.2.9: resolved "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz" integrity sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg== -follow-redirects@^1.0.0: +follow-redirects@^1.0.0, follow-redirects@^1.15.6: version "1.15.11" resolved "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz" integrity sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ== @@ -5963,6 +6015,17 @@ form-data@^4.0.0: es-set-tostringtag "^2.1.0" mime-types "^2.1.12" +form-data@^4.0.4: + version "4.0.4" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4" + integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.8" + es-set-tostringtag "^2.1.0" + hasown "^2.0.2" + mime-types "^2.1.12" + form-data@~2.3.2: version "2.3.3" resolved "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz" @@ -6034,6 +6097,11 @@ fs.realpath@^1.0.0: resolved "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz" integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== +fsevents@2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" + integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== + fsevents@~2.3.2: version "2.3.3" resolved "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz" @@ -7315,7 +7383,7 @@ jiti@^1.20.0: resolved 
"https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz" integrity sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A== -joi@^17.9.2: +joi@^17.11.0, joi@^17.9.2: version "17.13.3" resolved "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz" integrity sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA== @@ -8625,7 +8693,7 @@ minimatch@^9.0.3: dependencies: brace-expansion "^2.0.1" -minimist@^1.2.0, minimist@^1.2.6: +minimist@^1.2.0, minimist@^1.2.6, minimist@^1.2.8: version "1.2.8" resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== @@ -9226,6 +9294,20 @@ pkg-dir@^7.0.0: dependencies: find-up "^6.3.0" +playwright-core@1.55.1: + version "1.55.1" + resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.55.1.tgz#5d3bb1846bc4289d364ea1a9dcb33f14545802e9" + integrity sha512-Z6Mh9mkwX+zxSlHqdr5AOcJnfp+xUWLCt9uKV18fhzA8eyxUd8NUWzAjxUh55RZKSYwDGX0cfaySdhZJGMoJ+w== + +playwright@1.55.1: + version "1.55.1" + resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.55.1.tgz#8a9954e9e61ed1ab479212af9be336888f8b3f0e" + integrity sha512-cJW4Xd/G3v5ovXtJJ52MAOclqeac9S/aGGgRzLabuF8TnIb6xHvMzKIa6JmrRzUkeXJgfL1MhukP0NK6l39h3A== + dependencies: + playwright-core "1.55.1" + optionalDependencies: + fsevents "2.3.2" + possible-typed-array-names@^1.0.0: version "1.1.0" resolved "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz" @@ -10086,6 +10168,11 @@ proxy-addr@~2.0.7: forwarded "0.2.0" ipaddr.js "1.9.1" +proxy-from-env@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2" + integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg== + psl@^1.1.28, psl@^1.1.33: version "1.15.0" resolved "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz" @@ -10753,6 +10840,13 @@ run-parallel@^1.1.9: dependencies: queue-microtask "^1.2.2" +rxjs@^7.8.1: + version "7.8.2" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.2.tgz#955bc473ed8af11a002a2be52071bf475638607b" + integrity sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA== + dependencies: + tslib "^2.1.0" + safe-array-concat@^1.1.3: version "1.1.3" resolved "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz" @@ -11014,7 +11108,7 @@ shebang-regex@^3.0.0: resolved "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz" integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== -shell-quote@^1.8.3: +shell-quote@^1.8.1, shell-quote@^1.8.3: version "1.8.3" resolved "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.3.tgz" integrity sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw== @@ -11195,6 +11289,11 @@ space-separated-tokens@^2.0.0: resolved "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz" integrity sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q== +spawn-command@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/spawn-command/-/spawn-command-0.0.2.tgz#9544e1a43ca045f8531aac1a48cb29bdae62338e" + integrity 
sha512-zC8zGoGkmc8J9ndvml8Xksr1Amk9qBujgbF0JAIWO7kXr43w0h/0GJNM/Vustixu+YE8N/MTrQ7N31FvHUACxQ== + spdx-correct@^3.0.0: version "3.2.0" resolved "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz" @@ -11488,7 +11587,7 @@ supports-color@^7.1.0: dependencies: has-flag "^4.0.0" -supports-color@^8.0.0: +supports-color@^8.0.0, supports-color@^8.1.1: version "8.1.1" resolved "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz" integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== @@ -11711,6 +11810,11 @@ tr46@~0.0.3: resolved "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz" integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== +tree-kill@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/tree-kill/-/tree-kill-1.2.2.tgz#4ca09a9092c88b73a7cdc5e8a01b507b0790a0cc" + integrity sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A== + trim-lines@^3.0.0: version "3.0.1" resolved "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz" @@ -11721,7 +11825,7 @@ trough@^2.0.0: resolved "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz" integrity sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw== -tslib@^2.0.3, tslib@^2.4.0, tslib@^2.6.0: +tslib@^2.0.3, tslib@^2.1.0, tslib@^2.4.0, tslib@^2.6.0: version "2.8.1" resolved "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz" integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== @@ -12191,6 +12295,17 @@ w3c-xmlserializer@^3.0.0: dependencies: xml-name-validator "^4.0.0" +wait-on@^7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/wait-on/-/wait-on-7.2.0.tgz#d76b20ed3fc1e2bebc051fae5c1ff93be7892928" + integrity sha512-wCQcHkRazgjG5XoAq9jbTMLpNIjoSlZslrJ2+N9MxDsGEv1HnFoVjOCexL0ESva7Y9cu350j+DWADdk54s4AFQ== + dependencies: + axios "^1.6.1" + joi "^17.11.0" + lodash "^4.17.21" + minimist "^1.2.8" + rxjs "^7.8.1" + watchpack@^2.4.1: version "2.4.2" resolved "https://registry.npmjs.org/watchpack/-/watchpack-2.4.2.tgz" @@ -12333,9 +12448,9 @@ webpack-sources@^3.2.2, webpack-sources@^3.3.3: integrity sha512-yd1RBzSGanHkitROoPFd6qsrxt+oFhg/129YzheDGqeustzX0vTZJZsSsQjVQC4yzBQ56K55XU8gaNCtIzOnTg== webpack@^5.88.1, webpack@^5.95.0: - version "5.100.1" - resolved "https://registry.npmjs.org/webpack/-/webpack-5.100.1.tgz" - integrity sha512-YJB/ESPUe2Locd0NKXmw72Dx8fZQk1gTzI6rc9TAT4+Sypbnhl8jd8RywB1bDsDF9Dy1RUR7gn3q/ZJTd0OZZg== + version "5.101.3" + resolved "https://registry.npmjs.org/webpack/-/webpack-5.101.3.tgz" + integrity sha512-7b0dTKR3Ed//AD/6kkx/o7duS8H3f1a4w3BYpIriX4BzIhjkn4teo05cptsxvLesHFKK5KObnadmCHBwGc+51A== dependencies: "@types/eslint-scope" "^3.7.7" "@types/estree" "^1.0.8" @@ -12347,7 +12462,7 @@ webpack@^5.88.1, webpack@^5.95.0: acorn-import-phases "^1.0.3" browserslist "^4.24.0" chrome-trace-event "^1.0.2" - enhanced-resolve "^5.17.2" + enhanced-resolve "^5.17.3" es-module-lexer "^1.2.1" eslint-scope "5.1.1" events "^3.2.0" @@ -12760,6 +12875,11 @@ yargs-parser@^20.2.2: resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz" integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== +yargs-parser@^21.1.1: + version "21.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity 
sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + yargs@^16.0.3: version "16.2.0" resolved "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz" @@ -12773,6 +12893,19 @@ yargs@^16.0.3: y18n "^5.0.5" yargs-parser "^20.2.2" +yargs@^17.7.2: + version "17.7.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" + integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== + dependencies: + cliui "^8.0.1" + escalade "^3.1.1" + get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.3" + y18n "^5.0.5" + yargs-parser "^21.1.1" + yauzl@^2.10.0: version "2.10.0" resolved "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz"