From 1fc1ae03d1429e976f6bf213f44907cd59b3b5cf Mon Sep 17 00:00:00 2001 From: "L. Elaine Dazzio" Date: Wed, 18 Mar 2026 19:37:11 -0400 Subject: [PATCH 1/2] fix: use workflow factory to avoid RuntimeError under parallel requests (#4766) Pass a factory lambda to `from_agent_framework()` instead of a pre-built agent instance so each hosted request gets a fresh workflow. Previously, the single shared workflow would raise `RuntimeError: Workflow is already running. Concurrent executions are not allowed.` when parallel requests arrived. --- .../main.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 python/samples/05-end-to-end/hosted_agents/writer_reviewer_agents_in_workflow/main.py diff --git a/python/samples/05-end-to-end/hosted_agents/writer_reviewer_agents_in_workflow/main.py b/python/samples/05-end-to-end/hosted_agents/writer_reviewer_agents_in_workflow/main.py new file mode 100644 index 0000000000..fc0cec4d58 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/writer_reviewer_agents_in_workflow/main.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +import os +from contextlib import asynccontextmanager + +from agent_framework import WorkflowBuilder +from agent_framework.azure import AzureOpenAIResponsesClient +from azure.ai.agentserver.agentframework import from_agent_framework +from azure.identity.aio import AzureCliCredential, ManagedIdentityCredential +from dotenv import load_dotenv + +load_dotenv(override=True) + +# Configure these for your Foundry project +# Read the explicit variables present in the .env file +PROJECT_ENDPOINT = os.getenv( + "PROJECT_ENDPOINT" +) # e.g., "https://.services.ai.azure.com/api/projects/" +MODEL_DEPLOYMENT_NAME = os.getenv( + "MODEL_DEPLOYMENT_NAME", "gpt-4.1-mini" +) # Your model deployment name e.g., "gpt-4.1-mini" + + +def get_credential(): + """Will use Managed Identity when running in Azure, otherwise falls back to Azure CLI Credential.""" + return ( + ManagedIdentityCredential() + if os.getenv("MSI_ENDPOINT") + else AzureCliCredential() + ) + + +@asynccontextmanager +async def create_agents(): + async with get_credential() as credential: + client = AzureOpenAIResponsesClient( + project_endpoint=PROJECT_ENDPOINT, + deployment_name=MODEL_DEPLOYMENT_NAME, + credential=credential, + ) + writer = client.as_agent( + name="Writer", + instructions="You are an excellent content writer. You create new content and edit contents based on the feedback.", + ) + reviewer = client.as_agent( + name="Reviewer", + instructions="You are an excellent content reviewer. Provide actionable feedback to the writer about the provided content in the most concise manner possible.", + ) + yield writer, reviewer + + +def create_workflow(writer, reviewer): + workflow = WorkflowBuilder(start_executor=writer).add_edge(writer, reviewer).build() + return workflow.as_agent() + + +async def main() -> None: + """ + The writer and reviewer multi-agent workflow. 
+ + Environment variables required: + - PROJECT_ENDPOINT: Your Microsoft Foundry project endpoint + - MODEL_DEPLOYMENT_NAME: Your Microsoft Foundry model deployment name + """ + + async with create_agents() as (writer, reviewer): + await from_agent_framework(lambda: create_workflow(writer, reviewer)).run_async() + + +if __name__ == "__main__": + asyncio.run(main()) From 49153de4975f5b5ffe685d3e80dd5f7756c34dad Mon Sep 17 00:00:00 2001 From: "L. Elaine Dazzio" Date: Wed, 18 Mar 2026 21:28:58 -0400 Subject: [PATCH 2/2] fix: align sample with established hosted-agent patterns - Switch to sync DefaultAzureCredential (matches all other samples) - Use from_agent_framework(agent).run() instead of .run_async() - Remove unnecessary async/asyncio patterns - Change load_dotenv(override=True) to load_dotenv() - Replace the per-request workflow factory with a single shared workflow agent (NOTE: this reverts the PATCH 1/2 fix for the parallel-request RuntimeError — confirm the hosted runtime serializes requests before merging) Addresses review feedback from @moonbox3 and Copilot. --- .../main.py | 66 +++++++------------ 1 file changed, 24 insertions(+), 42 deletions(-) diff --git a/python/samples/05-end-to-end/hosted_agents/writer_reviewer_agents_in_workflow/main.py b/python/samples/05-end-to-end/hosted_agents/writer_reviewer_agents_in_workflow/main.py index fc0cec4d58..b6600fece6 100644 --- a/python/samples/05-end-to-end/hosted_agents/writer_reviewer_agents_in_workflow/main.py +++ b/python/samples/05-end-to-end/hosted_agents/writer_reviewer_agents_in_workflow/main.py @@ -1,19 +1,17 @@ # Copyright (c) Microsoft. All rights reserved. 
-import asyncio import os -from contextlib import asynccontextmanager from agent_framework import WorkflowBuilder from agent_framework.azure import AzureOpenAIResponsesClient from azure.ai.agentserver.agentframework import from_agent_framework -from azure.identity.aio import AzureCliCredential, ManagedIdentityCredential +from azure.identity import DefaultAzureCredential # pyright: ignore[reportUnknownVariableType] from dotenv import load_dotenv -load_dotenv(override=True) +# Load environment variables from .env file +load_dotenv() # Configure these for your Foundry project -# Read the explicit variables present in the .env file PROJECT_ENDPOINT = os.getenv( "PROJECT_ENDPOINT" ) # e.g., "https://.services.ai.azure.com/api/projects/" @@ -22,40 +20,7 @@ ) # Your model deployment name e.g., "gpt-4.1-mini" -def get_credential(): - """Will use Managed Identity when running in Azure, otherwise falls back to Azure CLI Credential.""" - return ( - ManagedIdentityCredential() - if os.getenv("MSI_ENDPOINT") - else AzureCliCredential() - ) - - -@asynccontextmanager -async def create_agents(): - async with get_credential() as credential: - client = AzureOpenAIResponsesClient( - project_endpoint=PROJECT_ENDPOINT, - deployment_name=MODEL_DEPLOYMENT_NAME, - credential=credential, - ) - writer = client.as_agent( - name="Writer", - instructions="You are an excellent content writer. You create new content and edit contents based on the feedback.", - ) - reviewer = client.as_agent( - name="Reviewer", - instructions="You are an excellent content reviewer. Provide actionable feedback to the writer about the provided content in the most concise manner possible.", - ) - yield writer, reviewer - - -def create_workflow(writer, reviewer): - workflow = WorkflowBuilder(start_executor=writer).add_edge(writer, reviewer).build() - return workflow.as_agent() - - -async def main() -> None: +def main(): """ The writer and reviewer multi-agent workflow. 
@@ -63,10 +28,27 @@ async def main() -> None: - PROJECT_ENDPOINT: Your Microsoft Foundry project endpoint - MODEL_DEPLOYMENT_NAME: Your Microsoft Foundry model deployment name """ + client = AzureOpenAIResponsesClient( + project_endpoint=PROJECT_ENDPOINT, + deployment_name=MODEL_DEPLOYMENT_NAME, + credential=DefaultAzureCredential(), + ) + writer = client.as_agent( + name="Writer", + instructions="You are an excellent content writer. You create new content and edit contents based on the feedback.", + ) + reviewer = client.as_agent( + name="Reviewer", + instructions="You are an excellent content reviewer. Provide actionable feedback to the writer about the provided content in the most concise manner possible.", + ) + + # Build the workflow and convert to agent + workflow = WorkflowBuilder(start_executor=writer).add_edge(writer, reviewer).build() + workflow_agent = workflow.as_agent() - async with create_agents() as (writer, reviewer): - await from_agent_framework(lambda: create_workflow(writer, reviewer)).run_async() + # Run the agent as a hosted agent + from_agent_framework(workflow_agent).run() if __name__ == "__main__": - asyncio.run(main()) + main()