Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "uipath-runtime"
version = "0.9.3"
version = "0.9.4"
description = "Runtime abstractions and interfaces for building agents and automation scripts in the UiPath ecosystem"
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.11"
Expand Down
46 changes: 45 additions & 1 deletion src/uipath/runtime/chat/runtime.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,15 @@
"""Chat runtime implementation."""

import logging
from datetime import datetime, timezone
from typing import Any, AsyncGenerator, cast

from uipath.core.triggers import UiPathResumeTriggerType
from uipath.core.chat import (
UiPathConversationMessageEvent,
UiPathConversationToolCallEvent,
UiPathConversationToolCallStartEvent,
)
from uipath.core.triggers import UiPathResumeTrigger, UiPathResumeTriggerType

from uipath.runtime.base import (
UiPathExecuteOptions,
Expand Down Expand Up @@ -41,6 +47,7 @@ def __init__(
super().__init__()
self.delegate = delegate
self.chat_bridge = chat_bridge
self._current_message_id: str | None = None

async def execute(
self,
Expand Down Expand Up @@ -80,6 +87,7 @@ async def stream(
):
if isinstance(event, UiPathRuntimeMessageEvent):
if event.payload:
self._current_message_id = event.payload.message_id
await self.chat_bridge.emit_message_event(event.payload)

if isinstance(event, UiPathRuntimeResult):
Expand All @@ -103,6 +111,12 @@ async def stream(

resume_data = await self.chat_bridge.wait_for_resume()

await (
self._emit_start_tool_call_on_confirmation_approval(
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think this is the right place to do this — tool call confirmation is just one of multiple interruptible use-cases.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Where do you recommend this go? I have detailed justification on why I couldn't get langchain repo to work in the PR description, but runtime is (from my understanding) only place that can emit start tool call between AImessage.toolCall and toolMessage. We don't want socket logic in tool_node.py either

Copy link
Member

@cristipufu cristipufu Mar 17, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@JoshParkSJ
If I'm reading this correctly, the current order of operations is AIMessage => startToolCall => startInterrupt (which is the expected, correct sequence), and what you're trying to achieve is AIMessage => startInterrupt => startToolCall.

If that's right, I'd push back on solving this in the runtime. The confusing UX you're describing — users seeing both the tool call UI and the interrupt UI simultaneously — is a client-side state machine problem. The client should be able to handle receiving startToolCall before startInterrupt without rendering both at the same time. Reordering backend events to paper over a client rendering issue is the wrong layer for this fix, and it pulls protocol-sequencing logic into the runtime in a way that will be hard to unpick later.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It may be worth exploring how these guys https://github.com/ag-ui-protocol/ag-ui handle the same scenario

Copy link
Contributor Author

@JoshParkSJ JoshParkSJ Mar 17, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That's a good point - I'll move this handling to CAS. Appreciate the feedback!

trigger, resume_data
)
)

assert trigger.interrupt_id is not None, (
"Trigger interrupt_id cannot be None"
)
Expand All @@ -122,6 +136,36 @@ async def stream(
else:
yield event

async def _emit_start_tool_call_on_confirmation_approval(
    self, trigger: UiPathResumeTrigger, resume_data: dict[str, Any]
) -> None:
    """Emit a startToolCall event when a HITL tool call confirmation is approved.

    After the user approves a human-in-the-loop confirmation, re-announce the
    tool call to the chat bridge so the client can render it as started.
    Best-effort: emission failures are logged and never propagated.

    Args:
        trigger: Resume trigger; ``trigger.api_resume.request`` is expected to
            carry camelCase ``toolCallId`` and ``toolName`` keys (missing keys
            fall back to empty strings).
        resume_data: Resume payload; ``resume_data["value"]["input"]``, when
            present, is forwarded as the tool input.
    """
    # No message to correlate with, or not an API-resume trigger: nothing to do.
    if self._current_message_id is None or trigger.api_resume is None:
        return

    request = trigger.api_resume.request or {}
    value = resume_data.get("value") or {}
    tool_input = value.get("input")

    # RFC 3339 UTC timestamp, millisecond precision, 'Z' suffix instead of
    # '+00:00'. Hoisted out of the constructor call for readability.
    timestamp = (
        datetime.now(timezone.utc)
        .isoformat(timespec="milliseconds")
        .replace("+00:00", "Z")
    )

    tool_call_start_event = UiPathConversationMessageEvent(
        message_id=self._current_message_id,
        tool_call=UiPathConversationToolCallEvent(
            tool_call_id=request.get("toolCallId", ""),
            start=UiPathConversationToolCallStartEvent(
                tool_name=request.get("toolName", ""),
                timestamp=timestamp,
                input=tool_input,
            ),
        ),
    )

    try:
        await self.chat_bridge.emit_message_event(tool_call_start_event)
    except Exception as e:
        # Best-effort: a failed emission must not break the resume flow.
        # Lazy %-style args defer message formatting until the record is emitted.
        logger.warning("Error emitting startToolCall on approval: %s", e)

async def get_schema(self) -> UiPathRuntimeSchema:
    """Return the schema of the wrapped runtime.

    Pure pass-through: the chat layer contributes no schema of its own.
    """
    schema = await self.delegate.get_schema()
    return schema
Expand Down
2 changes: 1 addition & 1 deletion uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading