Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
b8728cd
fix: 修复5081号PR在子代理执行后台任务时,未正确使用系统配置的流式/非流请求的问题(#5081)
a61995987 Feb 22, 2026
746ffd3
feat:为子代理增加远程图片URL参数支持
a61995987 Feb 22, 2026
140c014
fix: update description for image_urls parameter in HandoffTool to cl…
Soulter Feb 23, 2026
3d13673
ruff format
Soulter Feb 23, 2026
fda9313
Merge branch 'AstrBotDevs:master' into master
a61995987 Feb 24, 2026
7a2eefa
Merge branch 'AstrBotDevs:master' into master
a61995987 Feb 28, 2026
8a44647
fix:修正子agent无法正确接收本地图片(参考图)路径的问题
a61995987 Feb 28, 2026
cb22ac4
fix:增强image_urls接收的鲁棒性
a61995987 Feb 28, 2026
d6b5fd1
fix:ruff检查
a61995987 Feb 28, 2026
123efc2
fix: harden handoff image_urls preprocessing
zouyonghe Mar 1, 2026
27706d8
fix: refactor handoff image_urls preprocessing flow
zouyonghe Mar 1, 2026
8c02f85
refactor: simplify handoff image_urls data flow
zouyonghe Mar 1, 2026
3023742
fix: filter non-string handoff image_urls entries
zouyonghe Mar 1, 2026
c89ce3f
refactor: streamline handoff image url collection
zouyonghe Mar 1, 2026
166fb19
refactor: share handoff image ref validation utilities
zouyonghe Mar 1, 2026
4319c51
refactor: simplify handoff image url processing
zouyonghe Mar 1, 2026
ed178e5
refactor: honor prepared handoff image urls contract
zouyonghe Mar 1, 2026
3c58013
#fix:兼容openai适配器
a61995987 Mar 1, 2026
9c02e29
Merge branch 'master' of https://github.com/a61995987/AstrBot
a61995987 Mar 1, 2026
4f84a00
Merge branch 'AstrBotDevs:master' into master
a61995987 Mar 1, 2026
1efd07c
fix:调整空回复异常时的日志级别至debug 修正openai响应兼容性判断曾经过多的问题
a61995987 Mar 1, 2026
106aab1
fix:调整空回复判断条件 增加更多条件
a61995987 Mar 1, 2026
eae8995
fix:移除openai非标兼容中的消息链判断 因为空回复可能出现在任何阶段
a61995987 Mar 1, 2026
d263f0c
fix:暂时移除空回复判断 因为该问题难以复现 似乎需要更多测试
a61995987 Mar 1, 2026
040915c
fix:回退上一个版本
a61995987 Mar 1, 2026
4c3d32e
fix:补全注释及ruff
a61995987 Mar 1, 2026
88dcd58
fix:删除多余调试输出
a61995987 Mar 1, 2026
2566fb8
fix:调整日志记录方式
a61995987 Mar 1, 2026
20ca527
fix:修正chunk.choices 空值/空列表检查方式
a61995987 Mar 1, 2026
6cd2173
fix:重新加入空回复处理 已验证并测试
a61995987 Mar 1, 2026
b50d5bb
fix:删除错误的注释
a61995987 Mar 1, 2026
b0d07a9
fix:兼容处理中增加warning日志
a61995987 Mar 1, 2026
e8a8d09
fix:增加空回复判断条件
a61995987 Mar 1, 2026
5ec49e9
fix:规范化检查回复合法性(去除空字符 检查组件引用等)
a61995987 Mar 1, 2026
67950e7
fix:将空回复判断提取为辅助函数并在流程中引用
a61995987 Mar 1, 2026
6ab529c
fix:处理非标聚合平台可能缺失工具调用中type字段的问题
a61995987 Mar 1, 2026
7157720
ruff
a61995987 Mar 1, 2026
833a37f
ruff
a61995987 Mar 1, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 69 additions & 0 deletions astrbot/core/agent/runners/tool_loop_agent_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,12 @@
TextResourceContents,
)

import astrbot.core.message.components as Comp
from astrbot import logger
from astrbot.core.agent.message import ImageURLPart, TextPart, ThinkPart
from astrbot.core.agent.tool import ToolSet
from astrbot.core.agent.tool_image_cache import tool_image_cache
from astrbot.core.exceptions import LLMEmptyResponseError
from astrbot.core.message.components import Json
from astrbot.core.message.message_event_result import (
MessageChain,
Expand Down Expand Up @@ -219,6 +221,42 @@ async def _iter_llm_responses(
else:
yield await self.provider.text_chat(**payload)

def _is_empty_llm_response(self, resp: LLMResponse) -> bool:
"""Check if an LLM response is effectively empty.

This heuristic checks:
- completion_text is empty or whitespace only
- reasoning_content is empty or whitespace only
- tools_call_args is empty (no tool calls)
- result_chain has no meaningful content (Plain components with non-empty text,
or any non-Plain components like images, voice, etc.)

Returns True if the response contains no meaningful content.
"""
completion_text_stripped = (resp.completion_text or "").strip()
reasoning_content_stripped = (resp.reasoning_content or "").strip()

# Check result_chain for meaningful non-empty content (e.g., images, non-empty text)
has_result_chain_content = False
if resp.result_chain and resp.result_chain.chain:
for comp in resp.result_chain.chain:
# Skip empty Plain components
if isinstance(comp, Comp.Plain):
if comp.text and comp.text.strip():
has_result_chain_content = True
break
else:
# Non-Plain components (e.g., images, voice) are considered valid content
has_result_chain_content = True
break

return (
not completion_text_stripped
and not reasoning_content_stripped
and not resp.tools_call_args
and not has_result_chain_content
)

async def _iter_llm_responses_with_fallback(
self,
) -> T.AsyncGenerator[LLMResponse, None]:
Expand All @@ -241,11 +279,23 @@ async def _iter_llm_responses_with_fallback(
has_stream_output = False
try:
async for resp in self._iter_llm_responses(include_model=idx == 0):
# 对于流式 chunk,不立即检查是否为空,因为单个 chunk 可能只是元数据/心跳
# 流式响应的最终结果会在 resp.is_chunk=False 时返回
if resp.is_chunk:
has_stream_output = True
yield resp
continue

# 如果回复为空且无工具调用 且不是最后一个回退渠道 则引发fallback
# 此处不应判断整个消息链是否为空 因为消息链包含整个对话流 而空回复可能发生在任何阶段
# 使用辅助函数检查是否为空回复
if self._is_empty_llm_response(resp) and not is_last_candidate:
logger.warning(
"Chat Model %s returns empty response, trying fallback to next provider.",
candidate_id,
)
break

if (
resp.role == "err"
and not has_stream_output
Expand Down Expand Up @@ -504,6 +554,25 @@ async def step(self):
logger.warning(
"LLM returned empty assistant message with no tool calls."
)
# 若所有fallback使用完毕后依然为空回复 则显示执行报错 避免静默
base_msg = "LLM returned empty assistant message with no tool calls."
model_id = getattr(self.run_context, "model_id", None)
provider_id = getattr(self.run_context, "provider_id", None)
run_id = getattr(self.run_context, "run_id", None)

ctx_parts = []
if model_id is not None:
ctx_parts.append(f"model_id={model_id}")
if provider_id is not None:
ctx_parts.append(f"provider_id={provider_id}")
if run_id is not None:
ctx_parts.append(f"run_id={run_id}")

if ctx_parts:
base_msg = f"{base_msg} Context: " + ", ".join(ctx_parts) + "."

raise LLMEmptyResponseError(base_msg)

self.run_context.messages.append(Message(role="assistant", content=parts))

# call the on_agent_done hook
Expand Down
4 changes: 4 additions & 0 deletions astrbot/core/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,7 @@ class AstrBotError(Exception):

class ProviderNotFoundError(AstrBotError):
    """Raised when the requested provider cannot be located."""


class LLMEmptyResponseError(AstrBotError):
    """Raised when the LLM produces an empty assistant message and requests no tool calls."""
28 changes: 26 additions & 2 deletions astrbot/core/provider/sources/openai_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,12 +306,36 @@ async def _query_stream(

state = ChatCompletionStreamState()

chunk_index = 0
async for chunk in stream:
# 兼容处理:部分非标准聚合平台(如通过newapi适配层转接的 Gemini)在流式返回 tool_calls 时,
# 可能会缺失 type 字段。由于 openai SDK 的 ChatCompletionStreamState.handle_chunk
# 内部有 assert tool.type == "function" 的断言,缺少该字段会导致 AssertionError。
# 因此,若检测到 tool_call 且 type 为空,在此处手动补全为 "function"。
for choice in chunk.choices or []:
if not choice.delta or not choice.delta.tool_calls:
continue
for tool_call in choice.delta.tool_calls:
# 使用 getattr 处理 type 字段可能完全缺失的情况
tool_type = getattr(tool_call, "type", None)
if tool_type is None or tool_type == "":
logger.debug(
f"[{self.get_model()}] tool_call.type is missing or empty in chunk {chunk_index} "
f"(provider: {self.provider_config.get('id', 'unknown')}), "
f"manually set to 'function'"
)
tool_call.type = "function"
chunk_index += 1

try:
state.handle_chunk(chunk)
except Exception as e:
logger.warning("Saving chunk state error: " + str(e))
if len(chunk.choices) == 0:
logger.warning(
f"[{self.get_model()}] Saving chunk state error: {e} "
f"(provider: {self.provider_config.get('id', 'unknown')})"
)

if not chunk.choices:
continue
delta = chunk.choices[0].delta
# logger.debug(f"chunk delta: {delta}")
Expand Down