feat: 优化AI处理流程

This commit is contained in:
2025-12-29 18:40:24 +08:00
parent 2c564d2870
commit 9b6173be76
4 changed files with 473 additions and 129 deletions

View File

@@ -167,6 +167,70 @@ class AIChat(PluginBase):
else:
return sender_wxid or from_wxid # 私聊使用用户ID
def _get_group_history_chat_id(self, from_wxid: str, user_wxid: str = None) -> str:
"""获取群聊 history 的会话ID可配置为全群共享或按用户隔离"""
if not from_wxid:
return ""
history_config = (self.config or {}).get("history", {})
scope = str(history_config.get("scope", "chatroom") or "chatroom").strip().lower()
if scope in ("per_user", "user", "peruser"):
if not user_wxid:
return from_wxid
return self._get_chat_id(from_wxid, user_wxid, is_group=True)
return from_wxid
def _should_capture_group_history(self, *, is_triggered: bool) -> bool:
"""判断群聊消息是否需要写入 history减少无关上下文污染"""
history_config = (self.config or {}).get("history", {})
capture = str(history_config.get("capture", "all") or "all").strip().lower()
if capture in ("none", "off", "disable", "disabled"):
return False
if capture in ("reply", "ai_only", "triggered"):
return bool(is_triggered)
return True
def _parse_history_timestamp(self, ts) -> float | None:
if ts is None:
return None
if isinstance(ts, (int, float)):
return float(ts)
if isinstance(ts, str):
s = ts.strip()
if not s:
return None
try:
return float(s)
except Exception:
pass
try:
return datetime.fromisoformat(s).timestamp()
except Exception:
return None
return None
def _filter_history_by_window(self, history: list) -> list:
history_config = (self.config or {}).get("history", {})
window_seconds = history_config.get("context_window_seconds", None)
if window_seconds is None:
window_seconds = history_config.get("window_seconds", 0)
try:
window_seconds = float(window_seconds or 0)
except Exception:
window_seconds = 0
if window_seconds <= 0:
return history
cutoff = time.time() - window_seconds
filtered = []
for msg in history or []:
ts = self._parse_history_timestamp((msg or {}).get("timestamp"))
if ts is None or ts >= cutoff:
filtered.append(msg)
return filtered
def _sanitize_speaker_name(self, name: str) -> str:
"""清洗昵称,避免破坏历史格式(如 [name] 前缀)。"""
if name is None:
@@ -1173,6 +1237,203 @@ class AIChat(PluginBase):
"content": f"[{msg_nickname}] {msg_content}"
})
def _get_bot_nickname(self) -> str:
try:
with open("main_config.toml", "rb") as f:
main_config = tomllib.load(f)
nickname = main_config.get("Bot", {}).get("nickname", "")
return nickname or "机器人"
except Exception:
return "机器人"
def _tool_call_to_action_text(self, function_name: str, arguments: dict) -> str:
args = arguments if isinstance(arguments, dict) else {}
if function_name == "query_weather":
city = str(args.get("city") or "").strip()
return f"查询{city}天气" if city else "查询天气"
if function_name == "register_city":
city = str(args.get("city") or "").strip()
return f"注册城市{city}" if city else "注册城市"
if function_name == "user_signin":
return "签到"
if function_name == "check_profile":
return "查询个人信息"
return f"执行{function_name}"
def _build_tool_calls_context_note(self, tool_calls_data: list) -> str:
actions: list[str] = []
for tool_call in tool_calls_data or []:
function_name = tool_call.get("function", {}).get("name", "")
if not function_name:
continue
arguments_str = tool_call.get("function", {}).get("arguments", "{}")
try:
arguments = json.loads(arguments_str) if arguments_str else {}
except Exception:
arguments = {}
actions.append(self._tool_call_to_action_text(function_name, arguments))
if not actions:
return "(已触发工具处理:上一条请求。结果将发送到聊天中。)"
return f"(已触发工具处理:{''.join(actions)}。结果将发送到聊天中。)"
async def _record_tool_calls_to_context(
self,
tool_calls_data: list,
*,
from_wxid: str,
chat_id: str,
is_group: bool,
user_wxid: str | None = None,
):
note = self._build_tool_calls_context_note(tool_calls_data)
if chat_id:
self._add_to_memory(chat_id, "assistant", note)
if is_group and from_wxid:
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid or "")
await self._add_to_history(history_chat_id, self._get_bot_nickname(), note, role="assistant", sender_wxid=user_wxid or None)
def _extract_tool_intent_text(self, user_message: str, tool_query: str | None = None) -> str:
text = tool_query if tool_query is not None else user_message
text = str(text or "").strip()
if not text:
return ""
# 对“聊天记录/视频”等组合消息,尽量只取用户真实提问部分,避免历史文本触发工具误判
markers = (
"[用户的问题]:",
"[用户的问题]",
"[用户的问题]\n",
"[用户的问题]",
)
for marker in markers:
if marker in text:
text = text.rsplit(marker, 1)[-1].strip()
return text
def _select_tools_for_message(self, tools: list, *, user_message: str, tool_query: str | None = None) -> list:
tools_config = (self.config or {}).get("tools", {})
if not tools_config.get("smart_select", False):
return tools
intent_text = self._extract_tool_intent_text(user_message, tool_query=tool_query)
if not intent_text:
return tools
t = intent_text.lower()
allow: set[str] = set()
# 天气
if re.search(r"(天气|气温|温度|下雨|下雪|风力|空气质量|pm2\\.?5|湿度|预报)", t):
allow.add("query_weather")
# 注册/设置城市(避免仅凭城市名触发)
if re.search(r"(注册|设置|更新|更换|修改|绑定|默认).{0,6}城市|城市.{0,6}(注册|设置|更新|更换|修改|绑定|默认)", t):
allow.add("register_city")
# 签到/个人信息
if re.search(r"(用户签到|签到|签个到)", t):
allow.add("user_signin")
if re.search(r"(个人信息|我的信息|我的积分|查积分|积分多少|连续签到|连签|我的资料)", t):
allow.add("check_profile")
# 鹿打卡
if re.search(r"(鹿打卡|鹿签到)", t):
allow.add("deer_checkin")
if re.search(r"(补签|补打卡)", t):
allow.add("makeup_checkin")
if re.search(r"(鹿.*(日历|月历|打卡日历))|((日历|月历|打卡日历).*鹿)", t):
allow.add("view_calendar")
# 搜索/资讯
if re.search(r"(联网|搜索|搜一下|搜一搜|搜搜|帮我搜|搜新闻|搜资料|查资料|查新闻|查价格)", t):
# 兼容旧工具名与当前插件实现
allow.add("tavily_web_search")
allow.add("web_search")
# 隐式信息检索:用户询问具体实体/口碑/评价但未明确说“搜索/联网”
if re.search(r"(怎么样|如何|评价|口碑|靠谱吗|值不值得|值得吗|好不好|推荐|牛不牛|强不强|厉不厉害|有名吗|什么来头|背景|近况|最新|最近)", t) and re.search(
r"(公会|战队|服务器|区服|游戏|公司|品牌|店|商家|产品|软件|插件|项目|平台|up主|主播|作者|电影|电视剧|小说|手游|网游)",
t,
):
allow.add("tavily_web_search")
allow.add("web_search")
if re.search(r"(60秒|每日新闻|早报|新闻图片|读懂世界)", t):
allow.add("get_daily_news")
if re.search(r"(epic|喜加一|免费游戏)", t):
allow.add("get_epic_free_games")
# 音乐/短剧
if re.search(r"(搜歌|找歌|点歌|来一首|歌名|歌曲|音乐|听.*歌|播放.*歌)", t) or ("" in t and re.search(r"(搜|找|点|来一首|播放|听)", t)):
allow.add("search_music")
if re.search(r"(短剧|搜短剧|找短剧)", t):
allow.add("search_playlet")
# 群聊总结
if re.search(r"(群聊总结|生成总结|总结一下|今日总结|昨天总结|群总结)", t):
allow.add("generate_summary")
# 娱乐
if re.search(r"(疯狂星期四|v我50|kfc)", t):
allow.add("get_kfc")
# 发病文学:必须是明确请求(避免用户口头禅/情绪表达误触工具)
if re.search(r"(发病文学|犯病文学|发病文|犯病文|发病语录|犯病语录)", t):
allow.add("get_fabing")
elif re.search(r"(来|整|给|写|讲|说|发|搞|整点).{0,4}(发病|犯病)", t):
allow.add("get_fabing")
elif re.search(r"(发病|犯病).{0,6}(一下|一段|一条|几句|文学|文|语录|段子)", t):
allow.add("get_fabing")
if re.search(r"(随机图片|来张图|来个图|随机图)", t):
allow.add("get_random_image")
if re.search(r"(随机视频|来个视频|随机短视频)", t):
allow.add("get_random_video")
# 绘图/视频生成(只在用户明确要求时开放)
if (
# 明确绘图动词/模式
re.search(r"(画一张|画一个|画个|画一下|画图|绘图|绘制|作画|出图|生成图片|文生图|图生图|以图生图)", t)
# “生成/做/给我”+“一张/一个/张/个”+“图/图片”类表达(例如:生成一张瑞依/做一张图)
or re.search(r"(生成|做|给我|帮我).{0,4}(一张|一幅|一个|张|个).{0,8}(图|图片|照片)", t)
# “来/发”+“一张/张”+“图/图片”(例如:来张瑞依的图)
or re.search(r"(来|发).{0,2}(一张|一幅|一个|张|个).{0,10}(图|图片|照片)", t)
# 视觉诉求但没说“画”(例如:看看腿/白丝)
or re.search(r"(看看|看下|给我看|让我看看).{0,8}(腿|白丝|黑丝|丝袜|玉足|脚|足|写真|涩图|色图|福利图)", t)
):
allow.update({
"nano_ai_image_generation",
"flow2_ai_image_generation",
"jimeng_ai_image_generation",
"kiira2_ai_image_generation",
"generate_image",
})
if re.search(r"(生成视频|做个视频|视频生成|sora)", t):
allow.add("sora_video_generation")
# 如果已经命中特定领域工具(天气/音乐/短剧等),且用户未明确表示“联网/网页/链接/来源”等需求,避免把联网搜索也暴露出去造成误触
explicit_web = bool(re.search(r"(联网|网页|网站|网址|链接|来源)", t))
if not explicit_web and {"query_weather", "search_music", "search_playlet"} & allow:
allow.discard("tavily_web_search")
allow.discard("web_search")
# 严格模式:没有明显工具意图时,不向模型暴露任何 tools避免误触
if not allow:
return []
selected = []
for tool in tools or []:
name = tool.get("function", {}).get("name", "")
if name and name in allow:
selected.append(tool)
return selected
async def _handle_context_stats(self, bot, from_wxid: str, user_wxid: str, is_group: bool):
"""处理上下文统计指令"""
try:
@@ -1190,7 +1451,9 @@ class AIChat(PluginBase):
if is_group:
# 群聊:使用 history 机制
history = await self._load_history(from_wxid)
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
history = await self._load_history(history_chat_id)
history = self._filter_history_by_window(history)
max_context = self.config.get("history", {}).get("max_context", 50)
# 实际会发送给 AI 的上下文
@@ -1393,7 +1656,9 @@ class AIChat(PluginBase):
if content == "/记忆状态":
if user_wxid in admins:
if is_group:
history = await self._load_history(from_wxid)
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
history = await self._load_history(history_chat_id)
history = self._filter_history_by_window(history)
max_context = self.config.get("history", {}).get("max_context", 50)
context_count = min(len(history), max_context)
msg = f"📊 群聊记忆: {len(history)}\n"
@@ -1479,15 +1744,18 @@ class AIChat(PluginBase):
if should_reply:
actual_content = self._extract_content(message, content)
# 保存到群组历史记录(所有消息都保存,不管是否回复
# 保存到群组历史记录(默认全量保存;可配置为仅保存触发 AI 的消息,减少上下文污染/串线
# 但如果是 AutoReply 触发的,跳过保存(消息已经在正常流程中保存过了)
if is_group and not message.get('_auto_reply_triggered'):
# mention 模式下,群聊里@机器人仅作为触发条件,不进入上下文,避免同一句话在上下文中出现两种形式(含@/不含@
trigger_mode = self.config.get("behavior", {}).get("trigger_mode", "mention")
history_content = content
if trigger_mode == "mention" and should_reply and actual_content:
history_content = actual_content
await self._add_to_history(from_wxid, nickname, history_content, sender_wxid=user_wxid)
if self._should_capture_group_history(is_triggered=bool(should_reply)):
# mention 模式下,群聊里@机器人仅作为触发条件,不进入上下文,避免同一句话在上下文中出现两种形式(含@/不含@
trigger_mode = self.config.get("behavior", {}).get("trigger_mode", "mention")
history_content = content
if trigger_mode == "mention" and should_reply and actual_content:
history_content = actual_content
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
await self._add_to_history(history_chat_id, nickname, history_content, sender_wxid=user_wxid)
# 如果不需要回复,直接返回
if not should_reply:
@@ -1517,7 +1785,13 @@ class AIChat(PluginBase):
# 群聊:消息已写入 history则不再重复附加到 LLM messages避免“同一句话发给AI两次”
history_enabled = bool(self.store) and self.config.get("history", {}).get("enabled", True)
append_user_message = not (is_group and history_enabled and not message.get('_auto_reply_triggered'))
captured_to_history = bool(
is_group
and history_enabled
and not message.get('_auto_reply_triggered')
and self._should_capture_group_history(is_triggered=True)
)
append_user_message = not captured_to_history
# 调用 AI API带重试机制
max_retries = self.config.get("api", {}).get("max_retries", 2)
@@ -1569,11 +1843,22 @@ class AIChat(PluginBase):
await bot.send_text(from_wxid, cleaned_response)
self._add_to_memory(chat_id, "assistant", cleaned_response)
# 保存机器人回复到历史记录
if is_group:
history_config = self.config.get("history", {})
sync_bot_messages = history_config.get("sync_bot_messages", False)
history_scope = str(history_config.get("scope", "chatroom") or "chatroom").strip().lower()
can_rely_on_hook = bool(sync_bot_messages and history_scope not in ("per_user", "user", "peruser"))
if is_group and not can_rely_on_hook:
with open("main_config.toml", "rb") as f:
main_config = tomllib.load(f)
bot_nickname = main_config.get("Bot", {}).get("nickname", "机器人")
await self._add_to_history(from_wxid, bot_nickname, cleaned_response, role="assistant")
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
await self._add_to_history(
history_chat_id,
bot_nickname,
cleaned_response,
role="assistant",
sender_wxid=user_wxid,
)
logger.success(f"AI 回复成功: {cleaned_response[:50]}...")
else:
logger.warning("AI 回复清洗后为空(可能只包含思维链/格式标记),已跳过发送")
@@ -1673,16 +1958,18 @@ class AIChat(PluginBase):
is_group: bool = False,
*,
append_user_message: bool = True,
tool_query: str | None = None,
) -> str:
"""调用 AI API"""
api_config = self.config["api"]
# 收集工具
tools = self._collect_tools()
logger.info(f"收集到 {len(tools)} 个工具函数")
all_tools = self._collect_tools()
tools = self._select_tools_for_message(all_tools, user_message=user_message, tool_query=tool_query)
logger.info(f"收集到 {len(all_tools)} 个工具函数,本次启用 {len(tools)}")
if tools:
tool_names = [t["function"]["name"] for t in tools]
logger.info(f"工具列表: {tool_names}")
logger.info(f"本次启用工具: {tool_names}")
# 构建消息列表
system_content = self.system_prompt
@@ -1714,7 +2001,9 @@ class AIChat(PluginBase):
# 从 JSON 历史记录加载上下文(仅群聊)
if is_group and from_wxid:
history = await self._load_history(from_wxid)
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid or "")
history = await self._load_history(history_chat_id)
history = self._filter_history_by_window(history)
max_context = self.config.get("history", {}).get("max_context", 50)
# 取最近的 N 条消息作为上下文
@@ -1877,6 +2166,16 @@ class AIChat(PluginBase):
if tool_calls_data:
# 提示已在流式处理中发送,直接启动异步工具执行
logger.info(f"启动异步工具执行,共 {len(tool_calls_data)} 个工具")
try:
await self._record_tool_calls_to_context(
tool_calls_data,
from_wxid=from_wxid,
chat_id=chat_id,
is_group=is_group,
user_wxid=user_wxid,
)
except Exception as e:
logger.debug(f"记录工具调用到上下文失败: {e}")
asyncio.create_task(
self._execute_tools_async(
tool_calls_data, bot, from_wxid, chat_id,
@@ -2624,12 +2923,20 @@ class AIChat(PluginBase):
self._add_to_memory(chat_id, "user", title_text, image_base64=image_base64)
# 保存用户引用图片消息到群组历史记录
if is_group:
await self._add_to_history(from_wxid, nickname, title_text, image_base64=image_base64)
if is_group and self._should_capture_group_history(is_triggered=True):
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
await self._add_to_history(
history_chat_id,
nickname,
title_text,
image_base64=image_base64,
sender_wxid=user_wxid,
)
# 调用AI API带图片
history_enabled = bool(self.store) and self.config.get("history", {}).get("enabled", True)
append_user_message = not (is_group and history_enabled)
captured_to_history = bool(is_group and history_enabled and self._should_capture_group_history(is_triggered=True))
append_user_message = not captured_to_history
response = await self._call_ai_api_with_image(
title_text,
image_base64,
@@ -2640,6 +2947,7 @@ class AIChat(PluginBase):
user_wxid,
is_group,
append_user_message=append_user_message,
tool_query=title_text,
)
if response:
@@ -2648,12 +2956,23 @@ class AIChat(PluginBase):
await bot.send_text(from_wxid, cleaned_response)
self._add_to_memory(chat_id, "assistant", cleaned_response)
# 保存机器人回复到历史记录
if is_group:
history_config = self.config.get("history", {})
sync_bot_messages = history_config.get("sync_bot_messages", False)
history_scope = str(history_config.get("scope", "chatroom") or "chatroom").strip().lower()
can_rely_on_hook = bool(sync_bot_messages and history_scope not in ("per_user", "user", "peruser"))
if is_group and not can_rely_on_hook:
import tomllib
with open("main_config.toml", "rb") as f:
main_config = tomllib.load(f)
bot_nickname = main_config.get("Bot", {}).get("nickname", "机器人")
await self._add_to_history(from_wxid, bot_nickname, cleaned_response, role="assistant")
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
await self._add_to_history(
history_chat_id,
bot_nickname,
cleaned_response,
role="assistant",
sender_wxid=user_wxid,
)
logger.success(f"AI回复成功: {cleaned_response[:50]}...")
else:
logger.warning("AI 回复清洗后为空,已跳过发送")
@@ -2758,11 +3077,26 @@ class AIChat(PluginBase):
self._add_to_memory(chat_id, "user", combined_message)
# 如果是群聊,添加到历史记录
if is_group:
await self._add_to_history(from_wxid, nickname, f"[发送了聊天记录] {user_question}")
if is_group and self._should_capture_group_history(is_triggered=True):
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
await self._add_to_history(
history_chat_id,
nickname,
f"[发送了聊天记录] {user_question}",
sender_wxid=user_wxid,
)
# 调用 AI API
response = await self._call_ai_api(combined_message, bot, from_wxid, chat_id, nickname, user_wxid, is_group)
response = await self._call_ai_api(
combined_message,
bot,
from_wxid,
chat_id,
nickname,
user_wxid,
is_group,
tool_query=user_question,
)
if response:
cleaned_response = self._sanitize_llm_output(response)
@@ -2770,12 +3104,23 @@ class AIChat(PluginBase):
await bot.send_text(from_wxid, cleaned_response)
self._add_to_memory(chat_id, "assistant", cleaned_response)
# 保存机器人回复到历史记录
if is_group:
history_config = self.config.get("history", {})
sync_bot_messages = history_config.get("sync_bot_messages", False)
history_scope = str(history_config.get("scope", "chatroom") or "chatroom").strip().lower()
can_rely_on_hook = bool(sync_bot_messages and history_scope not in ("per_user", "user", "peruser"))
if is_group and not can_rely_on_hook:
import tomllib
with open("main_config.toml", "rb") as f:
main_config = tomllib.load(f)
bot_nickname = main_config.get("Bot", {}).get("nickname", "机器人")
await self._add_to_history(from_wxid, bot_nickname, cleaned_response, role="assistant")
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
await self._add_to_history(
history_chat_id,
bot_nickname,
cleaned_response,
role="assistant",
sender_wxid=user_wxid,
)
logger.success(f"[聊天记录] AI 回复成功: {cleaned_response[:50]}...")
else:
logger.warning("[聊天记录] AI 回复清洗后为空,已跳过发送")
@@ -2848,11 +3193,26 @@ class AIChat(PluginBase):
self._add_to_memory(chat_id, "user", combined_message)
# 如果是群聊,添加到历史记录
if is_group:
await self._add_to_history(from_wxid, nickname, f"[发送了一个视频] {user_question}")
if is_group and self._should_capture_group_history(is_triggered=True):
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
await self._add_to_history(
history_chat_id,
nickname,
f"[发送了一个视频] {user_question}",
sender_wxid=user_wxid,
)
# 调用主AI生成回复使用现有的 _call_ai_api 方法,继承完整上下文)
response = await self._call_ai_api(combined_message, bot, from_wxid, chat_id, nickname, user_wxid, is_group)
response = await self._call_ai_api(
combined_message,
bot,
from_wxid,
chat_id,
nickname,
user_wxid,
is_group,
tool_query=user_question,
)
if response:
cleaned_response = self._sanitize_llm_output(response)
@@ -2860,12 +3220,23 @@ class AIChat(PluginBase):
await bot.send_text(from_wxid, cleaned_response)
self._add_to_memory(chat_id, "assistant", cleaned_response)
# 保存机器人回复到历史记录
if is_group:
history_config = self.config.get("history", {})
sync_bot_messages = history_config.get("sync_bot_messages", False)
history_scope = str(history_config.get("scope", "chatroom") or "chatroom").strip().lower()
can_rely_on_hook = bool(sync_bot_messages and history_scope not in ("per_user", "user", "peruser"))
if is_group and not can_rely_on_hook:
import tomllib
with open("main_config.toml", "rb") as f:
main_config = tomllib.load(f)
bot_nickname = main_config.get("Bot", {}).get("nickname", "机器人")
await self._add_to_history(from_wxid, bot_nickname, cleaned_response, role="assistant")
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid)
await self._add_to_history(
history_chat_id,
bot_nickname,
cleaned_response,
role="assistant",
sender_wxid=user_wxid,
)
logger.success(f"[视频识别] 主AI回复成功: {cleaned_response[:50]}...")
else:
logger.warning("[视频识别] 主AI回复清洗后为空已跳过发送")
@@ -3132,7 +3503,9 @@ class AIChat(PluginBase):
history_context = ""
if is_group and from_wxid:
# 群聊:从 Redis/文件加载历史
history = await self._load_history(from_wxid)
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid or "")
history = await self._load_history(history_chat_id)
history = self._filter_history_by_window(history)
max_context = self.config.get("history", {}).get("max_context", 50)
recent_history = history[-max_context:] if len(history) > max_context else history
@@ -3412,10 +3785,16 @@ class AIChat(PluginBase):
is_group: bool = False,
*,
append_user_message: bool = True,
tool_query: str | None = None,
) -> str:
"""调用AI API带图片"""
api_config = self.config["api"]
tools = self._collect_tools()
all_tools = self._collect_tools()
tools = self._select_tools_for_message(all_tools, user_message=user_message, tool_query=tool_query)
logger.info(f"[图片] 收集到 {len(all_tools)} 个工具函数,本次启用 {len(tools)}")
if tools:
tool_names = [t["function"]["name"] for t in tools]
logger.info(f"[图片] 本次启用工具: {tool_names}")
# 构建消息列表
system_content = self.system_prompt
@@ -3446,7 +3825,9 @@ class AIChat(PluginBase):
# 添加历史上下文
if is_group and from_wxid:
history = await self._load_history(from_wxid)
history_chat_id = self._get_group_history_chat_id(from_wxid, user_wxid or "")
history = await self._load_history(history_chat_id)
history = self._filter_history_by_window(history)
max_context = self.config.get("history", {}).get("max_context", 50)
recent_history = history[-max_context:] if len(history) > max_context else history
self._append_group_history_messages(messages, recent_history)
@@ -3596,13 +3977,23 @@ class AIChat(PluginBase):
if tool_calls_data:
# 提示已在流式处理中发送,直接启动异步工具执行
logger.info(f"[图片] 启动异步工具执行,共 {len(tool_calls_data)} 个工具")
try:
await self._record_tool_calls_to_context(
tool_calls_data,
from_wxid=from_wxid,
chat_id=chat_id,
is_group=is_group,
user_wxid=user_wxid,
)
except Exception as e:
logger.debug(f"[图片] 记录工具调用到上下文失败: {e}")
asyncio.create_task(
self._execute_tools_async_with_image(
tool_calls_data, bot, from_wxid, chat_id,
user_wxid, nickname, is_group, messages, image_base64
)
)
return ""
return None
# 检查是否包含错误的工具调用格式
if "<tool_code>" in full_content or "print(" in full_content and "flow2_ai_image_generation" in full_content: