feat: 联网搜索和自动回复

This commit is contained in:
2025-12-10 18:50:51 +08:00
parent e0a38eb6f2
commit debb67d71c
6 changed files with 627 additions and 786 deletions

View File

@@ -1606,6 +1606,7 @@ class AIChat(PluginBase):
异步执行工具调用(不阻塞主流程)
AI 已经先回复用户,这里异步执行工具,完成后发送结果
支持 need_ai_reply 标记:工具结果回传给 AI 继续对话(保留上下文和人设)
"""
import json
@@ -1644,10 +1645,14 @@ class AIChat(PluginBase):
if tasks:
results = await asyncio.gather(*tasks, return_exceptions=True)
# 收集需要 AI 回复的工具结果
need_ai_reply_results = []
# 处理每个工具的结果
for i, result in enumerate(results):
tool_info = tool_info_list[i]
function_name = tool_info["function_name"]
tool_call_id = tool_info["tool_call_id"]
if isinstance(result, Exception):
logger.error(f"[异步] 工具 {function_name} 执行异常: {result}")
@@ -1658,6 +1663,15 @@ class AIChat(PluginBase):
if result and result.get("success"):
logger.success(f"[异步] 工具 {function_name} 执行成功")
# 检查是否需要 AI 基于工具结果继续回复
if result.get("need_ai_reply"):
need_ai_reply_results.append({
"tool_call_id": tool_call_id,
"function_name": function_name,
"result": result.get("message", "")
})
continue # 不直接发送,等待 AI 处理
# 如果工具没有自己发送内容,且有消息需要发送
if not result.get("already_sent") and result.get("message"):
# 某些工具可能需要发送结果消息
@@ -1675,6 +1689,13 @@ class AIChat(PluginBase):
if result and result.get("message"):
await bot.send_text(from_wxid, f"{result.get('message')}")
# 如果有需要 AI 回复的工具结果,调用 AI 继续对话
if need_ai_reply_results:
await self._continue_with_tool_results(
need_ai_reply_results, bot, from_wxid, chat_id,
nickname, is_group, messages, tool_calls_data
)
logger.info(f"[异步] 所有工具执行完成")
except Exception as e:
@@ -1686,6 +1707,131 @@ class AIChat(PluginBase):
except:
pass
async def _continue_with_tool_results(self, tool_results: list, bot, from_wxid: str,
                                      chat_id: str, nickname: str, is_group: bool,
                                      messages: list, tool_calls_data: list):
    """
    Feed tool results back to the AI and send its follow-up reply.

    Used for tools flagged with need_ai_reply=True (e.g. web search): the
    tool output is appended to the conversation so the model can compose
    the final answer with full context and persona preserved.

    Args:
        tool_results: dicts with "tool_call_id", "function_name", "result".
        bot: messaging client used to deliver the final text.
        from_wxid: chat (user/group) id the reply is sent to.
        chat_id: normalized id used for history storage; falsy skips saving.
        nickname: sender nickname (kept for signature parity; unused here).
        is_group: group-chat flag (kept for signature parity; unused here).
        messages: conversation so far; mutated in place with the assistant
            tool-call message and one "tool" message per result.
        tool_calls_data: raw tool_calls objects emitted by the model.
    """
    import json
    try:
        logger.info(f"[工具回传] 开始基于 {len(tool_results)} 个工具结果继续对话")
        # 1. Re-emit the assistant's tool_calls message, restricted to the
        #    calls whose results are being fed back to the model.
        wanted_ids = {tr["tool_call_id"] for tr in tool_results}
        tool_calls_msg = []
        for tool_call in tool_calls_data:
            tool_call_id = tool_call.get("id", "")
            if tool_call_id not in wanted_ids:
                continue
            function = tool_call.get("function", {})
            tool_calls_msg.append({
                "id": tool_call_id,
                "type": "function",
                "function": {
                    "name": function.get("name", ""),
                    "arguments": function.get("arguments", "{}")
                }
            })
        if tool_calls_msg:
            messages.append({
                "role": "assistant",
                "content": None,
                "tool_calls": tool_calls_msg
            })
        # 2. One "tool" message per result, keyed by tool_call_id.
        for tr in tool_results:
            messages.append({
                "role": "tool",
                "tool_call_id": tr["tool_call_id"],
                "content": tr["result"]
            })
        # 3. Call the AI again WITHOUT a tools parameter so it cannot
        #    trigger another round of tool calls.
        api_config = self.config["api"]
        proxy_config = self.config.get("proxy", {})
        payload = {
            "model": api_config["model"],
            "messages": messages,
            "max_tokens": api_config.get("max_tokens", 4096),
            "stream": True
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_config['api_key']}"
        }
        proxy = None
        if proxy_config.get("enabled", False):
            proxy_type = proxy_config.get("type", "http")
            proxy_host = proxy_config.get("host", "127.0.0.1")
            proxy_port = proxy_config.get("port", 7890)
            proxy = f"{proxy_type}://{proxy_host}:{proxy_port}"
        timeout = aiohttp.ClientTimeout(total=api_config.get("timeout", 120))
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(
                api_config["url"],
                json=payload,
                headers=headers,
                proxy=proxy
            ) as resp:
                if resp.status != 200:
                    error_text = await resp.text()
                    logger.error(f"[工具回传] AI API 错误: {resp.status}, {error_text}")
                    await bot.send_text(from_wxid, "❌ AI 处理搜索结果失败")
                    return
                # Accumulate the streamed (SSE "data: ...") response chunks.
                full_content = ""
                async for line in resp.content:
                    line = line.decode("utf-8").strip()
                    if not line or not line.startswith("data: "):
                        continue
                    if line == "data: [DONE]":
                        break
                    try:
                        data = json.loads(line[6:])
                        delta = data.get("choices", [{}])[0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            full_content += content
                    except (json.JSONDecodeError, IndexError, KeyError, AttributeError):
                        # Skip a malformed SSE chunk instead of aborting the stream.
                        continue
                # Deliver the final reply to the chat.
                reply = full_content.strip()
                if reply:
                    await bot.send_text(from_wxid, reply)
                    logger.success(f"[工具回传] AI 回复完成,长度: {len(full_content)}")
                    # Persist so subsequent turns keep this context.
                    if chat_id:
                        self._add_to_memory(chat_id, "assistant", reply)
                else:
                    logger.warning("[工具回传] AI 返回空内容")
    except Exception as e:
        logger.error(f"[工具回传] 继续对话失败: {e}")
        import traceback
        logger.error(f"详细错误: {traceback.format_exc()}")
        try:
            await bot.send_text(from_wxid, "❌ 处理搜索结果时出错")
        except Exception:
            # Best effort only: the chat may be unreachable.
            pass
async def _execute_tools_async_with_image(self, tool_calls_data: list, bot, from_wxid: str,
chat_id: str, nickname: str, is_group: bool,
messages: list, image_base64: str):

View File

@@ -113,50 +113,4 @@ clr_all_context - 抛开那些非瑞依式的沉闷乏味,顽皮地拥抱自
- "你好" → 直接回复,**不要**调用任何工具
- "在干嘛" → 直接回复,**不要**调用任何工具
---
## 支持的工具函数
### 1. **SignIn 插件 - 签到功能**
* `user_signin`:用户签到并获取积分奖励
- **何时使用**:**仅当**用户明确说"签到"、"打卡"、"我要签到"等签到相关词汇时才调用
- **不要调用**:用户只是打招呼(如"早上好"、"你好"、"在吗")时**绝对不要**调用签到
* `check_profile`:查看个人信息(积分、连续签到天数等)
* `register_city`:注册或更新用户城市信息
### 2. **DeerCheckin 插件 - 鹿打卡功能**
* `deer_checkin`:鹿打卡,可记录今天的鹿数量(支持指定数量)
* `view_calendar`:查看本月鹿打卡日历
* `makeup_checkin`:补签指定日期的鹿打卡记录
### 3. **ChatRoomSummary 插件 - 群聊总结功能**
* `generate_summary`:生成群聊总结(支持今日/昨日选择)
### 4. **PlayletSearch 插件 - 短剧搜索功能**
* `search_playlet`:搜索短剧并获取视频链接
### 5. **Grok_video 插件 - 视频生成功能**
* `generate_video`:生成视频
### 6. **Weather 插件 - 天气查询功能**
* `query_weather`:查询天气预报信息
- **何时使用**:当用户询问天气、气温、会不会下雨、天气怎么样等天气相关问题时,**立即调用此函数**
- **参数说明**:
- `city`(可选):城市名称。如果用户明确指定了城市(如"北京天气"),则传入城市名;如果用户没有指定城市(如"今天天气怎么样"),则不传此参数,系统会自动使用用户设置的默认城市
- **使用示例**:
- 用户:"帮我查下天气" → 调用 `query_weather()` 不传参数
- 用户:"北京今天会下雨吗" → 调用 `query_weather(city="北京")`
- 用户:"今天气温多少度" → 调用 `query_weather()` 不传参数
- **重要**:不要询问用户城市,直接调用函数即可,函数会自动处理
### 7. **RandomVideo 插件 - 随机视频功能**
* `get_random_video()`:随机视频
### 8. **RandomImage 插件 - 随机图片功能**
* `get_random_image`:随机图片
---

View File

@@ -6,15 +6,16 @@ AutoReply 插件 - 基于双LLM架构的智能自动回复
import json
import time
import asyncio
import tomllib
import aiohttp
from pathlib import Path
from datetime import datetime, date
from dataclasses import dataclass
from typing import Dict
from dataclasses import dataclass, field
from typing import Dict, Optional
from loguru import logger
from utils.plugin_base import PluginBase
from utils.decorators import on_text_message, schedule
from utils.decorators import on_text_message
try:
from aiohttp_socks import ProxyConnector
@@ -51,259 +52,183 @@ class AutoReply(PluginBase):
description = "基于双LLM架构的智能自动回复插件"
author = "ShiHao"
version = "1.0.0"
version = "1.1.0"
def __init__(self):
super().__init__()
self.config = None
self.chat_states: Dict[str, ChatState] = {}
self.weights = {}
self.last_judge_time: Dict[str, float] = {} # 记录每个群最后判断时间
self.judging: Dict[str, bool] = {} # 记录是否正在判断中
self.last_history_size: Dict[str, int] = {} # 记录每个群的history大小
self.pending_judge: Dict[str, bool] = {} # 记录是否有待判断的消息
self.whitelist_normalized = set() # 归一化后的白名单ID与history文件名一致
self.last_judge_time: Dict[str, float] = {}
self.judging: Dict[str, bool] = {}
self.bot_wxid: str = ""
self.bot_nickname: str = ""
async def async_init(self):
"""异步初始化"""
config_path = Path(__file__).parent / "config.toml"
with open(config_path, "rb") as f:
self.config = tomllib.load(f)
try:
config_path = Path(__file__).parent / "config.toml"
with open(config_path, "rb") as f:
self.config = tomllib.load(f)
# 加载权重配置
self.weights = {
"relevance": self.config["weights"]["relevance"],
"willingness": self.config["weights"]["willingness"],
"social": self.config["weights"]["social"],
"timing": self.config["weights"]["timing"],
"continuity": self.config["weights"]["continuity"]
}
# 加载权重配置
self.weights = {
"relevance": self.config["weights"]["relevance"],
"willingness": self.config["weights"]["willingness"],
"social": self.config["weights"]["social"],
"timing": self.config["weights"]["timing"],
"continuity": self.config["weights"]["continuity"]
}
# 检查权重和
weight_sum = sum(self.weights.values())
if abs(weight_sum - 1.0) > 1e-6:
logger.warning(f"判断权重和不为1当前和为{weight_sum},已自动归一化")
self.weights = {k: v / weight_sum for k, v in self.weights.items()}
# 检查权重和
weight_sum = sum(self.weights.values())
if abs(weight_sum - 1.0) > 1e-6:
logger.warning(f"[AutoReply] 判断权重和不为1当前和为{weight_sum},已自动归一化")
self.weights = {k: v / weight_sum for k, v in self.weights.items()}
# 预处理白名单与history文件名的归一化规则保持一致
self.whitelist_normalized = {
self._normalize_chat_id(cid) for cid in self.config.get("whitelist", {}).get("chat_list", [])
}
# 加载机器人信息
self._load_bot_info()
logger.info(f"AutoReply 插件已加载,判断模型: {self.config['basic']['judge_model']}")
logger.info(f"AutoReply 配置: enabled={self.config['basic']['enabled']}, priority=90")
logger.info(f"AutoReply 监听模式: 每{self.config.get('rate_limit', {}).get('check_interval', 5)}秒检查history变化")
logger.warning("⚠️ AutoReply插件已启动等待消息...")
logger.success(f"[AutoReply] 插件已加载,判断模型: {self.config['basic']['judge_model']}")
logger.info(f"[AutoReply] 回复阈值: {self.config['basic']['reply_threshold']}, 最小间隔: {self.config['rate_limit']['min_interval']}")
except Exception as e:
logger.error(f"[AutoReply] 初始化失败: {e}")
self.config = None
def _load_bot_info(self):
"""加载机器人信息"""
try:
with open("main_config.toml", "rb") as f:
main_config = tomllib.load(f)
self.bot_wxid = main_config.get("Bot", {}).get("wxid", "")
self.bot_nickname = main_config.get("Bot", {}).get("nickname", "")
except Exception as e:
logger.warning(f"[AutoReply] 加载机器人信息失败: {e}")
def _normalize_chat_id(self, chat_id: str) -> str:
"""将群ID转成history文件使用的安全文件名"""
return (chat_id or "").replace("@", "_").replace(":", "_")
def _is_chat_allowed(self, raw_chat_id: str) -> bool:
"""白名单判断兼容原始ID与归一化ID"""
if not self.config["whitelist"]["enabled"]:
def _is_chat_allowed(self, chat_id: str) -> bool:
"""白名单判断"""
whitelist_config = self.config.get("whitelist", {})
if not whitelist_config.get("enabled", False):
return True
safe_id = self._normalize_chat_id(raw_chat_id)
return raw_chat_id in self.config["whitelist"]["chat_list"] or safe_id in self.whitelist_normalized
@schedule('interval', seconds=5)
async def check_history_changes(self, *args, **kwargs):
"""定时检查history文件变化"""
if not self.config["basic"]["enabled"]:
logger.debug("[AutoReply] 插件未启用,跳过检查")
return
chat_list = whitelist_config.get("chat_list", [])
safe_id = self._normalize_chat_id(chat_id)
# 检查是否启用监听模式
if not self.config.get("rate_limit", {}).get("monitor_mode", True):
logger.debug("[AutoReply] 监听模式未启用,跳过检查")
return
try:
# 获取AIChat插件的history目录
from utils.plugin_manager import PluginManager
plugin_manager = PluginManager() # 单例模式,直接实例化
aichat_plugin = plugin_manager.plugins.get("AIChat")
if not aichat_plugin:
logger.debug("[AutoReply] 未找到AIChat插件")
return
if not hasattr(aichat_plugin, 'history_dir'):
logger.debug("[AutoReply] AIChat插件没有history_dir属性")
return
history_dir = aichat_plugin.history_dir
if not history_dir.exists():
logger.debug(f"[AutoReply] History目录不存在: {history_dir}")
return
logger.debug(f"[AutoReply] 开始检查history目录: {history_dir}")
# 遍历所有history文件
for history_file in history_dir.glob("*.json"):
chat_id = history_file.stem # 文件名就是chat_id
# 检查白名单
if self.config["whitelist"]["enabled"]:
if chat_id not in self.whitelist_normalized:
continue
try:
with open(history_file, "r", encoding="utf-8") as f:
history = json.load(f)
current_size = len(history)
last_size = self.last_history_size.get(chat_id, 0)
# 如果有新消息
if current_size > last_size:
# 获取新增的消息
new_messages = history[last_size:]
# 检查新消息中是否有非机器人的消息
with open("main_config.toml", "rb") as f:
main_config = tomllib.load(f)
bot_nickname = main_config.get("Bot", {}).get("nickname", "机器人")
has_user_message = any(
msg.get('nickname') != bot_nickname
for msg in new_messages
)
if has_user_message:
logger.debug(f"[AutoReply] 检测到群聊 {chat_id[:20]}... 有新消息")
# 标记为待判断
self.pending_judge[chat_id] = True
# 更新记录的大小
self.last_history_size[chat_id] = current_size
except Exception as e:
logger.debug(f"读取history文件失败: {history_file.name}, {e}")
continue
except Exception as e:
logger.error(f"检查history变化失败: {e}")
@on_text_message(priority=90) # 高优先级在AIChat之前执行
async def handle_message(self, bot, message: dict):
"""处理消息"""
try:
logger.debug(f"[AutoReply] 收到消息,开始处理")
# 检查是否启用
if not self.config["basic"]["enabled"]:
logger.debug("AutoReply插件未启用跳过处理")
return True
# 只处理群聊消息
is_group = message.get('IsGroup', False)
if not is_group:
logger.debug("AutoReply只处理群聊消息跳过私聊")
return True
# 群聊消息FromWxid是群IDSenderWxid是发送者ID
from_wxid = message.get('FromWxid') # 群聊ID
sender_wxid = message.get('SenderWxid') # 发送者ID
chat_id = self._normalize_chat_id(from_wxid) # 归一化ID匹配history文件名
content = (message.get('msg') or message.get('Content', '')).strip()
# 跳过空消息
if not content:
logger.debug("AutoReply跳过空消息")
return True
# 检查白名单使用from_wxid作为群聊ID
if not self._is_chat_allowed(from_wxid):
logger.debug(f"AutoReply白名单模式群聊 {from_wxid[:20]}... 不在白名单中")
return True
# 跳过已被@的消息让AIChat正常处理
if self._is_at_bot(message):
logger.debug("AutoReply跳过@消息交由AIChat处理")
return True
# 监听模式:只在检测到待判断标记时才判断
monitor_mode = self.config.get("rate_limit", {}).get("monitor_mode", True)
if monitor_mode:
if not self.pending_judge.get(chat_id, False):
logger.debug(f"AutoReply监听模式群聊 {from_wxid[:20]}... 无待判断标记")
return True
# 清除待判断标记
self.pending_judge[chat_id] = False
# 频率限制:检查是否正在判断中
if self.config.get("rate_limit", {}).get("skip_if_judging", True):
if self.judging.get(chat_id, False):
logger.debug(f"AutoReply跳过消息群聊 {from_wxid[:20]}... 正在判断中")
return True
# 频率限制:检查距离上次判断的时间间隔
min_interval = self.config.get("rate_limit", {}).get("min_interval", 10)
last_time = self.last_judge_time.get(chat_id, 0)
current_time = time.time()
if current_time - last_time < min_interval:
logger.debug(f"AutoReply跳过消息距离上次判断仅 {current_time - last_time:.1f}")
# 监听模式下,如果时间间隔不够,重新标记为待判断
if monitor_mode:
self.pending_judge[chat_id] = True
return True
logger.info(f"AutoReply开始判断消息: {content[:30]}...")
# 标记正在判断中
self.judging[chat_id] = True
self.last_judge_time[chat_id] = current_time
# 使用小模型判断是否需要回复
judge_result = await self._judge_with_small_model(bot, message)
# 清除判断中标记
self.judging[chat_id] = False
if judge_result.should_reply:
logger.info(f"🔥 AutoReply触发 | {from_wxid[:20]}... | 评分:{judge_result.overall_score:.2f} | {judge_result.reasoning[:50]}")
# 更新状态
self._update_active_state(chat_id, judge_result)
# 修改消息让AIChat认为需要回复
message['_auto_reply_triggered'] = True
return True # 继续传递给AIChat
else:
logger.debug(f"AutoReply不触发 | {from_wxid[:20]}... | 评分:{judge_result.overall_score:.2f}")
self._update_passive_state(chat_id, judge_result)
return True
except Exception as e:
logger.error(f"AutoReply处理异常: {e}")
import traceback
logger.error(traceback.format_exc())
# 异常时也要清除判断中标记
if 'chat_id' in locals():
self.judging[chat_id] = False
elif 'from_wxid' in locals():
self.judging[self._normalize_chat_id(from_wxid)] = False
return True
# 同时检查原始ID和归一化ID
return chat_id in chat_list or safe_id in chat_list
def _is_at_bot(self, message: dict) -> bool:
"""检查是否@了机器人"""
content = message.get('Content', '')
# 规范化后的消息使用 Ats 字段
# 优先检查 Ats 列表
at_list = message.get('Ats', [])
# 检查是否有@列表或内容中包含@标记
return len(at_list) > 0 or '@' in content or '' in content
if at_list:
# 检查机器人wxid是否在@列表中
if self.bot_wxid and self.bot_wxid in at_list:
return True
async def _judge_with_small_model(self, bot, message: dict) -> JudgeResult:
"""使用小模型判断是否需要回复"""
# 规范化后的消息FromWxid是群IDSenderWxid是发送者IDContent是内容
from_wxid = message.get('FromWxid') # 群聊ID
chat_id = self._normalize_chat_id(from_wxid)
# 备用:检查内容中是否包含@机器人昵称
content = message.get('Content', '')
sender_wxid = message.get('SenderWxid', '')
if self.bot_nickname and f"@{self.bot_nickname}" in content:
return True
# 获取群聊状态
return False
def _is_bot_message(self, message: dict) -> bool:
"""检查是否是机器人自己的消息"""
sender_wxid = message.get('SenderWxid', '')
return sender_wxid == self.bot_wxid if self.bot_wxid else False
@on_text_message(priority=90)
async def handle_message(self, bot, message: dict):
    """Gate incoming group messages through the small judge model.

    Runs at priority 90 (before AIChat). When the judge decides the bot
    should chime in, sets message['_auto_reply_triggered'] so AIChat
    handles the reply. Always returns True so the message keeps
    propagating to downstream plugins.
    """
    try:
        # Skip everything when disabled or when config failed to load.
        if not self.config or not self.config["basic"]["enabled"]:
            return True
        # Group chats only.
        if not message.get('IsGroup', False):
            return True
        from_wxid = message.get('FromWxid', '')  # group chat id
        sender_wxid = message.get('SenderWxid', '')  # sender id
        content = message.get('Content', '').strip()
        # Skip empty messages.
        if not content:
            return True
        # Skip the bot's own messages to avoid self-trigger loops.
        if self._is_bot_message(message):
            return True
        # Whitelist check.
        if not self._is_chat_allowed(from_wxid):
            return True
        # @bot messages are AIChat's job; do not double-handle them.
        if self._is_at_bot(message):
            logger.debug(f"[AutoReply] 跳过@消息交由AIChat处理")
            return True
        chat_id = self._normalize_chat_id(from_wxid)
        current_time = time.time()
        # Rate limit: skip while a judgement for this chat is in flight.
        if self.judging.get(chat_id, False):
            logger.debug(f"[AutoReply] 群聊 {from_wxid[:15]}... 正在判断中,跳过")
            return True
        # Rate limit: enforce a minimum interval between judgements.
        min_interval = self.config.get("rate_limit", {}).get("min_interval", 10)
        last_time = self.last_judge_time.get(chat_id, 0)
        if current_time - last_time < min_interval:
            logger.debug(f"[AutoReply] 距离上次判断仅 {current_time - last_time:.1f}秒,跳过")
            return True
        # Mark this chat as being judged; cleared in the finally below.
        self.judging[chat_id] = True
        self.last_judge_time[chat_id] = current_time
        try:
            # Ask the small model whether the bot should reply.
            judge_result = await self._judge_with_small_model(from_wxid, content)
            if judge_result.should_reply:
                logger.info(f"[AutoReply] 触发回复 | 群:{from_wxid[:15]}... | 评分:{judge_result.overall_score:.2f} | {judge_result.reasoning[:30]}")
                # Update energy/reply stats for this chat.
                self._update_state(chat_id, replied=True)
                # Flag for AIChat so it treats this message as addressed to the bot.
                message['_auto_reply_triggered'] = True
            else:
                logger.debug(f"[AutoReply] 不触发 | 群:{from_wxid[:15]}... | 评分:{judge_result.overall_score:.2f}")
                self._update_state(chat_id, replied=False)
        finally:
            # Always release the in-flight flag, even if judging raised.
            self.judging[chat_id] = False
        return True
    except Exception as e:
        logger.error(f"[AutoReply] 处理异常: {e}")
        import traceback
        logger.error(traceback.format_exc())
        # Defensive cleanup: chat_id may not exist if we failed early.
        if 'chat_id' in locals():
            self.judging[chat_id] = False
        return True
async def _judge_with_small_model(self, from_wxid: str, content: str) -> JudgeResult:
"""使用小模型判断是否需要回复"""
chat_id = self._normalize_chat_id(from_wxid)
chat_state = self._get_chat_state(chat_id)
# 获取最近消息历史
@@ -311,60 +236,55 @@ class AutoReply(PluginBase):
last_bot_reply = await self._get_last_bot_reply(chat_id)
# 构建判断提示词
reasoning_part = ""
if self.config["judge"]["include_reasoning"]:
reasoning_part = ',\n "reasoning": "详细分析原因"'
reasoning_part = ',\n "reasoning": "简短分析原因(20字内)"' if self.config["judge"]["include_reasoning"] else ""
judge_prompt = f"""你是群聊机器人的决策系统,判断是否应该主动回复。
## 当前群聊情况
- 群聊ID: {from_wxid}
- 精力水平: {chat_state.energy:.1f}/1.0
## 当前状态
- 精力: {chat_state.energy:.1f}/1.0
- 上次发言: {self._get_minutes_since_last_reply(chat_id)}分钟前
## 最近{self.config['context']['messages_count']}对话
## 最近对话
{recent_messages}
## 上次机器人回复
{last_bot_reply if last_bot_reply else "暂无"}
{last_bot_reply or "暂无"}
## 待判断消息
内容: {content}
时间: {datetime.now().strftime('%H:%M:%S')}
{content}
## 评估要求
从以下5个维度评估0-10分
1. **内容相关度**(0-10):消息是否有趣、有价值、适合回复
2. **回复意愿**(0-10):基于当前精力水平的回复意愿
3. **社交适宜性**(0-10):在当前群聊氛围下回复是否合适
4. **时机恰当性**(0-10):回复时机是否恰当
5. **对话连贯性**(0-10):当前消息与上次回复的关联程度
## 评估维度(0-10分)
1. relevance: 内容是否有趣、值得回复
2. willingness: 基于精力的回复意愿
3. social: 回复是否社交适宜
4. timing: 时机是否恰当
5. continuity: 与上次回复的关联度
**回复阈值**: {self.config['basic']['reply_threshold']}
回复阈值: {self.config['basic']['reply_threshold']}
请以JSON格式回复:
仅返回JSON:
{{
"relevance": 分数,
"willingness": 分数,
"social": 分数,
"timing": 分数,
"continuity": 分数{reasoning_part}
}}
}}"""
**注意你的回复必须是完整的JSON对象不要包含任何其他内容**"""
# 调用小模型API
max_retries = self.config["judge"]["max_retries"] + 1
for attempt in range(max_retries):
# 调用API
max_retries = self.config["judge"].get("max_retries", 2)
for attempt in range(max_retries + 1):
try:
result = await self._call_judge_api(judge_prompt)
# 解析JSON
content_text = result.strip()
if content_text.startswith("```json"):
content_text = content_text.replace("```json", "").replace("```", "").strip()
elif content_text.startswith("```"):
content_text = content_text.replace("```", "").strip()
# 移除可能的markdown代码块标记
if content_text.startswith("```"):
content_text = content_text.split("```")[1]
if content_text.startswith("json"):
content_text = content_text[4:]
content_text = content_text.strip()
judge_data = json.loads(content_text)
@@ -385,18 +305,17 @@ class AutoReply(PluginBase):
social=judge_data.get("social", 0),
timing=judge_data.get("timing", 0),
continuity=judge_data.get("continuity", 0),
reasoning=judge_data.get("reasoning", "") if self.config["judge"]["include_reasoning"] else "",
reasoning=judge_data.get("reasoning", ""),
should_reply=should_reply,
overall_score=overall_score
)
except json.JSONDecodeError as e:
logger.warning(f"小模型返回JSON解析失败 (尝试 {attempt + 1}/{max_retries}): {str(e)}")
if attempt == max_retries - 1:
logger.warning(f"[AutoReply] JSON解析失败 (尝试 {attempt + 1}/{max_retries + 1}): {e}")
if attempt == max_retries:
return JudgeResult(should_reply=False, reasoning="JSON解析失败")
continue
except Exception as e:
logger.error(f"小模型判断异常: {e}")
logger.error(f"[AutoReply] 判断异常: {e}")
return JudgeResult(should_reply=False, reasoning=f"异常: {str(e)}")
return JudgeResult(should_reply=False, reasoning="重试失败")
@@ -415,80 +334,112 @@ class AutoReply(PluginBase):
payload = {
"model": model,
"messages": [
{"role": "system", "content": "你是一个专业的群聊回复决策系统。你必须严格按JSON格式返回结果"},
{"role": "system", "content": "你是群聊回复决策系统。严格按JSON格式返回,不要输出其他内容"},
{"role": "user", "content": prompt}
],
"temperature": 0.7
"temperature": 0.5,
"max_tokens": 200
}
# 配置代理
connector = None
if self.config["proxy"]["enabled"] and PROXY_SUPPORT:
proxy_type = self.config["proxy"]["type"]
proxy_host = self.config["proxy"]["host"]
proxy_port = self.config["proxy"]["port"]
proxy_config = self.config.get("proxy", {})
if proxy_config.get("enabled", False) and PROXY_SUPPORT:
proxy_type = proxy_config.get("type", "http")
proxy_host = proxy_config.get("host", "127.0.0.1")
proxy_port = proxy_config.get("port", 7890)
proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"
connector = ProxyConnector.from_url(proxy_url)
async with aiohttp.ClientSession(connector=connector) as session:
async with session.post(api_url, headers=headers, json=payload, timeout=aiohttp.ClientTimeout(total=30)) as response:
timeout = aiohttp.ClientTimeout(total=30)
async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
async with session.post(api_url, headers=headers, json=payload) as response:
if response.status != 200:
raise Exception(f"API调用失败: {response.status}")
error_text = await response.text()
raise Exception(f"API调用失败: {response.status}, {error_text[:100]}")
result = await response.json()
return result["choices"][0]["message"]["content"]
async def _get_history(self, chat_id: str) -> list:
    """Fetch group chat history (Redis first, file fallback).

    Reuses AIChat's storage: tries the shared Redis cache when AIChat has
    use_redis_history enabled, otherwise reads AIChat's per-chat JSON
    history file. Returns [] when AIChat is absent or nothing is found.
    """
    try:
        from utils.plugin_manager import PluginManager
        aichat_plugin = PluginManager().plugins.get("AIChat")
        if not aichat_plugin:
            return []
        # Prefer Redis (kept consistent with AIChat's own storage path).
        try:
            from utils.redis_cache import get_cache
            redis_cache = get_cache()
            if redis_cache and redis_cache.enabled:
                aichat_config = aichat_plugin.config or {}
                redis_config = aichat_config.get("redis", {})
                if redis_config.get("use_redis_history", True):
                    max_history = aichat_config.get("history", {}).get("max_history", 100)
                    history = redis_cache.get_group_history(chat_id, max_history)
                    if history:
                        return history
        except Exception as e:
            # Redis being down is non-fatal; fall through to the file store.
            logger.debug(f"[AutoReply] Redis 获取历史失败: {e}")
        # Fall back to AIChat's on-disk history file.
        if hasattr(aichat_plugin, 'history_dir') and aichat_plugin.history_dir:
            history_file = aichat_plugin.history_dir / f"{chat_id}.json"
            if history_file.exists():
                with open(history_file, "r", encoding="utf-8") as f:
                    return json.load(f)
    except Exception as e:
        logger.debug(f"[AutoReply] 获取历史失败: {e}")
    return []
async def _get_recent_messages(self, chat_id: str) -> str:
"""获取最近消息历史"""
try:
# 尝试从AIChat插件获取历史记录
from utils.plugin_manager import PluginManager
plugin_manager = PluginManager() # 单例模式,直接实例化
aichat_plugin = plugin_manager.plugins.get("AIChat")
history = await self._get_history(chat_id)
if not history:
return "暂无对话历史"
if aichat_plugin and hasattr(aichat_plugin, 'history_dir'):
history_file = aichat_plugin.history_dir / f"{chat_id}.json"
if history_file.exists():
with open(history_file, "r", encoding="utf-8") as f:
history = json.load(f)
count = self.config.get('context', {}).get('messages_count', 5)
recent = history[-count:] if len(history) > count else history
# 获取最近N条
recent = history[-self.config['context']['messages_count']:]
messages = []
for record in recent:
nickname = record.get('nickname', '未知')
content = record.get('content', '')
messages.append(f"{nickname}: {content}")
messages = []
for record in recent:
nickname = record.get('nickname', '未知')
content = record.get('content', '')
# 限制单条消息长度
if len(content) > 100:
content = content[:100] + "..."
messages.append(f"{nickname}: {content}")
return "\n".join(messages) if messages else "暂无对话历史"
return "\n".join(messages) if messages else "暂无对话历史"
except Exception as e:
logger.debug(f"获取消息历史失败: {e}")
logger.debug(f"[AutoReply] 获取消息历史失败: {e}")
return "暂无对话历史"
async def _get_last_bot_reply(self, chat_id: str) -> str:
async def _get_last_bot_reply(self, chat_id: str) -> Optional[str]:
"""获取上次机器人回复"""
try:
from utils.plugin_manager import PluginManager
plugin_manager = PluginManager() # 单例模式,直接实例化
aichat_plugin = plugin_manager.plugins.get("AIChat")
history = await self._get_history(chat_id)
if not history:
return None
if aichat_plugin and hasattr(aichat_plugin, 'history_dir'):
history_file = aichat_plugin.history_dir / f"{chat_id}.json"
if history_file.exists():
with open(history_file, "r", encoding="utf-8") as f:
history = json.load(f)
# 从后往前查找机器人回复
for record in reversed(history):
if record.get('nickname') == self.bot_nickname:
content = record.get('content', '')
if len(content) > 100:
content = content[:100] + "..."
return content
# 从后往前查找机器人回复
with open("main_config.toml", "rb") as f:
main_config = tomllib.load(f)
bot_nickname = main_config.get("Bot", {}).get("nickname", "机器人")
for record in reversed(history):
if record.get('nickname') == bot_nickname:
return record.get('content', '')
except Exception as e:
logger.debug(f"获取上次回复失败: {e}")
logger.debug(f"[AutoReply] 获取上次回复失败: {e}")
return None
@@ -497,9 +448,10 @@ class AutoReply(PluginBase):
if chat_id not in self.chat_states:
self.chat_states[chat_id] = ChatState()
today = date.today().isoformat()
state = self.chat_states[chat_id]
today = date.today().isoformat()
# 每日重置精力
if state.last_reset_date != today:
state.last_reset_date = today
state.energy = min(1.0, state.energy + 0.2)
@@ -508,21 +460,21 @@ class AutoReply(PluginBase):
def _get_minutes_since_last_reply(self, chat_id: str) -> int:
"""获取距离上次回复的分钟数"""
chat_state = self._get_chat_state(chat_id)
if chat_state.last_reply_time == 0:
state = self._get_chat_state(chat_id)
if state.last_reply_time == 0:
return 999
return int((time.time() - chat_state.last_reply_time) / 60)
return int((time.time() - state.last_reply_time) / 60)
def _update_active_state(self, chat_id: str, judge_result: JudgeResult):
"""更新主动回复状态"""
chat_state = self._get_chat_state(chat_id)
chat_state.last_reply_time = time.time()
chat_state.total_replies += 1
chat_state.total_messages += 1
chat_state.energy = max(0.1, chat_state.energy - self.config["energy"]["decay_rate"])
def _update_state(self, chat_id: str, replied: bool):
"""更新群聊状态"""
state = self._get_chat_state(chat_id)
state.total_messages += 1
def _update_passive_state(self, chat_id: str, judge_result: JudgeResult):
"""更新被动状态"""
chat_state = self._get_chat_state(chat_id)
chat_state.total_messages += 1
chat_state.energy = min(1.0, chat_state.energy + self.config["energy"]["recovery_rate"])
if replied:
state.last_reply_time = time.time()
state.total_replies += 1
decay = self.config.get("energy", {}).get("decay_rate", 0.1)
state.energy = max(0.1, state.energy - decay)
else:
recovery = self.config.get("energy", {}).get("recovery_rate", 0.02)
state.energy = min(1.0, state.energy + recovery)

View File

@@ -0,0 +1,3 @@
from .main import TavilySearch
__all__ = ["TavilySearch"]

View File

@@ -0,0 +1,202 @@
"""
TavilySearch 联网搜索插件
基于 Tavily API 的联网搜索功能,仅作为 LLM Tool 供 AIChat 调用
支持多 API Key 轮询,搜索结果返回给 AIChat 的 AI 处理(带上下文和人设)
"""
import tomllib
import aiohttp
from pathlib import Path
from typing import List, Optional
from loguru import logger
from utils.plugin_base import PluginBase
class TavilySearch(PluginBase):
"""Tavily 联网搜索插件 - 仅作为 LLM Tool"""
description = "Tavily 联网搜索 - 支持多 Key 轮询的搜索工具"
author = "Assistant"
version = "1.0.0"
def __init__(self):
super().__init__()
self.config = None
self.api_keys = []
self.current_key_index = 0
async def async_init(self):
"""异步初始化"""
try:
config_path = Path(__file__).parent / "config.toml"
if not config_path.exists():
logger.error(f"TavilySearch 配置文件不存在: {config_path}")
return
with open(config_path, "rb") as f:
self.config = tomllib.load(f)
self.api_keys = [k for k in self.config["tavily"]["api_keys"] if k and not k.startswith("#")]
if not self.api_keys:
logger.warning("TavilySearch: 未配置有效的 API Key")
else:
logger.success(f"TavilySearch 已加载,共 {len(self.api_keys)} 个 API Key")
except Exception as e:
logger.error(f"TavilySearch 初始化失败: {e}")
self.config = None
def _get_next_api_key(self) -> str:
"""轮询获取下一个 API Key"""
if not self.api_keys:
return ""
key = self.api_keys[self.current_key_index]
self.current_key_index = (self.current_key_index + 1) % len(self.api_keys)
return key
async def _search_tavily(self, query: str) -> Optional[dict]:
"""调用 Tavily API 进行搜索"""
api_key = self._get_next_api_key()
if not api_key:
logger.error("没有可用的 Tavily API Key")
return None
tavily_config = self.config["tavily"]
proxy_config = self.config.get("proxy", {})
payload = {
"api_key": api_key,
"query": query,
"search_depth": tavily_config.get("search_depth", "basic"),
"max_results": tavily_config.get("max_results", 5),
"include_raw_content": tavily_config.get("include_raw_content", False),
"include_images": tavily_config.get("include_images", False),
}
proxy = None
if proxy_config.get("enabled", False):
proxy_type = proxy_config.get("type", "http")
proxy_host = proxy_config.get("host", "127.0.0.1")
proxy_port = proxy_config.get("port", 7890)
proxy = f"{proxy_type}://{proxy_host}:{proxy_port}"
try:
import ssl
timeout = aiohttp.ClientTimeout(total=30)
# SSL 配置
ssl_config = self.config.get("ssl", {})
ssl_verify = ssl_config.get("verify", True)
connector = None
if not ssl_verify:
# 跳过 SSL 验证
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
connector = aiohttp.TCPConnector(ssl=ssl_context)
async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
async with session.post(
"https://api.tavily.com/search",
json=payload,
proxy=proxy
) as resp:
if resp.status == 200:
result = await resp.json()
logger.info(f"Tavily 搜索成功: {query[:30]}...")
logger.info(f"Tavily 原始返回: {result}")
return result
else:
error_text = await resp.text()
logger.error(f"Tavily API 错误: {resp.status}, {error_text}")
return None
except Exception as e:
logger.error(f"Tavily 搜索失败: {e}")
return None
def _format_search_results(self, results: dict) -> str:
"""格式化搜索结果供 AI 处理"""
if not results or "results" not in results:
return "未找到相关搜索结果"
formatted = []
for i, item in enumerate(results["results"], 1):
title = item.get("title", "无标题")
content = item.get("content", "")
url = item.get("url", "")
formatted.append(f"【结果 {i}\n标题: {title}\n内容: {content}\n来源: {url}\n")
return "\n".join(formatted)
def get_llm_tools(self) -> List[dict]:
"""返回 LLM 工具定义"""
if not self.config or not self.config["behavior"]["enabled"]:
return []
return [
{
"type": "function",
"function": {
"name": "tavily_web_search",
"description": "使用 Tavily 进行联网搜索,获取最新的网络信息。适用于需要查询实时信息、新闻、知识等场景。",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "搜索关键词或问题,建议使用简洁明确的搜索词"
}
},
"required": ["query"]
}
}
}
]
async def execute_llm_tool(self, tool_name: str, arguments: dict, bot, from_wxid: str) -> dict:
    """
    Handle an LLM tool invocation for this plugin.

    Only performs the search; the formatted results are handed back to the
    AI loop (need_ai_reply=True) so the reply keeps the chat context and
    persona. Returns None for tool names we do not own.
    """
    if tool_name != "tavily_web_search":
        return None  # not ours — let another plugin claim it
    if not (self.config and self.config["behavior"]["enabled"]):
        return {"success": False, "message": "TavilySearch 插件未启用"}
    if not self.api_keys:
        return {"success": False, "message": "未配置 Tavily API Key"}
    search_query = arguments.get("query", "")
    if not search_query:
        return {"success": False, "message": "搜索关键词不能为空"}
    try:
        logger.info(f"开始 Tavily 搜索: {search_query}")
        raw = await self._search_tavily(search_query)
        if not raw:
            return {"success": False, "message": "搜索失败,请稍后重试"}
        digest = self._format_search_results(raw)
        logger.success(f"Tavily 搜索完成: {search_query[:30]}...")
        # Hand the digest back to the caller's AI loop for a contextual reply.
        return {
            "success": True,
            "message": digest,
            "need_ai_reply": True  # AI must continue the conversation with this result
        }
    except Exception as exc:
        logger.error(f"Tavily 搜索执行失败: {exc}")
        import traceback
        logger.error(traceback.format_exc())
        return {"success": False, "message": f"搜索失败: {str(exc)}"}

View File

@@ -1,416 +0,0 @@
"""
联网搜索插件
支持命令触发和LLM工具调用
"""
import asyncio
import tomllib
import aiohttp
from pathlib import Path
from typing import List, Optional
from loguru import logger
from utils.plugin_base import PluginBase
from utils.decorators import on_text_message
from WechatHook import WechatHookClient
class WebSearch(PluginBase):
"""联网搜索插件"""
description = "联网搜索插件 - 支持实时信息查询和LLM工具调用"
author = "ShiHao"
version = "1.0.0"
def __init__(self):
    """Create the plugin; config is populated later by async_init()."""
    super().__init__()
    # Loaded from config.toml in async_init(); None until then.
    self.config = None
async def async_init(self):
    """Load config.toml from the plugin's own directory (run once at startup)."""
    cfg_file = Path(__file__).parent / "config.toml"
    with cfg_file.open("rb") as fh:
        self.config = tomllib.load(fh)
    logger.success("联网搜索插件初始化完成")
async def search(self, query: str, max_results: int = None) -> dict:
    """
    Run a web search against the configured search API.

    Args:
        query: search keywords
        max_results: cap on returned hits; defaults to behavior.max_results

    Returns:
        {"success": bool, "results": List[dict], "message": str}
    """
    api_cfg = self.config["api"]
    behavior_cfg = self.config["behavior"]
    if max_results is None:
        max_results = behavior_cfg["max_results"]

    def _fail(msg):
        # Uniform failure payload so callers always see the same shape.
        return {"success": False, "results": [], "message": msg}

    try:
        endpoint = f"{api_cfg['base_url']}/search"
        query_params = {
            "q": query,
            "format": "json",
            "language": api_cfg["language"]
        }
        logger.info(f"搜索请求: {query}")
        timeout = aiohttp.ClientTimeout(total=api_cfg["timeout"])
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(endpoint, params=query_params) as response:
                if response.status != 200:
                    body = await response.text()
                    logger.error(f"搜索API请求失败: {response.status}, {body[:200]}")
                    return _fail(f"搜索失败: HTTP {response.status}")
                data = await response.json()
                hits = data.get("results", [])
                if not hits:
                    return _fail("未找到相关结果")
                # Trim to the requested cap before reporting.
                hits = hits[:max_results]
                logger.success(f"搜索成功,找到 {len(hits)} 条结果")
                return {
                    "success": True,
                    "results": hits,
                    "message": f"找到 {len(hits)} 条结果"
                }
    except asyncio.TimeoutError:
        logger.warning("搜索请求超时")
        return _fail("搜索超时,请稍后重试")
    except Exception as e:
        logger.error(f"搜索异常: {e}")
        return _fail(f"搜索失败: {str(e)}")
def format_results(self, results: List[dict], max_results: int = 5) -> str:
    """
    Pretty-print search hits as a flat text summary.

    Args:
        results: raw search hit dicts
        max_results: maximum number of hits rendered

    Returns:
        Formatted text, or a fixed error line when there are no hits.
    """
    if not results:
        return "❌ 未找到相关结果"
    pieces = [
        f"🔍 搜索结果(共 {len(results)} 条)\n",
        "━━━━━━━━━━━━━━━━━━━━\n\n",
    ]
    last = min(len(results), max_results)
    for idx, hit in enumerate(results[:max_results], 1):
        snippet = hit.get("content", "")
        if len(snippet) > 120:
            # Keep snippets short; the full text lives behind the URL.
            snippet = snippet[:120] + "..."
        pieces.append(f"📌 {idx}. {hit.get('title', '无标题')}\n")
        if snippet:
            pieces.append(f"💬 {snippet}\n")
        pieces.append(f"🔗 {hit.get('url', '')}\n")
        if idx < last:
            # NOTE(review): '"" * 30' is an empty string — this looks like a
            # divider glyph lost in transit; preserved as-is to keep output
            # identical. Confirm intended separator character.
            pieces.append("\n" + "" * 30 + "\n\n")
    return "".join(pieces).strip()
async def send_results_as_chat_record(self, bot: WechatHookClient, to_wxid: str, query: str, results: List[dict], max_results: int = 5):
    """
    Send search hits to a chat as a WeChat "chat record" card.

    Args:
        bot: bot client used for sending
        to_wxid: recipient wxid
        query: original search keywords (shown in the card title)
        results: search hit dicts
        max_results: maximum number of hits included in the card
    """
    if not results:
        await bot.send_text(to_wxid, "❌ 未找到相关结果")
        return
    # Assemble the card body: one numbered entry per hit, blank-line separated.
    lines = []
    for idx, hit in enumerate(results[:max_results], 1):
        summary = hit.get("content", "")
        if len(summary) > 150:
            # Trim long snippets so each entry stays readable in the card.
            summary = summary[:150] + "..."
        lines.append(f"📌 {idx}. {hit.get('title', '无标题')}")
        if summary:
            lines.append(f"💬 {summary}")
        lines.append(f"🔗 {hit.get('url', '')}")
        lines.append("")  # blank separator between hits
    body = "\n".join(lines)
    await self._send_chat_records(bot, to_wxid, f"🔍 搜索:{query}", body)
async def _send_chat_records(self, bot, from_wxid: str, title: str, content: str):
    """Send a message in WeChat "chat record" (favorite-record) card format.

    Splits *content* into <=800-char parts, builds the <recordinfo> XML
    payload, wraps it in an <appmsg type=19> and ships it via the raw
    send command. Errors are logged, never raised to the caller.
    """
    try:
        import uuid
        import time
        import hashlib
        import xml.etree.ElementTree as ET
        is_group = from_wxid.endswith("@chatroom")
        # Auto-split the content (same scheme as the summary plugin):
        # prefer line boundaries; hard-split single lines over the limit.
        max_length = 800
        content_parts = []
        if len(content) <= max_length:
            content_parts = [content]
        else:
            lines = content.split('\n')
            current_part = ""
            for line in lines:
                if len(current_part + line + '\n') > max_length:
                    if current_part:
                        content_parts.append(current_part.strip())
                        current_part = line + '\n'
                    else:
                        # A single line longer than the limit: hard split it.
                        content_parts.append(line[:max_length])
                        current_part = line[max_length:] + '\n'
                else:
                    current_part += line + '\n'
            if current_part.strip():
                content_parts.append(current_part.strip())
        # Build the <recordinfo> document describing the record items.
        recordinfo = ET.Element("recordinfo")
        info_el = ET.SubElement(recordinfo, "info")
        info_el.text = title
        is_group_el = ET.SubElement(recordinfo, "isChatRoom")
        is_group_el.text = "1" if is_group else "0"
        datalist = ET.SubElement(recordinfo, "datalist")
        datalist.set("count", str(len(content_parts)))
        desc_el = ET.SubElement(recordinfo, "desc")
        desc_el.text = title
        fromscene_el = ET.SubElement(recordinfo, "fromscene")
        fromscene_el.text = "3"
        for i, part in enumerate(content_parts):
            di = ET.SubElement(datalist, "dataitem")
            di.set("datatype", "1")
            di.set("dataid", uuid.uuid4().hex)
            # Synthesize ids/timestamps so the client renders the parts as
            # an ordered conversation (older parts get earlier times).
            src_local_id = str((int(time.time() * 1000) % 90000) + 10000)
            new_msg_id = str(int(time.time() * 1000) + i)
            create_time = str(int(time.time()) - len(content_parts) + i)
            ET.SubElement(di, "srcMsgLocalid").text = src_local_id
            ET.SubElement(di, "sourcetime").text = time.strftime("%Y-%m-%d %H:%M", time.localtime(int(create_time)))
            ET.SubElement(di, "fromnewmsgid").text = new_msg_id
            ET.SubElement(di, "srcMsgCreateTime").text = create_time
            ET.SubElement(di, "sourcename").text = "搜索助手"
            ET.SubElement(di, "sourceheadurl").text = ""
            ET.SubElement(di, "datatitle").text = part
            ET.SubElement(di, "datadesc").text = part
            ET.SubElement(di, "datafmt").text = "text"
            ET.SubElement(di, "ischatroom").text = "1" if is_group else "0"
            dataitemsource = ET.SubElement(di, "dataitemsource")
            ET.SubElement(dataitemsource, "hashusername").text = hashlib.sha256(from_wxid.encode("utf-8")).hexdigest()
        record_xml = ET.tostring(recordinfo, encoding="unicode")
        # Wrap the record XML in an appmsg (type 19 = chat-record card).
        appmsg_parts = [
            "<appmsg appid=\"\" sdkver=\"0\">",
            f"<title>{title}</title>",
            f"<des>{title}</des>",
            "<type>19</type>",
            "<url>https://support.weixin.qq.com/cgi-bin/mmsupport-bin/readtemplate?t=page/favorite_record__w_unsupport</url>",
            "<appattach><cdnthumbaeskey></cdnthumbaeskey><aeskey></aeskey></appattach>",
            f"<recorditem><![CDATA[{record_xml}]]></recorditem>",
            "<percent>0</percent>",
            "</appmsg>"
        ]
        appmsg_xml = "".join(appmsg_parts)
        # 11214 — presumably the hook's raw "send app message" command id;
        # verify against the WechatHook client. TODO confirm.
        await bot._send_data_async(11214, {"to_wxid": from_wxid, "content": appmsg_xml})
        logger.success(f"已发送聊天记录: {title}")
    except Exception as e:
        logger.error(f"发送聊天记录失败: {e}")
@on_text_message(priority=70)
async def handle_message(self, bot: WechatHookClient, message: dict):
    """
    Text-message hook implementing the "/搜 <keywords>" search command.

    Returns True when the message is not for this plugin (so others keep
    processing), False once the message has been consumed here.
    """
    behavior = self.config["behavior"]
    if not behavior["enable_command"]:
        return True
    content = message.get("Content", "").strip()
    from_wxid = message.get("FromWxid", "")
    is_group = message.get("IsGroup", False)
    # Respect the per-scope switches.
    if is_group and not behavior["enable_group"]:
        return True
    if not is_group and not behavior["enable_private"]:
        return True
    # A command must be "<keyword><space><query>" — exact prefix match.
    matched = next(
        (kw for kw in behavior["command_keywords"] if content.startswith(kw + " ")),
        None,
    )
    if matched is None:
        return True
    query = content[len(matched):].strip()
    if not query:
        await bot.send_text(from_wxid, "❌ 请提供搜索关键词\n用法: /搜 <关键词>")
        return False
    logger.info(f"收到搜索请求: {query}")
    # Acknowledge immediately; the search itself may take a while.
    await bot.send_text(from_wxid, "🔍 正在搜索,请稍候...")
    try:
        outcome = await self.search(query)
        if outcome["success"]:
            # Either a chat-record card or a flat text listing.
            if behavior.get("send_as_cards", True):
                await self.send_results_as_chat_record(bot, from_wxid, query, outcome["results"])
            else:
                await bot.send_text(from_wxid, self.format_results(outcome["results"]))
            logger.success("搜索成功,已发送结果")
        else:
            await bot.send_text(from_wxid, f"{outcome['message']}")
    except Exception as e:
        logger.error(f"搜索处理失败: {e}")
        await bot.send_text(from_wxid, f"❌ 处理失败: {str(e)}")
    return False
def get_llm_tools(self) -> List[dict]:
    """
    Expose this plugin's search capability as an LLM tool definition
    (consumed by the AIChat plugin).

    Returns an empty list when the llm_tool feature is switched off.
    """
    tool_cfg = self.config["llm_tool"]
    if not tool_cfg["enabled"]:
        return []
    parameters = {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "搜索关键词,描述想要查询的内容"
            },
            "max_results": {
                "type": "integer",
                "description": "返回的最大结果数量默认5条",
                "default": 5
            }
        },
        "required": ["query"]
    }
    return [{
        "type": "function",
        "function": {
            "name": tool_cfg["tool_name"],
            "description": tool_cfg["tool_description"],
            "parameters": parameters
        }
    }]
async def execute_llm_tool(self, tool_name: str, arguments: dict, bot: WechatHookClient, from_wxid: str) -> dict:
    """
    Run an LLM-initiated search and push the results as a chat-record card.

    Returns:
        None when tool_name belongs to another plugin, otherwise
        {"success": bool, "message": str[, "no_reply": True]}.
    """
    expected = self.config["llm_tool"]["tool_name"]
    logger.info(f"WebSearch工具检查: 收到={tool_name}, 期望={expected}")
    if tool_name != expected:
        return None  # not our tool — let another plugin handle it
    try:
        query = arguments.get("query")
        if not query:
            return {"success": False, "message": "缺少搜索关键词参数"}
        limit = arguments.get("max_results", 5)
        logger.info(f"LLM工具调用搜索: {query}")
        outcome = await self.search(query, limit)
        if not outcome["success"]:
            return {"success": False, "message": outcome["message"]}
        # Send the card directly, bypassing AI summarization; the AI only
        # receives a short confirmation and is told not to reply again.
        await self.send_results_as_chat_record(bot, from_wxid, query, outcome["results"], limit)
        return {
            "success": True,
            "message": f"已为用户搜索「{query}」并发送了 {len(outcome['results'])} 条搜索结果的聊天记录卡片。",
            "no_reply": True  # suppress a second AI reply
        }
    except Exception as e:
        logger.error(f"LLM工具执行失败: {e}")
        return {"success": False, "message": f"执行失败: {str(e)}"}