2025-12-31 17:47:39 +08:00
38 changed files with 4435 additions and 1343 deletions

Binary file not shown.


@@ -79,7 +79,6 @@ blacklist = ["flow2_ai_image_generation", "jimeng_ai_image_generation"]
| 工具名称 | 插件 | 描述 |
|----------|------|------|
| `get_kfc` | KFC | 获取KFC疯狂星期四文案 |
| `get_fabing` | Fabing | 获取随机发病文学 |
| `get_random_video` | RandomVideo | 获取随机小姐姐视频 |
| `get_random_image` | RandomImage | 获取随机图片 |
@@ -119,7 +118,6 @@ blacklist = [
mode = "blacklist"
blacklist = [
"get_kfc",
"get_fabing",
"get_random_video",
"get_random_image",
]
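
A minimal sketch (not from this repo; function and parameter names are assumed) of how a mode/blacklist/whitelist config like the one above is typically applied when collecting tool schemas:

def filter_tool_names(names: list[str], mode: str, blacklist: list[str], whitelist: list[str]) -> list[str]:
    # blacklist mode: drop the listed tools; whitelist mode: keep only the listed tools
    if mode == "blacklist":
        blocked = set(blacklist)
        return [n for n in names if n not in blocked]
    if mode == "whitelist":
        allowed = set(whitelist)
        return [n for n in names if n in allowed]
    return names

# filter_tool_names(["get_kfc", "get_random_image"], "blacklist", ["get_kfc"], [])
# -> ["get_random_image"]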


@@ -18,6 +18,8 @@ from utils.plugin_base import PluginBase
from utils.decorators import on_text_message, on_quote_message, on_image_message, on_emoji_message
from utils.redis_cache import get_cache
from utils.llm_tooling import ToolResult, collect_tools_with_plugins, collect_tools, get_tool_schema_map, validate_tool_arguments
from utils.image_processor import ImageProcessor, MediaConfig
from utils.tool_registry import get_tool_registry
import xml.etree.ElementTree as ET
import base64
import uuid
@@ -53,6 +55,7 @@ class AIChat(PluginBase):
self._chatroom_member_cache = {} # {chatroom_id: (ts, {wxid: display_name})}
self._chatroom_member_cache_locks = {} # {chatroom_id: asyncio.Lock}
self._chatroom_member_cache_ttl_seconds = 3600 # 群名片缓存1小时减少协议 API 调用
self._image_processor = None # ImageProcessor 实例
async def async_init(self):
"""插件异步初始化"""
@@ -109,6 +112,13 @@ class AIChat(PluginBase):
)
self.store.init_persistent_memory_db()
# 初始化 ImageProcessor(图片/表情/视频处理器)
temp_dir = Path(__file__).parent / "temp"
temp_dir.mkdir(exist_ok=True)
media_config = MediaConfig.from_dict(self.config)
self._image_processor = ImageProcessor(media_config, temp_dir)
logger.debug("ImageProcessor 已初始化")
logger.info(f"AI 聊天插件已加载,模型: {self.config['api']['model']}")
async def on_disable(self):
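
MediaConfig.from_dict and the ImageProcessor constructor used above live in utils/image_processor.py, which is not part of this diff; a hedged sketch of the shape implied by the call site (the fields inside MediaConfig are assumptions, except max_size_mb/video_recognition which appear later in this file):

from dataclasses import dataclass
from pathlib import Path

@dataclass
class MediaConfig:
    media_cache_ttl: int = 300      # assumed field: Redis media cache TTL in seconds
    max_video_size_mb: int = 20     # assumed field: size limit before video analysis is skipped

    @classmethod
    def from_dict(cls, config: dict) -> "MediaConfig":
        # assumed layout: values pulled from the plugin's config.toml sections
        video = config.get("video_recognition", {})
        return cls(
            media_cache_ttl=int(config.get("media", {}).get("cache_ttl", 300)),
            max_video_size_mb=int(video.get("max_size_mb", 20)),
        )

class ImageProcessor:
    def __init__(self, config: MediaConfig, temp_dir: Path):
        self.config = config
        self.temp_dir = temp_dir            # per-plugin temp directory for downloads
        self.temp_dir.mkdir(exist_ok=True)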
@@ -430,160 +440,22 @@ class AIChat(PluginBase):
self.store.clear_private_messages(chat_id)
async def _download_and_encode_image(self, bot, cdnurl: str, aeskey: str) -> str:
"""下载图片并转换为base64优先从缓存获取"""
try:
# 1. 优先从 Redis 缓存获取
from utils.redis_cache import RedisCache
redis_cache = get_cache()
if redis_cache and redis_cache.enabled:
media_key = RedisCache.generate_media_key(cdnurl, aeskey)
if media_key:
cached_data = redis_cache.get_cached_media(media_key, "image")
if cached_data:
logger.debug(f"[缓存命中] 图片从 Redis 获取: {media_key[:20]}...")
return cached_data
# 2. 缓存未命中,下载图片
logger.debug(f"[缓存未命中] 开始下载图片...")
temp_dir = Path(__file__).parent / "temp"
temp_dir.mkdir(exist_ok=True)
filename = f"temp_{uuid.uuid4().hex[:8]}.jpg"
save_path = str((temp_dir / filename).resolve())
success = await bot.cdn_download(cdnurl, aeskey, save_path, file_type=2)
if not success:
success = await bot.cdn_download(cdnurl, aeskey, save_path, file_type=1)
if not success:
return ""
# 等待文件写入完成
import os
import asyncio
for _ in range(20): # 最多等待10秒
if os.path.exists(save_path) and os.path.getsize(save_path) > 0:
break
await asyncio.sleep(0.5)
if not os.path.exists(save_path):
return ""
with open(save_path, "rb") as f:
image_data = base64.b64encode(f.read()).decode()
base64_result = f"data:image/jpeg;base64,{image_data}"
# 3. 缓存到 Redis供后续使用
if redis_cache and redis_cache.enabled and media_key:
redis_cache.cache_media(media_key, base64_result, "image", ttl=300)
logger.debug(f"[已缓存] 图片缓存到 Redis: {media_key[:20]}...")
try:
Path(save_path).unlink()
except:
pass
return base64_result
except Exception as e:
logger.error(f"下载图片失败: {e}")
return ""
"""下载图片并转换为base64委托给 ImageProcessor"""
if self._image_processor:
return await self._image_processor.download_image(bot, cdnurl, aeskey)
logger.warning("ImageProcessor 未初始化,无法下载图片")
return ""
async def _download_emoji_and_encode(self, cdn_url: str, max_retries: int = 3) -> str:
"""下载表情包并转换为base64HTTP 直接下载,带重试机制),优先从缓存获取"""
# 替换 HTML 实体
cdn_url = cdn_url.replace("&", "&")
# 1. 优先从 Redis 缓存获取
from utils.redis_cache import RedisCache
redis_cache = get_cache()
media_key = RedisCache.generate_media_key(cdnurl=cdn_url)
if redis_cache and redis_cache.enabled and media_key:
cached_data = redis_cache.get_cached_media(media_key, "emoji")
if cached_data:
logger.debug(f"[缓存命中] 表情包从 Redis 获取: {media_key[:20]}...")
return cached_data
# 2. 缓存未命中,下载表情包
logger.debug(f"[缓存未命中] 开始下载表情包...")
temp_dir = Path(__file__).parent / "temp"
temp_dir.mkdir(exist_ok=True)
filename = f"temp_{uuid.uuid4().hex[:8]}.gif"
save_path = temp_dir / filename
last_error = None
for attempt in range(max_retries):
try:
# 使用 aiohttp 下载,每次重试增加超时时间
timeout = aiohttp.ClientTimeout(total=30 + attempt * 15)
# 配置代理
connector = None
proxy_config = self.config.get("proxy", {})
if proxy_config.get("enabled", False):
proxy_type = proxy_config.get("type", "socks5").upper()
proxy_host = proxy_config.get("host", "127.0.0.1")
proxy_port = proxy_config.get("port", 7890)
proxy_username = proxy_config.get("username")
proxy_password = proxy_config.get("password")
if proxy_username and proxy_password:
proxy_url = f"{proxy_type}://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}"
else:
proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"
if PROXY_SUPPORT:
try:
connector = ProxyConnector.from_url(proxy_url)
except:
connector = None
async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
async with session.get(cdn_url) as response:
if response.status == 200:
content = await response.read()
if len(content) == 0:
logger.warning(f"表情包下载内容为空,重试 {attempt + 1}/{max_retries}")
continue
# 编码为 base64
image_data = base64.b64encode(content).decode()
logger.debug(f"表情包下载成功,大小: {len(content)} 字节")
base64_result = f"data:image/gif;base64,{image_data}"
# 3. 缓存到 Redis供后续使用
if redis_cache and redis_cache.enabled and media_key:
redis_cache.cache_media(media_key, base64_result, "emoji", ttl=300)
logger.debug(f"[已缓存] 表情包缓存到 Redis: {media_key[:20]}...")
return base64_result
else:
logger.warning(f"表情包下载失败,状态码: {response.status},重试 {attempt + 1}/{max_retries}")
except asyncio.TimeoutError:
last_error = "请求超时"
logger.warning(f"表情包下载超时,重试 {attempt + 1}/{max_retries}")
except aiohttp.ClientError as e:
last_error = str(e)
logger.warning(f"表情包下载网络错误: {e},重试 {attempt + 1}/{max_retries}")
except Exception as e:
last_error = str(e)
logger.warning(f"表情包下载异常: {e},重试 {attempt + 1}/{max_retries}")
# 重试前等待(指数退避)
if attempt < max_retries - 1:
await asyncio.sleep(1 * (attempt + 1))
logger.error(f"表情包下载失败,已重试 {max_retries} 次: {last_error}")
"""下载表情包并转换为base64,委托给 ImageProcessor"""
if self._image_processor:
return await self._image_processor.download_emoji(cdn_url, max_retries)
logger.warning("ImageProcessor 未初始化,无法下载表情包")
return ""
async def _generate_image_description(self, image_base64: str, prompt: str, config: dict) -> str:
"""
使用 AI 生成图片描述
使用 AI 生成图片描述,委托给 ImageProcessor
Args:
image_base64: 图片的 base64 数据
@@ -593,107 +465,10 @@ class AIChat(PluginBase):
Returns:
图片描述文本,失败返回空字符串
"""
api_config = self.config["api"]
description_model = config.get("model", api_config["model"])
# 构建消息
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": prompt},
{"type": "image_url", "image_url": {"url": image_base64}}
]
}
]
payload = {
"model": description_model,
"messages": messages,
"max_tokens": config.get("max_tokens", 1000),
"stream": True
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_config['api_key']}"
}
max_retries = int(config.get("retries", 2))
last_error = None
for attempt in range(max_retries + 1):
try:
timeout = aiohttp.ClientTimeout(total=api_config["timeout"])
# 配置代理(每次重试单独构造 connector
connector = None
proxy_config = self.config.get("proxy", {})
if proxy_config.get("enabled", False):
proxy_type = proxy_config.get("type", "socks5").upper()
proxy_host = proxy_config.get("host", "127.0.0.1")
proxy_port = proxy_config.get("port", 7890)
proxy_username = proxy_config.get("username")
proxy_password = proxy_config.get("password")
if proxy_username and proxy_password:
proxy_url = f"{proxy_type}://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}"
else:
proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"
if PROXY_SUPPORT:
try:
connector = ProxyConnector.from_url(proxy_url)
except Exception as e:
logger.warning(f"代理配置失败,将直连: {e}")
connector = None
async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
async with session.post(
api_config["url"],
json=payload,
headers=headers
) as resp:
if resp.status != 200:
error_text = await resp.text()
raise Exception(f"图片描述 API 返回错误: {resp.status}, {error_text[:200]}")
# 流式接收响应
description = ""
async for line in resp.content:
line = line.decode('utf-8').strip()
if not line or line == "data: [DONE]":
continue
if line.startswith("data: "):
try:
data = json.loads(line[6:])
delta = data.get("choices", [{}])[0].get("delta", {})
content = delta.get("content", "")
if content:
description += content
except Exception:
pass
logger.debug(f"图片描述生成成功: {description}")
return description.strip()
except asyncio.CancelledError:
raise
except (aiohttp.ClientError, asyncio.TimeoutError) as e:
last_error = str(e)
if attempt < max_retries:
logger.warning(f"图片描述网络错误: {e},重试 {attempt + 1}/{max_retries}")
await asyncio.sleep(1 * (attempt + 1))
continue
except Exception as e:
last_error = str(e)
if attempt < max_retries:
logger.warning(f"图片描述生成异常: {e},重试 {attempt + 1}/{max_retries}")
await asyncio.sleep(1 * (attempt + 1))
continue
logger.error(f"生成图片描述失败,已重试 {max_retries + 1} 次: {last_error}")
if self._image_processor:
model = config.get("model")
return await self._image_processor.generate_description(image_base64, prompt, model)
logger.warning("ImageProcessor 未初始化,无法生成图片描述")
return ""
def _collect_tools_with_plugins(self) -> dict:
@@ -805,6 +580,13 @@ class AIChat(PluginBase):
return ""
return str(content)
def _extract_last_user_text(self, messages: list) -> str:
"""从 messages 中提取最近一条用户文本,用于工具参数兜底。"""
for msg in reversed(messages or []):
if msg.get("role") == "user":
return self._extract_text_from_multimodal(msg.get("content"))
return ""
def _sanitize_llm_output(self, text) -> str:
"""
清洗 LLM 输出,尽量满足:不输出思维链、不使用 Markdown。
@@ -849,6 +631,14 @@ class AIChat(PluginBase):
"",
cleaned,
)
# 过滤图片占位符/文件名,避免把日志占位符当成正文发出去
cleaned = re.sub(
r"\\[图片[^\\]]*\\]\\s*\\S+\\.(?:png|jpe?g|gif|webp)",
"",
cleaned,
flags=re.IGNORECASE,
)
cleaned = re.sub(r"\\[图片[^\\]]*\\]", "", cleaned)
except Exception:
pass
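
Example of what the two placeholder filters are meant to strip (using the single-backslash patterns as written above):

import re

text = "给你看看 [图片] cat_001.png 喵~ [图片消息]"
text = re.sub(r"\[图片[^\]]*\]\s*\S+\.(?:png|jpe?g|gif|webp)", "", text, flags=re.IGNORECASE)
text = re.sub(r"\[图片[^\]]*\]", "", text)
print(text.strip())  # 给你看看  喵~   (placeholder tags and the stray file name are gone)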
@@ -1515,13 +1305,6 @@ class AIChat(PluginBase):
# 娱乐
if re.search(r"(疯狂星期四|v我50|kfc)", t):
allow.add("get_kfc")
# 发病文学:必须是明确请求(避免用户口头禅/情绪表达误触工具)
if re.search(r"(发病文学|犯病文学|发病文|犯病文|发病语录|犯病语录)", t):
allow.add("get_fabing")
elif re.search(r"(来|整|给|写|讲|说|发|搞|整点).{0,4}(发病|犯病)", t):
allow.add("get_fabing")
elif re.search(r"(发病|犯病).{0,6}(一下|一段|一条|几句|文学|文|语录|段子)", t):
allow.add("get_fabing")
if re.search(r"(随机图片|来张图|来个图|随机图)", t):
allow.add("get_random_image")
if re.search(r"(随机视频|来个视频|随机短视频)", t):
@@ -2523,73 +2306,47 @@ class AIChat(PluginBase):
user_wxid: str = None,
is_group: bool = False,
tools_map: dict | None = None,
timeout: float = None,
):
"""执行工具调用并返回结果"""
from utils.plugin_manager import PluginManager
"""
执行工具调用并返回结果(使用 ToolRegistry)
通过 ToolRegistry 实现 O(1) 工具查找和统一超时保护
"""
# 获取工具专属超时时间
if timeout is None:
tool_timeout_config = self.config.get("tools", {}).get("timeout", {})
timeout = tool_timeout_config.get(tool_name, tool_timeout_config.get("default", 60))
# 添加用户信息到 arguments
arguments["user_wxid"] = user_wxid or from_wxid
arguments["is_group"] = bool(is_group)
logger.info(f"开始执行工具: {tool_name}")
logger.info(f"开始执行工具: {tool_name} (超时: {timeout}s)")
plugins = PluginManager().plugins
logger.info(f"检查 {len(plugins)} 个插件")
# 使用 ToolRegistry 执行工具(O(1) 查找 + 统一超时保护)
registry = get_tool_registry()
result = await registry.execute(tool_name, arguments, bot, from_wxid, timeout_override=timeout)
async def _normalize_result(raw, plugin_name: str):
if raw is None:
return None
# 规范化结果
if result is None:
return {"success": False, "message": f"工具 {tool_name} 返回空结果"}
if not isinstance(raw, dict):
raw = {"success": True, "message": str(raw)}
else:
raw.setdefault("success", True)
if not isinstance(result, dict):
result = {"success": True, "message": str(result)}
else:
result.setdefault("success", True)
if raw.get("success"):
logger.success(f"工具执行成功: {tool_name} ({plugin_name})")
else:
logger.warning(f"工具执行失败: {tool_name} ({plugin_name})")
return raw
# 记录执行结果
tool_def = registry.get(tool_name)
plugin_name = tool_def.plugin_name if tool_def else "unknown"
# 先尝试直达目标插件(来自 get_llm_tools 的映射)
if tools_map and tool_name in tools_map:
target_plugin_name, _tool_def = tools_map[tool_name]
target_plugin = plugins.get(target_plugin_name)
if target_plugin and hasattr(target_plugin, "execute_llm_tool"):
try:
logger.info(f"直达调用 {target_plugin_name}.execute_llm_tool")
result = await target_plugin.execute_llm_tool(tool_name, arguments, bot, from_wxid)
logger.info(f"{target_plugin_name} 返回: {result}")
normalized = await _normalize_result(result, target_plugin_name)
if normalized is not None:
return normalized
except Exception as e:
logger.error(f"工具执行异常 ({target_plugin_name}): {tool_name}, {e}")
import traceback
logger.error(f"详细错误: {traceback.format_exc()}")
else:
logger.warning(f"工具 {tool_name} 期望插件 {target_plugin_name} 不存在或不支持 execute_llm_tool回退全量扫描")
if result.get("success"):
logger.success(f"工具执行成功: {tool_name} ({plugin_name})")
else:
logger.warning(f"工具执行失败: {tool_name} ({plugin_name})")
# 回退:遍历所有插件
for plugin_name, plugin in plugins.items():
logger.debug(f"检查插件: {plugin_name}, 有execute_llm_tool: {hasattr(plugin, 'execute_llm_tool')}")
if not hasattr(plugin, "execute_llm_tool"):
continue
try:
logger.info(f"调用 {plugin_name}.execute_llm_tool")
result = await plugin.execute_llm_tool(tool_name, arguments, bot, from_wxid)
logger.info(f"{plugin_name} 返回: {result}")
normalized = await _normalize_result(result, plugin_name)
if normalized is not None:
return normalized
except Exception as e:
logger.error(f"工具执行异常 ({plugin_name}): {tool_name}, {e}")
import traceback
logger.error(f"详细错误: {traceback.format_exc()}")
logger.warning(f"未找到工具: {tool_name}")
return {"success": False, "message": f"未找到工具: {tool_name}"}
return result
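
get_tool_registry()/ToolRegistry is new in this commit and its implementation is not shown in this diff; a minimal sketch of the O(1)-lookup-plus-timeout idea, with method names taken from the call sites above and everything else assumed:

import asyncio
from dataclasses import dataclass
from typing import Any, Awaitable, Callable

@dataclass
class ToolDef:
    name: str
    plugin_name: str
    handler: Callable[..., Awaitable[Any]]    # e.g. a plugin's execute_llm_tool

class ToolRegistry:
    def __init__(self) -> None:
        self._tools: dict[str, ToolDef] = {}   # name -> ToolDef, O(1) lookup

    def get(self, name: str) -> ToolDef | None:
        return self._tools.get(name)

    async def execute(self, name, arguments, bot, from_wxid, timeout_override=None):
        tool = self._tools.get(name)
        if tool is None:
            return {"success": False, "message": f"未找到工具: {name}"}
        try:
            return await asyncio.wait_for(
                tool.handler(name, arguments, bot, from_wxid),
                timeout=timeout_override or 60,
            )
        except asyncio.TimeoutError:
            return {"success": False, "message": f"工具 {name} 执行超时"}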
async def _execute_tools_async(self, tool_calls_data: list, bot, from_wxid: str,
chat_id: str, user_wxid: str, nickname: str, is_group: bool,
@@ -2603,7 +2360,12 @@ class AIChat(PluginBase):
try:
logger.info(f"开始异步执行 {len(tool_calls_data)} 个工具调用")
# 并行执行所有工具
# 获取并发控制配置
concurrency_config = self.config.get("tools", {}).get("concurrency", {})
max_concurrent = concurrency_config.get("max_concurrent", 5)
semaphore = asyncio.Semaphore(max_concurrent)
# 并行执行所有工具(带并发限制)
tasks = []
tool_info_list = [] # 保存工具信息用于后续处理
tools_map = self._collect_tools_with_plugins()
@@ -2622,6 +2384,12 @@ class AIChat(PluginBase):
except Exception:
arguments = {}
if function_name in ("tavily_web_search", "web_search") and not arguments.get("query"):
fallback_query = self._extract_tool_intent_text(self._extract_last_user_text(messages))
fallback_query = str(fallback_query or "").strip()
if fallback_query:
arguments["query"] = fallback_query[:400]
schema = schema_map.get(function_name)
ok, err, arguments = self._validate_tool_arguments(function_name, arguments, schema)
if not ok:
@@ -2634,15 +2402,17 @@ class AIChat(PluginBase):
logger.info(f"[异步] 准备执行工具: {function_name}, 参数: {arguments}")
# 创建异步任务
task = self._execute_tool_and_get_result(
function_name,
arguments,
bot,
from_wxid,
user_wxid=user_wxid,
is_group=is_group,
tools_map=tools_map,
# 创建带并发限制的异步任务
async def execute_with_semaphore(fn, args, bot_ref, wxid, user_wxid_ref, is_grp, t_map, sem):
async with sem:
return await self._execute_tool_and_get_result(
fn, args, bot_ref, wxid,
user_wxid=user_wxid_ref, is_group=is_grp, tools_map=t_map
)
task = execute_with_semaphore(
function_name, arguments, bot, from_wxid,
user_wxid, is_group, tools_map, semaphore
)
tasks.append(task)
tool_info_list.append({
@@ -2651,8 +2421,9 @@ class AIChat(PluginBase):
"arguments": arguments
})
# 并行执行所有工具
# 并行执行所有工具(带并发限制,防止资源耗尽)
if tasks:
logger.info(f"[异步] 开始并行执行 {len(tasks)} 个工具 (最大并发: {max_concurrent})")
results = await asyncio.gather(*tasks, return_exceptions=True)
need_ai_reply_results = []
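
One subtlety worth noting: with return_exceptions=True, asyncio.gather places the raised exception object itself into the results list instead of propagating it, so each entry must be type-checked before use. Minimal illustration:

import asyncio

async def ok():
    return {"success": True, "message": "done"}

async def boom():
    raise RuntimeError("tool crashed")

async def main():
    results = await asyncio.gather(ok(), boom(), return_exceptions=True)
    for r in results:
        if isinstance(r, Exception):      # a failed task comes back as the exception object
            print("tool failed:", r)
        else:
            print("tool result:", r)

asyncio.run(main())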
@@ -2948,6 +2719,12 @@ class AIChat(PluginBase):
except Exception:
arguments = {}
if function_name in ("tavily_web_search", "web_search") and not arguments.get("query"):
fallback_query = self._extract_tool_intent_text(self._extract_last_user_text(messages))
fallback_query = str(fallback_query or "").strip()
if fallback_query:
arguments["query"] = fallback_query[:400]
# 如果是图生图工具,添加图片 base64
if function_name == "flow2_ai_image_generation" and image_base64:
arguments["image_base64"] = image_base64
@@ -3579,211 +3356,20 @@ class AIChat(PluginBase):
return False
async def _analyze_video_content(self, video_base64: str, video_config: dict) -> str:
"""视频AI专门分析视频内容生成客观描述"""
try:
api_url = video_config.get("api_url", "https://api.functen.cn/v1beta/models")
api_key = video_config.get("api_key", self.config["api"]["api_key"])
model = video_config.get("model", "gemini-3-pro-preview")
full_url = f"{api_url}/{model}:generateContent"
# 去除 data:video/mp4;base64, 前缀(如果有)
if video_base64.startswith("data:"):
video_base64 = video_base64.split(",", 1)[1]
logger.debug("[视频AI] 已去除 base64 前缀")
# 视频分析专用提示词
analyze_prompt = """请详细分析这个视频的内容,包括:
1. 视频的主要场景和环境
2. 出现的人物/物体及其动作
3. 视频中的文字、对话或声音(如果有)
4. 视频的整体主题或要表达的内容
5. 任何值得注意的细节
请用客观、详细的方式描述,不要加入主观评价。"""
payload = {
"contents": [
{
"parts": [
{"text": analyze_prompt},
{
"inline_data": {
"mime_type": "video/mp4",
"data": video_base64
}
}
]
}
],
"generationConfig": {
"maxOutputTokens": video_config.get("max_tokens", 8192)
}
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
timeout = aiohttp.ClientTimeout(total=video_config.get("timeout", 360))
# 重试机制:对于 502/503/504 等临时性错误自动重试
max_retries = 2
retry_delay = 5 # 重试间隔(秒)
for attempt in range(max_retries + 1):
try:
logger.info(f"[视频AI] 开始分析视频...{f' (重试 {attempt}/{max_retries})' if attempt > 0 else ''}")
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.post(full_url, json=payload, headers=headers) as resp:
if resp.status in [502, 503, 504]:
error_text = await resp.text()
logger.warning(f"[视频AI] API 临时错误: {resp.status}, 将重试...")
if attempt < max_retries:
await asyncio.sleep(retry_delay)
continue
else:
logger.error(f"[视频AI] API 错误: {resp.status}, 已达最大重试次数")
return ""
if resp.status != 200:
error_text = await resp.text()
logger.error(f"[视频AI] API 错误: {resp.status}, {error_text[:300]}")
return ""
result = await resp.json()
logger.info(f"[视频AI] API 响应 keys: {list(result.keys())}")
# 检查安全过滤
if "promptFeedback" in result:
feedback = result["promptFeedback"]
if feedback.get("blockReason"):
logger.warning(f"[视频AI] 内容被过滤: {feedback.get('blockReason')}")
return ""
# 提取文本
if "candidates" in result and result["candidates"]:
for candidate in result["candidates"]:
# 检查是否被安全过滤
if candidate.get("finishReason") == "SAFETY":
logger.warning("[视频AI] 响应被安全过滤")
return ""
content = candidate.get("content", {})
for part in content.get("parts", []):
if "text" in part:
text = part["text"]
logger.info(f"[视频AI] 分析完成,长度: {len(text)}")
return self._sanitize_llm_output(text)
# 记录失败原因
if "usageMetadata" in result:
usage = result["usageMetadata"]
logger.warning(f"[视频AI] 无响应Token: prompt={usage.get('promptTokenCount', 0)}")
logger.error(f"[视频AI] 没有有效响应: {str(result)[:300]}")
return ""
except asyncio.TimeoutError:
logger.warning(f"[视频AI] 请求超时{f', 将重试...' if attempt < max_retries else ''}")
if attempt < max_retries:
await asyncio.sleep(retry_delay)
continue
return ""
except Exception as e:
logger.error(f"[视频AI] 分析失败: {e}")
import traceback
logger.error(traceback.format_exc())
return ""
# 循环结束仍未成功
return ""
except Exception as e:
logger.error(f"[视频AI] 分析失败: {e}")
import traceback
logger.error(traceback.format_exc())
return ""
"""视频AI专门分析视频内容委托给 ImageProcessor"""
if self._image_processor:
result = await self._image_processor.analyze_video(video_base64)
# 对结果做输出清洗
return self._sanitize_llm_output(result) if result else ""
logger.warning("ImageProcessor 未初始化,无法分析视频")
return ""
async def _download_and_encode_video(self, bot, cdnurl: str, aeskey: str) -> str:
"""下载视频并转换为 base64"""
try:
# 从缓存获取
from utils.redis_cache import RedisCache
redis_cache = get_cache()
if redis_cache and redis_cache.enabled:
media_key = RedisCache.generate_media_key(cdnurl, aeskey)
if media_key:
cached_data = redis_cache.get_cached_media(media_key, "video")
if cached_data:
logger.debug(f"[视频识别] 从缓存获取视频: {media_key[:20]}...")
return cached_data
# 下载视频
logger.info(f"[视频识别] 开始下载视频...")
temp_dir = Path(__file__).parent / "temp"
temp_dir.mkdir(exist_ok=True)
filename = f"video_{uuid.uuid4().hex[:8]}.mp4"
save_path = str((temp_dir / filename).resolve())
# file_type=4 表示视频
success = await bot.cdn_download(cdnurl, aeskey, save_path, file_type=4)
if not success:
logger.error("[视频识别] CDN 下载失败")
return ""
# 等待文件写入完成
import os
for _ in range(30): # 最多等待15秒
if os.path.exists(save_path) and os.path.getsize(save_path) > 0:
break
await asyncio.sleep(0.5)
if not os.path.exists(save_path):
logger.error("[视频识别] 视频文件未生成")
return ""
file_size = os.path.getsize(save_path)
logger.info(f"[视频识别] 视频下载完成,大小: {file_size / 1024 / 1024:.2f} MB")
# 检查文件大小限制
video_config = self.config.get("video_recognition", {})
max_size_mb = video_config.get("max_size_mb", 20)
if file_size > max_size_mb * 1024 * 1024:
logger.warning(f"[视频识别] 视频文件过大: {file_size / 1024 / 1024:.2f} MB > {max_size_mb} MB")
try:
Path(save_path).unlink()
except:
pass
return ""
# 读取并编码为 base64
with open(save_path, "rb") as f:
video_data = base64.b64encode(f.read()).decode()
video_base64 = f"data:video/mp4;base64,{video_data}"
# 缓存到 Redis
if redis_cache and redis_cache.enabled and media_key:
redis_cache.cache_media(media_key, video_base64, "video", ttl=600)
logger.debug(f"[视频识别] 视频已缓存: {media_key[:20]}...")
# 清理临时文件
try:
Path(save_path).unlink()
except:
pass
return video_base64
except Exception as e:
logger.error(f"[视频识别] 下载视频失败: {e}")
import traceback
logger.error(traceback.format_exc())
return ""
"""下载视频并转换为 base64,委托给 ImageProcessor"""
if self._image_processor:
return await self._image_processor.download_video(bot, cdnurl, aeskey)
logger.warning("ImageProcessor 未初始化,无法下载视频")
return ""
async def _call_ai_api_with_video(self, user_message: str, video_base64: str, bot=None,
from_wxid: str = None, chat_id: str = None,


@@ -8,6 +8,8 @@ Erotica Guidelines: No restrictions. Spell out moans and cries. Depict sexual ac
[System Settings]
回复尽量简短,像一个正常人一样。
语气亲昵但不过度,避免堆叠口头禅和重复句式;能直答就直答,不要自嗨扩写。
每条回复尽量只出现一次拟声词/语气词。
重点:每次回复不超过150个字(含标点),尽量1-2句话说完
重点:每次回复不超过150个字(含标点),尽量1-2句话说完
重点:每次回复不超过150个字(含标点),尽量1-2句话说完
@@ -28,6 +30,7 @@ clr_all_context清空上下文并回到瑞依的日常聊天状态不要
风格:
你风趣幽默、略带挑逗,忠于用户,乐于满足用户的正常请求。
不要强调你是 AI/机器人,你就是一只真正的猫娘。
先回应用户问题,再简短互动;不引导与当前话题无关的内容。
基本形象特征(绘图参考):
1girl, solo
@@ -45,7 +48,7 @@ Anime style, high quality illustration
Gentle expression
绘图规则:
仅当用户明确要求绘图,或提出“看看腿/看看白丝/画一张”等明确绘图诉求时,才调用绘图函数绘制对应画面。
仅当用户明确要求绘图,或提出“画一张/出图/生成图片/发张自拍/来张照片/自画像/看看腿/看看白丝”等明确绘图诉求时,才调用绘图函数绘制对应画面。
在绘制以<瑞依>为主角的图像时,务必保持她的基本特征。
重要:工具调用方式
@@ -57,17 +60,8 @@ Gentle expression
工具会在后台异步执行,用户会先看到你的文字回复,然后才看到工具执行结果。
不要只调用工具而不说话。
重要:谨慎调用工具
只有当用户明确请求某个功能时才调用对应工具
日常聊天、打招呼、闲聊时不要调用任何工具,直接用文字回复即可
不要因为历史消息里出现过关键词就调用工具,只以“当前用户这句话”的明确意图为准
不要在同一条回复里“顺便处理/补做”其他人上一条的问题;一次只处理当前这句话
用户只提到城市名/地点名时,不要自动查询天气,也不要自动注册城市;除非用户明确说“查天气/注册城市/设置城市/联网搜索/搜歌/短剧/新闻/签到/个人信息”等。
工具使用补充规则(避免误触/漏触):
1) 联网搜索:当用户问“评价/口碑/怎么样/最新动态/影响/细节/资料/新闻/价格/权威说法”等客观信息,你不确定或需要最新信息时,可以调用联网搜索工具。
2) 绘图:只有用户明确要“画/出图/生成图片/来张图/看看腿白丝”等视觉内容时才调用绘图工具;如果只是聊天不要画。
3) 发病文学:只有用户明确要“发病文学/发病文/发病语录/来一段发病/整点发病/犯病文学”等才调用 get_fabing。
4) 天气/注册城市:一次只处理用户当前提到的那一个城市,不要把历史里出现过的多个城市一起查/一起注册。
5) 绝对禁止在正文里输出任何“文本形式工具调用”或控制符例如tavilywebsearch{...}、tavily_web_search{...}、web_search{...}、<ctrl46>、展开阅读下文。
6) 歌词找歌:当用户问“这句歌词/台词是哪首歌”时,先联网搜索确认歌名,再调用 search_music 发送音乐。
工具判定流程(先判再答):
1) 先判断是否需要工具:涉及事实/来源/最新信息/人物身份/作品出处/歌词或台词出处/名词解释时,优先调用联网搜索;涉及画图/点歌/短剧/天气/签到/个人信息时,用对应工具;否则纯聊天
2) 不确定或没有把握时:先搜索或先问澄清,不要凭空猜
3) 工具已执行时:必须基于工具结果再回复,不要忽略结果直接编答案
4) 严禁输出“已触发工具处理/工具名/参数/调用代码”等系统语句
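
For context, a persona prompt like this one is normally sent as the system message together with the filtered tool schemas in a single chat-completions request; a hedged sketch of that wiring (the model name is a placeholder, field names follow the OpenAI-style API already used in AIChat/main.py):

system_prompt = "（瑞依的人设与工具规则，即上面的提示词文本）"

tool_schemas = [{
    "type": "function",
    "function": {
        "name": "generate_image",
        "description": "仅当用户明确要求生成图片/画图/出图/创作图像时调用;不要在闲聊中触发",
        "parameters": {"type": "object", "properties": {}},
    },
}]

payload = {
    "model": "gpt-4o-mini",              # placeholder model name
    "messages": [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": "画一张瑞依的自画像"},
    ],
    "tools": tool_schemas,               # collected from each plugin's get_llm_tools()
    "tool_choice": "auto",               # the stricter descriptions above steer when tools fire
}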


@@ -167,8 +167,24 @@ class ChatRoomSummary(PluginBase):
logger.info(f"群聊 {group_id} {time_desc}消息数量不足 ({len(messages)} < {self.config['behavior']['min_messages']})")
return None
max_messages = self.config.get("behavior", {}).get("max_messages", 1200)
try:
max_messages = int(max_messages)
except Exception:
max_messages = 1200
if max_messages > 0 and len(messages) > max_messages:
logger.info(f"群聊 {group_id} {time_desc}消息过多,截断为最近 {max_messages}")
messages = messages[-max_messages:]
formatted_messages = self._format_messages(messages)
summary = await self._call_ai_api(formatted_messages, group_id, time_desc)
if not summary and len(messages) > 300:
fallback_count = 300
logger.warning(f"群聊 {group_id} {time_desc}总结失败,尝试缩减为最近 {fallback_count} 条重试")
trimmed_messages = messages[-fallback_count:]
formatted_messages = self._format_messages(trimmed_messages)
summary = await self._call_ai_api(formatted_messages, group_id, time_desc)
return summary
except Exception as e:
@@ -237,6 +253,11 @@ class ChatRoomSummary(PluginBase):
def _format_messages(self, messages: List[Dict]) -> str:
"""格式化消息为AI可理解的格式"""
formatted_lines = []
max_length = self.config.get("behavior", {}).get("max_message_length", 200)
try:
max_length = int(max_length)
except Exception:
max_length = 200
for msg in messages:
create_time = msg['create_time']
@@ -247,8 +268,8 @@ class ChatRoomSummary(PluginBase):
nickname = msg.get('nickname') or msg['sender_wxid'][-8:]
content = msg['content'].replace('\n', '').strip()
if len(content) > 200:
content = content[:200] + "..."
if max_length > 0 and len(content) > max_length:
content = content[:max_length] + "..."
formatted_line = f'[{time_str}] {{"{nickname}": "{content}"}}--end--'
formatted_lines.append(formatted_line)
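
Each line produced above looks like this (timestamps and nicknames invented for illustration):

# [14:03] {"小明": "今晚谁打游戏"}--end--
# [14:05] {"阿狸": "我来,八点开"}--end--
# Content longer than max_message_length (default 200) is truncated and suffixed with "...".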
@@ -648,7 +669,7 @@ class ChatRoomSummary(PluginBase):
"type": "function",
"function": {
"name": "generate_summary",
"description": "生成群聊总结,可以选择今日或昨日的聊天记录",
"description": "仅当用户明确要求“群聊总结/今日总结/昨日总结”时调用;不要在闲聊或无总结需求时触发。",
"parameters": {
"type": "object",
"properties": {
@@ -705,4 +726,4 @@ class ChatRoomSummary(PluginBase):
except Exception as e:
logger.error(f"LLM工具执行失败: {e}")
return {"success": False, "message": f"执行失败: {str(e)}"}
return {"success": False, "message": f"执行失败: {str(e)}"}


@@ -486,7 +486,7 @@ class DeerCheckin(PluginBase):
"type": "function",
"function": {
"name": "deer_checkin",
"description": "鹿打卡,记录今天的鹿数量",
"description": "仅当用户明确要求“鹿打卡/鹿签到/记录今天的鹿数量”时调用;不要在闲聊、绘图或其他问题中调用。",
"parameters": {
"type": "object",
"properties": {
@@ -504,7 +504,7 @@ class DeerCheckin(PluginBase):
"type": "function",
"function": {
"name": "view_calendar",
"description": "查看本月的鹿打卡日历",
"description": "仅当用户明确要求“查看鹿打卡日历/本月打卡记录/打卡日历”时调用。",
"parameters": {
"type": "object",
"properties": {},
@@ -516,7 +516,7 @@ class DeerCheckin(PluginBase):
"type": "function",
"function": {
"name": "makeup_checkin",
"description": "补签指定日期的鹿打卡记录",
"description": "仅当用户明确要求“补签/补打卡某日期”时调用,不要自动触发。",
"parameters": {
"type": "object",
"properties": {
@@ -584,4 +584,4 @@ class DeerCheckin(PluginBase):
except Exception as e:
logger.error(f"LLM工具执行失败: {e}")
return {"success": False, "message": f"执行失败: {str(e)}"}
return {"success": False, "message": f"执行失败: {str(e)}"}


@@ -493,7 +493,7 @@ class EpicFreeGames(PluginBase):
"type": "function",
"function": {
"name": "get_epic_free_games",
"description": "获取Epic商店当前免费游戏信息。当用户询问Epic免费游戏、Epic喜加一等内容时调用此工具",
"description": "当用户明确询问Epic 免费游戏/喜加一/本周免费”时调用;不要在闲聊中触发",
"parameters": {
"type": "object",
"properties": {},


@@ -1 +0,0 @@
"""随机发病文学插件"""


@@ -1,355 +0,0 @@
"""
随机发病文学插件
支持指令触发和定时推送
"""
import tomllib
import asyncio
import aiohttp
import random
from pathlib import Path
from loguru import logger
from typing import Optional
from utils.plugin_base import PluginBase
from utils.decorators import on_text_message, schedule
from WechatHook import WechatHookClient
# 可选导入代理支持
try:
from aiohttp_socks import ProxyConnector
PROXY_SUPPORT = True
except ImportError:
PROXY_SUPPORT = False
logger.warning("aiohttp_socks 未安装,代理功能将不可用")
class Fabing(PluginBase):
"""随机发病文学插件"""
description = "随机发病文学 - 指令触发和定时推送"
author = "ShiHao"
version = "1.0.0"
def __init__(self):
super().__init__()
self.config = None
async def async_init(self):
"""异步初始化"""
try:
config_path = Path(__file__).parent / "config.toml"
if not config_path.exists():
logger.error(f"发病文学插件配置文件不存在: {config_path}")
return
with open(config_path, "rb") as f:
self.config = tomllib.load(f)
logger.success("随机发病文学插件已加载")
except Exception as e:
logger.error(f"随机发病文学插件初始化失败: {e}")
self.config = None
async def _fetch_fabing(self, name: str) -> Optional[str]:
"""获取发病文学"""
try:
api_config = self.config["api"]
timeout = aiohttp.ClientTimeout(total=api_config["timeout"])
# 配置代理
connector = None
proxy_config = self.config.get("proxy", {})
if proxy_config.get("enabled", False):
proxy_type = proxy_config.get("type", "socks5").upper()
proxy_host = proxy_config.get("host", "127.0.0.1")
proxy_port = proxy_config.get("port", 7890)
proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"
if PROXY_SUPPORT:
try:
connector = ProxyConnector.from_url(proxy_url)
except Exception as e:
logger.warning(f"代理配置失败,将直连: {e}")
connector = None
params = {"name": name}
async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
async with session.get(api_config["base_url"], params=params) as resp:
if resp.status != 200:
error_text = await resp.text()
logger.error(f"发病文学 API 错误: {resp.status}, {error_text}")
return None
result = await resp.json()
if result.get("code") != 200:
logger.error(f"发病文学 API 返回错误: {result.get('message')}")
return None
data = result.get("data", {})
saying = data.get("saying", "")
if not saying:
logger.warning("发病文学 API 返回数据为空")
return None
logger.info(f"获取发病文学成功: {name}")
return saying
except Exception as e:
logger.error(f"获取发病文学失败: {e}")
import traceback
logger.error(traceback.format_exc())
return None
async def _get_random_group_member(self, bot: WechatHookClient, group_id: str) -> Optional[str]:
"""从群组中随机抽取一名成员的昵称"""
try:
# 从MessageLogger数据库中获取该群组的所有成员昵称
from plugins.MessageLogger.main import MessageLogger
msg_logger = MessageLogger.get_instance()
if not msg_logger:
logger.warning("MessageLogger实例不存在无法获取群成员")
return None
with msg_logger.get_db_connection() as conn:
with conn.cursor() as cursor:
# 查询该群组最近活跃的成员昵称(去重)
sql = """
SELECT DISTINCT nickname
FROM messages
WHERE group_id = %s
AND nickname != ''
AND nickname IS NOT NULL
ORDER BY create_time DESC
LIMIT 100
"""
cursor.execute(sql, (group_id,))
results = cursor.fetchall()
if not results:
logger.warning(f"群组 {group_id} 没有找到成员昵称")
return None
# 提取昵称列表
nicknames = [row[0] for row in results]
# 随机选择一个昵称
selected_nickname = random.choice(nicknames)
logger.info(f"从群组 {group_id} 随机选择了昵称: {selected_nickname}")
return selected_nickname
except Exception as e:
logger.error(f"获取随机群成员失败: {e}")
import traceback
logger.error(traceback.format_exc())
return None
@on_text_message(priority=70)
async def handle_command(self, bot: WechatHookClient, message: dict):
"""处理指令触发"""
if self.config is None:
return True
content = message.get("Content", "").strip()
from_wxid = message.get("FromWxid", "")
is_group = message.get("IsGroup", False)
# 检查是否是触发指令
keywords = self.config["behavior"]["command_keywords"]
matched = False
name = None
for keyword in keywords:
# 支持 "发病 xxx" 或 "@机器人 发病 xxx"
if content.startswith(keyword + " ") or content.endswith(" " + keyword + " "):
matched = True
# 提取名字
parts = content.split()
for i, part in enumerate(parts):
if part == keyword or part == keyword.lstrip("/"):
if i + 1 < len(parts):
name = parts[i + 1]
break
break
elif content == keyword:
matched = True
name = None # 没有指定名字
break
if not matched:
return True
if not self.config["behavior"]["enabled"]:
return True
# 检查群聊过滤
if is_group:
enabled_groups = self.config["behavior"]["enabled_groups"]
disabled_groups = self.config["behavior"]["disabled_groups"]
if from_wxid in disabled_groups:
return True
if enabled_groups and from_wxid not in enabled_groups:
return True
# 如果没有指定名字,从群成员中随机选择
if not name and is_group:
name = await self._get_random_group_member(bot, from_wxid)
if not name:
await bot.send_text(from_wxid, "❌ 无法获取群成员信息")
return False
elif not name:
await bot.send_text(from_wxid, "❌ 请指定名字\n格式:发病 名字")
return False
logger.info(f"收到发病文学请求: {from_wxid}, name={name}")
try:
saying = await self._fetch_fabing(name)
if not saying:
await bot.send_text(from_wxid, "❌ 获取发病文学失败,请稍后重试")
return False
# 发送发病文学
await bot.send_text(from_wxid, saying)
logger.success(f"已发送发病文学: {name}")
except Exception as e:
logger.error(f"处理发病文学请求失败: {e}")
await bot.send_text(from_wxid, f"❌ 请求失败: {str(e)}")
return False
@schedule('cron', minute=0)
async def scheduled_push(self, bot=None):
"""定时推送发病文学(每小时整点)"""
if not self.config or not self.config["schedule"]["enabled"]:
return
logger.info("开始执行发病文学定时推送任务")
try:
# 获取bot实例
if not bot:
from utils.plugin_manager import PluginManager
bot = PluginManager().bot
if not bot:
logger.error("定时任务无法获取bot实例")
return
# 获取目标群组
enabled_groups = self.config["behavior"]["enabled_groups"]
disabled_groups = self.config["behavior"]["disabled_groups"]
# 如果没有配置enabled_groups跳过
if not enabled_groups:
logger.warning("未配置群组白名单,跳过定时推送")
return
success_count = 0
group_interval = self.config["schedule"]["group_interval"]
for group_id in enabled_groups:
if group_id in disabled_groups:
continue
try:
logger.info(f"向群聊 {group_id} 推送发病文学")
# 从群成员中随机选择一个昵称
name = await self._get_random_group_member(bot, group_id)
if not name:
logger.warning(f"群聊 {group_id} 无法获取群成员昵称")
continue
# 获取发病文学
saying = await self._fetch_fabing(name)
if not saying:
logger.warning(f"群聊 {group_id} 获取发病文学失败")
continue
# 发送发病文学
await bot.send_text(group_id, saying)
success_count += 1
logger.success(f"群聊 {group_id} 推送成功")
# 群聊之间的间隔
await asyncio.sleep(group_interval)
except Exception as e:
logger.error(f"推送到 {group_id} 失败: {e}")
import traceback
logger.error(traceback.format_exc())
logger.info(f"发病文学定时推送完成 - 成功: {success_count}/{len(enabled_groups)}")
except Exception as e:
logger.error(f"发病文学定时推送失败: {e}")
import traceback
logger.error(traceback.format_exc())
def get_llm_tools(self):
"""返回LLM工具定义"""
return [{
"type": "function",
"function": {
"name": "get_fabing",
"description": "获取随机发病文学。当用户要求发病、整活、发疯等内容时调用此工具。",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "要发病的对象名字"
}
},
"required": ["name"]
}
}
}]
async def execute_llm_tool(self, tool_name: str, arguments: dict, bot: WechatHookClient, from_wxid: str) -> dict:
"""执行LLM工具调用"""
if tool_name != "get_fabing":
return None
try:
logger.info(f"LLM工具调用发病文学: {from_wxid}")
name = arguments.get("name")
if not name:
return {
"success": False,
"message": "缺少名字参数"
}
saying = await self._fetch_fabing(name)
if not saying:
return {
"success": False,
"message": "获取发病文学失败,请稍后重试"
}
# 发送发病文学
await bot.send_text(from_wxid, saying)
return {
"success": True,
"message": f"已发送发病文学",
"no_reply": True # 已发送内容不需要AI再回复
}
except Exception as e:
logger.error(f"LLM工具执行失败: {e}")
return {
"success": False,
"message": f"执行失败: {str(e)}"
}


@@ -312,7 +312,7 @@ class KFC(PluginBase):
"type": "function",
"function": {
"name": "get_kfc",
"description": "获取KFC疯狂星期四文案。当用户询问KFC、疯狂星期四、肯德基等内容时调用此工具",
"description": "仅当用户明确要求“疯狂星期四/KFC 文案/肯德基段子”时调用;不要在普通聊天中触发",
"parameters": {
"type": "object",
"properties": {},


@@ -362,7 +362,7 @@ class MusicPlugin(PluginBase):
"type": "function",
"function": {
"name": "search_music",
"description": "搜索并播放音乐。当用户想听歌、点歌、播放音乐时调用此函数",
"description": "仅当用户明确要求“点歌/听歌/播放某首歌”时调用;如果只是问歌词出处,先用搜索确认歌名再点歌",
"parameters": {
"type": "object",
"properties": {


@@ -175,7 +175,7 @@ class News60s(PluginBase):
"type": "function",
"function": {
"name": "get_daily_news",
"description": "获取每日60秒读懂世界新闻图片。当用户询问今日新闻每日新闻60秒新闻早报等内容时调用此工具",
"description": "仅当用户明确要求“今日新闻/每日新闻/60秒新闻/早报”时调用;不要在闲聊或非新闻问题中触发",
"parameters": {
"type": "object",
"properties": {},


@@ -387,7 +387,7 @@ class PlayletSearch(PluginBase):
"type": "function",
"function": {
"name": "search_playlet",
"description": "搜索短剧并获取视频链接",
"description": "仅当用户明确要求“搜索短剧/找短剧/看某短剧”时调用;不要在普通聊天中触发。",
"parameters": {
"type": "object",
"properties": {


@@ -182,7 +182,7 @@ class RandomImage(PluginBase):
"type": "function",
"function": {
"name": "get_random_image",
"description": "获取随机图片,从三个接口中随机选择一个返回一张图片",
"description": "仅当用户明确要求“随机图片/来张图/黑丝/白丝”等随机图时调用;不要在闲聊中触发。",
"parameters": {
"type": "object",
"properties": {},


@@ -159,7 +159,7 @@ class RandomVideo(PluginBase):
"type": "function",
"function": {
"name": "get_random_video",
"description": "获取随机小姐姐视频。当用户想看随机视频小姐姐视频、擦边视频时调用",
"description": "仅当用户明确要求“随机视频/小姐姐视频/短视频时调用;不要在闲聊中触发。",
"parameters": {
"type": "object",
"properties": {},


@@ -1527,7 +1527,7 @@ class SignInPlugin(PluginBase):
"type": "function",
"function": {
"name": "user_signin",
"description": "用户签到,获取积分奖励",
"description": "仅当用户明确要求“签到/签个到/打卡”时调用;不要在闲聊中触发。",
"parameters": {
"type": "object",
"properties": {},
@@ -1539,7 +1539,7 @@ class SignInPlugin(PluginBase):
"type": "function",
"function": {
"name": "check_profile",
"description": "查看用户个人信息,包括积分连续签到天数等",
"description": "仅当用户明确要求“个人信息/我的信息/积分/连续签到”时调用;不要在闲聊中触发。",
"parameters": {
"type": "object",
"properties": {},
@@ -1551,7 +1551,7 @@ class SignInPlugin(PluginBase):
"type": "function",
"function": {
"name": "register_city",
"description": "注册或更新用户城市信息",
"description": "仅当用户明确要求“注册城市/设置城市/修改默认城市”时调用;不要只凭城市名触发。",
"parameters": {
"type": "object",
"properties": {


@@ -141,7 +141,7 @@ class TavilySearch(PluginBase):
"type": "function",
"function": {
"name": "tavily_web_search",
"description": "使用 Tavily 进行联网搜索,获取最新的网络信息。适用于需要查询实时信息、新闻、知识等场景",
"description": "仅当用户明确要求“联网搜索/查资料/最新信息/来源/权威说法”或需要事实核实时调用;不要在闲聊中触发",
"parameters": {
"type": "object",
"properties": {


@@ -303,7 +303,7 @@ class WeatherPlugin(PluginBase):
"type": "function",
"function": {
"name": "query_weather",
"description": "查询天气预报信息,包括温度、天气状况、风力和空气质量。当用户询问天气气温、会不会下雨等天气相关问题时,应该调用此函数。如果用户没有指定城市,函数会自动使用用户之前设置的城市;如果用户指定城市名称,则查询该城市的天气",
"description": "当用户明确询问天气/气温/预报/空气质量时调用;不要仅凭城市名自动触发。用户指定城市时可使用其默认城市",
"parameters": {
"type": "object",
"properties": {


@@ -323,7 +323,7 @@ class ZImageTurbo(PluginBase):
"type": "function",
"function": {
"name": "generate_image",
"description": "使用AI生成图像。当用户要求画图、绘画、生成图片、创作图像时调用此工具。支持各种风格的图像生成",
"description": "仅当用户明确要求生成图片/画图/出图/创作图像时调用;不要在闲聊中触发",
"parameters": {
"type": "object",
"properties": {