feat: 新增平台

This commit is contained in:
2025-11-30 19:49:25 +08:00
parent c3e56a954d
commit fbd2c491b2
41 changed files with 4293 additions and 76 deletions

105
parsers/weibo.py Normal file
View File

@@ -0,0 +1,105 @@
from parsers.base import BaseParser
from typing import Dict
from urllib.parse import urlencode
class WeiboUctbParser(BaseParser):
    """Weibo video parser backed by the Uctb (优创) API."""

    def parse(self, video_url: str) -> Dict:
        """Parse a Weibo share URL into a normalized result dict.

        Args:
            video_url: the Weibo video/share URL to resolve.

        Returns:
            The dict produced by ``self._normalize_response``.

        Raises:
            Exception: if the HTTP request, JSON decode, or data
                extraction fails; the original error is chained.
        """
        try:
            # urlencode guards against special characters in the share URL.
            url = f"{self.api_url}/api/videojx?{urlencode({'url': video_url})}"
            response = self._make_request(url)
            data = response.json()
            return self._extract_data(data)
        except Exception as e:
            # Tag the message with the backend name so callers can tell
            # which API failed; chain so the traceback is preserved.
            raise Exception(f"微博解析失败(优创 API): {str(e)}") from e

    def _extract_data(self, data: Dict) -> Dict:
        """Extract and normalize the API payload.

        Expected response shape:
        {
            "code": 200,
            "data": {
                "desc": "video description",
                "cover": "cover URL",
                "playurl": "video URL"
            },
            "msg": "请求成功"
        }
        """
        # Fail fast on an API-level error. Keeping this check OUTSIDE the
        # try below fixes the original double-wrapping, where the
        # "解析失败" raise was re-caught and re-labelled "数据提取失败".
        if data.get("code") != 200:
            raise Exception(f"解析失败: {data.get('msg', '未知错误')}")
        try:
            video_data = data.get("data", {})
            cover = video_data.get("cover", "")
            video_url = video_data.get("playurl", "")
            # `or ""` normalizes an explicit null desc to an empty string.
            title = video_data.get("desc", "") or ""
            description = ""
            author = ""  # the Uctb API does not return an author field
            return self._normalize_response(cover, video_url, title, description, author)
        except Exception as e:
            raise Exception(f"数据提取失败: {str(e)}") from e
class WeiboYaohuParser(BaseParser):
    """Weibo video parser backed by the Yaohu (妖狐) API."""

    def parse(self, video_url: str) -> Dict:
        """Parse a Weibo share URL into a normalized result dict.

        Args:
            video_url: the Weibo video/share URL to resolve.

        Returns:
            The dict produced by ``self._normalize_response``.

        Raises:
            Exception: if the HTTP request, JSON decode, or data
                extraction fails; the original error is chained.
        """
        try:
            url = f"{self.api_url}/api/v6/video/weibo?{urlencode({'key': self.api_key, 'url': video_url})}"
            # SECURITY: verify=False disables TLS certificate verification,
            # exposing the request to MITM — confirm the endpoint really
            # has a broken certificate before keeping this.
            response = self._make_request(url, verify=False)
            data = response.json()
            return self._extract_data(data)
        except Exception as e:
            # Tag the message with the backend name so callers can tell
            # which API failed; chain so the traceback is preserved.
            raise Exception(f"微博解析失败(妖狐 API): {str(e)}") from e

    def _extract_data(self, data: Dict) -> Dict:
        """Extract and normalize the API payload.

        Expected response shape:
        {
            "code": 200,
            "msg": "解析成功",
            "data": {
                "author": "author name",
                "title": "title",
                "cover": "cover URL",
                "quality_urls": {
                    "超清 2K60": "video URL",
                    "高清 1080P": "video URL",
                    ...
                }
            }
        }
        """
        # Fail fast on an API-level error. Keeping this check OUTSIDE the
        # try below fixes the original double-wrapping, where the
        # "解析失败" raise was re-caught and re-labelled "数据提取失败".
        if data.get("code") != 200:
            raise Exception(f"解析失败: {data.get('msg', '未知错误')}")
        try:
            video_data = data.get("data", {})
            cover = video_data.get("cover", "")
            title = video_data.get("title", "")
            description = ""
            author = video_data.get("author", "")
            # Pick the best available stream by descending quality
            # preference: 超清 2K60 > 高清 1080P > 高清 720P.
            quality_urls = video_data.get("quality_urls", {})
            video_url = ""
            for quality in ["超清 2K60", "高清 1080P", "高清 720P"]:
                if quality in quality_urls:
                    video_url = quality_urls[quality]
                    break
            # Fall back to the first entry (dict order = API order)
            # when none of the preferred labels is present.
            if not video_url and quality_urls:
                video_url = next(iter(quality_urls.values()))
            return self._normalize_response(cover, video_url, title, description, author)
        except Exception as e:
            raise Exception(f"数据提取失败: {str(e)}") from e